merge code to master

Change-Id: Id066d7f2878418dcd7fb3dc7f1f405dd8599ba85
This commit is contained in:
xiaodongwang 2014-11-03 17:33:40 -08:00
parent 6b73c5b0c8
commit a60bdb09c6
48 changed files with 1478 additions and 390 deletions

View File

@ -5,8 +5,8 @@ A Deployment Automation System. See Wiki page at https://wiki.openstack.org/wik
How to install Compass?
-----------------------
1. Run `git clone https://github.com/stackforge/compass-core`
2. Run `cd compass-core` to the Compass project directory.
1. Run `git clone https://github.com/huawei-cloud/compass`
2. Run `cd compass` to the Compass project directory.
3. Run `./install/install.sh` to setup compass environment. Please note that before you execute `install.sh`, you may setup your environment variables in `install/install.conf`, explanations and examples of those variables can be found in `install.conf`.
4. Run `source /etc/profile` to setup compass profile.
5. Run `./bin/refresh.sh` to initialize database.

163
bin/clean_installers.py Executable file
View File

@ -0,0 +1,163 @@
#!/usr/bin/env python
#
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scripts to delete cluster and it hosts"""
import logging
import os
import os.path
import sys
current_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(current_dir)
import switch_virtualenv
from compass.actions import clean
from compass.db.api import adapter_holder as adapter_api
from compass.db.api import database
from compass.db.api import user as user_api
from compass.tasks.client import celery
from compass.utils import flags
from compass.utils import logsetting
from compass.utils import setting_wrapper as setting
# Command line flags controlling what gets cleaned and how.
flags.add_bool('async',
               help='run in async mode',
               default=True)
# Fixed user-visible typo: 'seperated' -> 'separated'.
flags.add('os_installers',
          help='comma separated os installers',
          default='')
flags.add('package_installers',
          help='comma separated package installers',
          default='')
def clean_installers():
    """Clean the os and package installers referenced by the adapters.

    ``--os_installers`` and ``--package_installers`` are comma separated
    installer aliases; an empty value selects every installer found in
    the adapters.  With ``--async`` (the default) the cleaning is
    dispatched as celery tasks, otherwise it runs inline via
    ``compass.actions.clean``.
    """
    os_installers = [
        os_installer
        for os_installer in flags.OPTIONS.os_installers.split(',')
        if os_installer
    ]
    package_installers = [
        package_installer
        for package_installer in flags.OPTIONS.package_installers.split(',')
        if package_installer
    ]
    user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL)
    adapters = adapter_api.list_adapters(user)
    # Collect the installers referenced by the adapters, keyed by alias,
    # filtered by the names given on the command line.
    filtered_os_installers = {}
    filtered_package_installers = {}
    for adapter in adapters:
        logging.info(
            'got adapter: %s', adapter
        )
        if 'os_installer' in adapter:
            os_installer = adapter['os_installer']
            os_installer_name = os_installer['alias']
            if not os_installers or os_installer_name in os_installers:
                filtered_os_installers[os_installer_name] = os_installer
            else:
                # Fixed log message typo: 'isntaller' -> 'installer'.
                logging.info(
                    'ignore os installer %s', os_installer_name
                )
        else:
            logging.info(
                'cannot find os installer in adapter %s',
                adapter['name']
            )
        if 'package_installer' in adapter:
            package_installer = adapter['package_installer']
            package_installer_name = package_installer['alias']
            if (
                not package_installers or
                package_installer_name in package_installers
            ):
                filtered_package_installers[package_installer_name] = (
                    package_installer
                )
            else:
                logging.info(
                    'ignore package installer %s', package_installer_name
                )
        else:
            logging.info(
                'cannot find package installer in adapter %s',
                adapter['name']
            )
    logging.info(
        'clean os installers: %s', filtered_os_installers.keys()
    )
    logging.info(
        'clean package installers: %s', filtered_package_installers.keys()
    )
    # getattr avoids a SyntaxError under python3, where `async` is a
    # reserved keyword; behavior is identical to `flags.OPTIONS.async`.
    if getattr(flags.OPTIONS, 'async'):
        for os_installer_name, os_installer in filtered_os_installers.items():
            celery.send_task(
                'compass.tasks.clean_os_installer',
                (
                    os_installer['name'],
                    os_installer['settings']
                )
            )
        for package_installer_name, package_installer in (
            filtered_package_installers.items()
        ):
            celery.send_task(
                'compass.tasks.clean_package_installer',
                (
                    package_installer['name'],
                    package_installer['settings']
                )
            )
    else:
        # Synchronous mode: clean inline, logging (not raising) on
        # per-installer failures so one bad installer does not stop the
        # rest.
        for os_installer_name, os_installer in (
            filtered_os_installers.items()
        ):
            try:
                clean.clean_os_installer(
                    os_installer['name'],
                    os_installer['settings']
                )
            except Exception as error:
                logging.error(
                    'failed to clean os installer %s', os_installer_name
                )
                logging.exception(error)
        for package_installer_name, package_installer in (
            filtered_package_installers.items()
        ):
            try:
                clean.clean_package_installer(
                    package_installer['name'],
                    package_installer['settings']
                )
            except Exception as error:
                logging.error(
                    'failed to clean package installer %s',
                    package_installer_name
                )
                logging.exception(error)
# Script entry point: parse flags, set up logging and the database
# connection, then clean the selected installers.
if __name__ == '__main__':
    flags.init()
    logsetting.init()
    database.init()
    clean_installers()

View File

@ -16,11 +16,8 @@
"""binary to deploy a cluster by compass client api."""
import logging
import netaddr
import os
import re
import requests
import simplejson as json
import socket
import sys
import time
@ -32,6 +29,10 @@ sys.path.append(current_dir)
import switch_virtualenv
import netaddr
import requests
import simplejson as json
from compass.apiclient.restful import Client
from compass.utils import flags
from compass.utils import logsetting

View File

@ -28,17 +28,19 @@ sys.path.append(current_dir)
import switch_virtualenv
from compass.actions import delete
from compass.db.api import cluster as cluster_api
from compass.db.api import database
from compass.db.api import host as host_api
from compass.db.api import user as user_api
from compass.db.api import utils
from compass.db import models
from compass.tasks.client import celery
from compass.utils import flags
from compass.utils import logsetting
from compass.utils import setting_wrapper as setting
flags.add_bool('async',
help='run in async mode',
default=True)
flags.add('clusternames',
help='comma seperated cluster names',
default='')
@ -47,27 +49,54 @@ flags.add_bool('delete_hosts',
default=False)
if __name__ == '__main__':
flags.init()
logsetting.init()
database.init()
def delete_clusters():
clusternames = [
clustername
for clustername in flags.OPTIONS.clusternames.split(',')
if clustername
]
logging.info('delete clusters %s', clusternames)
user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL)
list_cluster_args = {}
if clusternames:
list_cluster_args['name'] = clusternames
clusters = cluster_api.list_clusters(
user, name=clusternames
user, **list_cluster_args
)
if flags.OPTIONS.delete_hosts:
for cluster in clusters:
hosts = cluster_api.list_cluster_hosts(
user, cluster['id'])
for host in hosts:
logging.info('delete host %s', host['hostname'])
host_api.del_host(user, host['id'])
delete_underlying_host = flags.OPTIONS.delete_hosts
for cluster in clusters:
logging.info('delete cluster %s', cluster['name'])
cluster_api.del_cluster(user, cluster['id'])
cluster_id = cluster['id']
hosts = cluster_api.list_cluster_hosts(user, cluster_id)
host_id_list = [host['id'] for host in hosts]
logging.info(
'delete cluster %s and cluster hosts %s',
cluster_id, host_id_list
)
logging.info('delete underlying host? %s', delete_underlying_host)
if flags.OPTIONS.async:
celery.send_task(
'compass.tasks.delete_cluster',
(
setting.COMPASS_ADMIN_EMAIL,
cluster_id,
host_id_list,
delete_underlying_host
)
)
else:
try:
delete.delete_cluster(
cluster_id,
host_id_list,
setting.COMPASS_ADMIN_EMAIL,
delete_underlying_host
)
except Exception as error:
logging.error('failed to delete cluster %s', cluster)
logging.exception(error)
if __name__ == '__main__':
flags.init()
logsetting.init()
database.init()
delete_clusters()

View File

@ -16,7 +16,6 @@
"""main script to poll machines which is connected to the switches."""
import functools
import lockfile
import logging
import os
import sys
@ -28,6 +27,7 @@ sys.path.append(current_dir)
import switch_virtualenv
import lockfile
from multiprocessing import Pool
from compass.actions import poll_switch

View File

@ -16,7 +16,6 @@
"""main script to run as service to update hosts installing progress."""
import functools
import lockfile
import logging
import os
import sys
@ -28,6 +27,8 @@ sys.path.append(current_dir)
import switch_virtualenv
import lockfile
from compass.actions import update_progress
from compass.db.api import database
from compass.tasks.client import celery

View File

@ -2,10 +2,7 @@
set -e
service mysqld restart
/opt/compass/bin/manage_db.py createdb
echo "You may run '/opt/compass/bin/clean_nodes.sh' to clean nodes on chef server"
echo "You may run '/opt/compass/bin/clean_clients.sh' to clean clients on chef server"
echo "you may run '/opt/compass/bin/clean_environments.sh' to clean environments on chef server"
echo "you may run '/opt/compass/bin/remove_systems.sh' to clean systems on cobbler"
/opt/compass/bin/clean_installers.py
/opt/compass/bin/clean_installation_logs.py
service httpd restart
service rsyslog restart

182
compass/actions/clean.py Normal file
View File

@ -0,0 +1,182 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to clean installers
"""
import chef
import logging
import xmlrpclib
from compass.actions import util
class CobblerInstaller(object):
    """Client that talks to a cobbler server over XML-RPC and removes
    every registered system from it."""

    # Keys looked up in the installer settings dict.
    CREDENTIALS = "credentials"
    USERNAME = 'username'
    PASSWORD = 'password'
    INSTALLER_URL = "cobbler_url"

    def __init__(self, settings):
        """Create the XML-RPC proxy and log in.

        Failures are logged rather than raised; in that case the
        instance is left without a usable remote/token.
        """
        credentials = settings[self.CREDENTIALS]
        username = credentials[self.USERNAME]
        password = credentials[self.PASSWORD]
        cobbler_url = settings[self.INSTALLER_URL]
        try:
            self.remote = xmlrpclib.Server(cobbler_url)
            self.token = self.remote.login(username, password)
            logging.info('cobbler %s client created', cobbler_url)
        except Exception as error:
            logging.error(
                'failed to login %s with (%s, %s)',
                cobbler_url, username, password
            )
            logging.exception(error)

    def clean(self):
        """Remove every system registered on the cobbler server,
        logging and continuing on per-system failures."""
        for system in self.remote.get_systems():
            system_name = system['name']
            try:
                self.remote.remove_system(system_name, self.token)
                logging.info('system %s is removed', system_name)
            except Exception as error:
                logging.error(
                    'failed to remove system %s', system_name
                )
                logging.exception(error)
class ChefInstaller(object):
    """Client that talks to a chef server and deletes its nodes,
    clients, environments and databag items."""

    # Keys looked up in the installer settings dict.
    DATABAGS = "databags"
    CHEFSERVER_URL = "chef_url"
    CHEFSERVER_DNS = "chef_server_dns"
    CHEFSERVER_IP = "chef_server_ip"
    KEY_DIR = "key_dir"
    CLIENT = "client_name"

    def __init__(self, settings):
        """Build the chef API handle.

        Falls back to pychef autoconfiguration unless a complete
        (url, key_dir, client) triple is supplied.  Failures are logged
        rather than raised.
        """
        installer_url = settings.get(self.CHEFSERVER_URL, None)
        key_dir = settings.get(self.KEY_DIR, None)
        client = settings.get(self.CLIENT, None)
        try:
            if installer_url and key_dir and client:
                self.api = chef.ChefAPI(installer_url, key_dir, client)
            else:
                self.api = chef.autoconfigure()
            logging.info(
                'chef client created %s(%s, %s)',
                installer_url, key_dir, client
            )
        except Exception as error:
            logging.error(
                'failed to create chef client %s(%s, %s)',
                installer_url, key_dir, client
            )
            logging.exception(error)

    def clean(self):
        """Delete nodes, clients, environments and databag items.

        Each category sits in its own try block so a failure in one
        category does not stop cleanup of the others.  The chef built-in
        clients ('chef-webui', 'chef-validator') and the '_default'
        environment are preserved.
        """
        try:
            for node_name in chef.Node.list(api=self.api):
                chef.Node(node_name, api=self.api).delete()
                logging.info('delete node %s', node_name)
        except Exception as error:
            logging.error('failed to delete some nodes')
            logging.exception(error)
        try:
            for client_name in chef.Client.list(api=self.api):
                if client_name in ['chef-webui', 'chef-validator']:
                    continue
                chef.Client(client_name, api=self.api).delete()
                logging.info('delete client %s', client_name)
        except Exception as error:
            logging.error('failed to delete some clients')
            logging.exception(error)
        try:
            for env_name in chef.Environment.list(api=self.api):
                if env_name == '_default':
                    continue
                chef.Environment(env_name, api=self.api).delete()
                logging.info('delete env %s', env_name)
        except Exception as error:
            logging.error('failed to delete some envs')
            logging.exception(error)
        try:
            for databag_name in chef.DataBag.list(api=self.api):
                databag = chef.DataBag(databag_name, api=self.api)
                for item_name, item in databag.items():
                    item.delete()
                    logging.info(
                        'delete item %s from databag %s',
                        item_name, databag_name
                    )
        except Exception as error:
            logging.error('failed to delete some databag items')
            logging.exception(error)
# Registries mapping installer names (as stored in installer settings)
# to the classes that know how to clean them.
OS_INSTALLERS = {
    'cobbler': CobblerInstaller
}
PK_INSTALLERS = {
    'chef_installer': ChefInstaller
}
def clean_os_installer(
    os_installer_name, os_installer_settings
):
    """Clean all system records in the named os installer.

    :param os_installer_name: key into OS_INSTALLERS (e.g. 'cobbler').
    :param os_installer_settings: settings dict handed to the installer
        class constructor.
    :raises Exception: when the serialized_action lock cannot be taken.
    """
    with util.lock('serialized_action', timeout=100) as lock:
        if not lock:
            raise Exception(
                'failed to acquire lock to clean os installer'
            )
        if os_installer_name not in OS_INSTALLERS:
            logging.error(
                '%s not found in os_installers',
                os_installer_name
            )
            # Bug fix: bail out instead of falling through to a
            # guaranteed KeyError on the lookup below.
            return
        os_installer = OS_INSTALLERS[os_installer_name](
            os_installer_settings
        )
        os_installer.clean()
def clean_package_installer(
    package_installer_name, package_installer_settings
):
    """Clean all records in the named package installer.

    :param package_installer_name: key into PK_INSTALLERS
        (e.g. 'chef_installer').
    :param package_installer_settings: settings dict handed to the
        installer class constructor.
    :raises Exception: when the serialized_action lock cannot be taken.
    """
    with util.lock('serialized_action', timeout=100) as lock:
        if not lock:
            raise Exception(
                'failed to acquire lock to clean package installer'
            )
        if package_installer_name not in PK_INSTALLERS:
            # Bug fix: message referred to os_installers; this is the
            # package installer registry.
            logging.error(
                '%s not found in package_installers',
                package_installer_name
            )
            # Bug fix: bail out instead of falling through to a
            # guaranteed KeyError on the lookup below.
            return
        package_installer = PK_INSTALLERS[package_installer_name](
            package_installer_settings
        )
        package_installer.clean()

147
compass/actions/delete.py Normal file
View File

@ -0,0 +1,147 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to delete a given cluster
"""
import logging
from compass.actions import util
from compass.db.api import cluster as cluster_api
from compass.db.api import user as user_db
from compass.deployment.deploy_manager import DeployManager
from compass.deployment.utils import constants as const
def delete_cluster(
    cluster_id, host_id_list,
    username=None, delete_underlying_host=False
):
    """Delete cluster.

    :param cluster_id: id of the cluster.
    :type cluster_id: int
    :param host_id_list: ids of the hosts belonging to the cluster.
    :param username: email of the acting user, resolved via user_db.
    :param delete_underlying_host: when True the os level host records
        are removed as well, not only the cluster membership.

    .. note::
        The function should be called out of database session.
    """
    with util.lock('serialized_action', timeout=100) as lock:
        if not lock:
            raise Exception('failed to acquire lock to delete cluster')
        user = user_db.get_user_object(username)
        # Mark hosts and the cluster as ERROR and flag everything for
        # reinstall before tearing the deployment down.
        for host_id in host_id_list:
            cluster_api.update_cluster_host_state(
                user, cluster_id, host_id, state='ERROR'
            )
        cluster_api.update_cluster_state(
            user, cluster_id, state='ERROR'
        )
        cluster_api.update_cluster(
            user, cluster_id, reinstall_distributed_system=True
        )
        for host_id in host_id_list:
            cluster_api.update_cluster_host(
                user, cluster_id, host_id, reinstall_os=True
            )
        # Gather the info structures the deploy manager needs.
        cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
        adapter_id = cluster_info[const.ADAPTER_ID]
        adapter_info = util.ActionHelper.get_adapter_info(
            adapter_id, cluster_id, user)
        hosts_info = util.ActionHelper.get_hosts_info(
            cluster_id, host_id_list, user)
        logging.debug('adapter info: %s', adapter_info)
        logging.debug('cluster info: %s', cluster_info)
        logging.debug('hosts info: %s', hosts_info)
        # package_only is False only when the underlying hosts should be
        # wiped too.
        deploy_manager = DeployManager(adapter_info, cluster_info, hosts_info)
        deploy_manager.remove_hosts(
            package_only=not delete_underlying_host,
            delete_cluster=True
        )
        # Finally drop the database records.
        util.ActionHelper.delete_cluster(
            cluster_id, host_id_list, user,
            delete_underlying_host
        )
def delete_cluster_host(
    cluster_id, host_id,
    username=None, delete_underlying_host=False
):
    """Delete a single host from a cluster.

    :param cluster_id: id of the cluster.
    :param host_id: id of the host to remove from the cluster.
    :param username: email of the acting user, resolved via user_db.
    :param delete_underlying_host: when True the os level host record is
        removed as well, not only the cluster membership.
    """
    with util.lock('serialized_action', timeout=100) as lock:
        if not lock:
            raise Exception('failed to acquire lock to delete clusterhost')
        user = user_db.get_user_object(username)
        cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
        adapter_id = cluster_info[const.ADAPTER_ID]
        adapter_info = util.ActionHelper.get_adapter_info(
            adapter_id, cluster_id, user)
        hosts_info = util.ActionHelper.get_hosts_info(
            cluster_id, [host_id], user)
        deploy_manager = DeployManager(adapter_info, cluster_info, hosts_info)
        logging.debug('Created deploy manager with %s %s %s'
                      % (adapter_info, cluster_info, hosts_info))
        # delete_cluster=False: the cluster itself stays, only this host
        # is removed from the installers.
        deploy_manager.remove_hosts(
            package_only=not delete_underlying_host,
            delete_cluster=False
        )
        util.ActionHelper.delete_cluster_host(
            cluster_id, host_id, user,
            delete_underlying_host
        )
def delete_host(
    host_id, cluster_id_list, username=None
):
    """Delete a host from every cluster it belongs to.

    :param host_id: id of the host to delete.
    :param cluster_id_list: ids of the clusters containing the host.
    :param username: email of the acting user, resolved via user_db.
    """
    with util.lock('serialized_action', timeout=100) as lock:
        if not lock:
            raise Exception('failed to acquire lock to delete host')
        user = user_db.get_user_object(username)
        # Remove package (cluster) level state from each cluster's
        # installers; the os level record is dropped once at the end.
        for cluster_id in cluster_id_list:
            cluster_info = util.ActionHelper.get_cluster_info(
                cluster_id, user)
            adapter_id = cluster_info[const.ADAPTER_ID]
            adapter_info = util.ActionHelper.get_adapter_info(
                adapter_id, cluster_id, user)
            hosts_info = util.ActionHelper.get_hosts_info(
                cluster_id, [host_id], user)
            deploy_manager = DeployManager(
                adapter_info, cluster_info, hosts_info)
            logging.debug('Created deploy manager with %s %s %s'
                          % (adapter_info, cluster_info, hosts_info))
            deploy_manager.remove_hosts(
                package_only=True,
                delete_cluster=False
            )
        util.ActionHelper.delete_host(
            host_id, user
        )
ActionHelper = util.ActionHelper

View File

@ -14,15 +14,12 @@
"""Module to deploy a given cluster
"""
import logging
from compass.actions import util
from compass.db.api import adapter_holder as adapter_db
from compass.db.api import cluster as cluster_db
from compass.db.api import machine as machine_db
from compass.db.api import user as user_db
from compass.deployment.deploy_manager import DeployManager
from compass.deployment.utils import constants as const
import logging
def deploy(cluster_id, hosts_id_list, username=None):
@ -40,13 +37,13 @@ def deploy(cluster_id, hosts_id_list, username=None):
user = user_db.get_user_object(username)
cluster_info = ActionHelper.get_cluster_info(cluster_id, user)
cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
adapter_id = cluster_info[const.ADAPTER_ID]
adapter_info = ActionHelper.get_adapter_info(adapter_id, cluster_id,
user)
hosts_info = ActionHelper.get_hosts_info(cluster_id, hosts_id_list,
user)
adapter_info = util.ActionHelper.get_adapter_info(
adapter_id, cluster_id, user)
hosts_info = util.ActionHelper.get_hosts_info(
cluster_id, hosts_id_list, user)
deploy_manager = DeployManager(adapter_info, cluster_info, hosts_info)
#deploy_manager.prepare_for_deploy()
@ -54,8 +51,8 @@ def deploy(cluster_id, hosts_id_list, username=None):
% (adapter_info, cluster_info, hosts_info))
deployed_config = deploy_manager.deploy()
ActionHelper.save_deployed_config(deployed_config, user)
ActionHelper.update_state(cluster_id, hosts_id_list, user)
util.ActionHelper.save_deployed_config(deployed_config, user)
util.ActionHelper.update_state(cluster_id, hosts_id_list, user)
def redeploy(cluster_id, hosts_id_list, username=None):
@ -69,20 +66,21 @@ def redeploy(cluster_id, hosts_id_list, username=None):
raise Exception('failed to acquire lock to deploy')
user = user_db.get_user_object(username)
cluster_info = ActionHelper.get_cluster_info(cluster_id, user)
cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
adapter_id = cluster_info[const.ADAPTER_ID]
adapter_info = ActionHelper.get_adapter_info(adapter_id,
cluster_id,
user)
hosts_info = ActionHelper.get_hosts_info(cluster_id,
hosts_id_list,
user)
adapter_info = util.ActionHelper.get_adapter_info(
adapter_id, cluster_id, user)
hosts_info = util.ActionHelper.get_hosts_info(
cluster_id, hosts_id_list, user)
deploy_manager = DeployManager(adapter_info, cluster_info, hosts_info)
# deploy_manager.prepare_for_deploy()
deploy_manager.redeploy()
ActionHelper.update_state(cluster_id, hosts_id_list, user)
util.ActionHelper.update_state(cluster_id, hosts_id_list, user)
ActionHelper = util.ActionHelper
class ServerPowerMgmt(object):
@ -117,192 +115,3 @@ class HostPowerMgmt(object):
@staticmethod
def reset(host_id, user):
pass
class ActionHelper(object):
@staticmethod
def get_adapter_info(adapter_id, cluster_id, user):
"""Get adapter information. Return a dictionary as below,
{
"id": 1,
"name": "xxx",
"flavors": [
{
"flavor_name": "xxx",
"roles": ['xxx', 'yyy', ...],
"template": "xxx.tmpl"
},
...
],
"metadata": {
"os_config": {
...
},
"package_config": {
...
}
},
"os_installer": {
"name": "cobbler",
"settings": {....}
},
"pk_installer": {
"name": "chef",
"settings": {....}
},
...
}
To view a complete output, please refer to backend doc.
"""
adapter_info = adapter_db.get_adapter(user, adapter_id)
metadata = cluster_db.get_cluster_metadata(user, cluster_id)
adapter_info.update({const.METADATA: metadata})
for flavor_info in adapter_info[const.FLAVORS]:
roles = flavor_info[const.ROLES]
flavor_info[const.ROLES] = ActionHelper._get_role_names(roles)
return adapter_info
@staticmethod
def _get_role_names(roles):
return [role[const.NAME] for role in roles]
@staticmethod
def get_cluster_info(cluster_id, user):
"""Get cluster information.Return a dictionary as below,
{
"id": 1,
"adapter_id": 1,
"os_version": "CentOS-6.5-x86_64",
"name": "cluster_01",
"flavor": {
"flavor_name": "zzz",
"template": "xx.tmpl",
"roles": [...]
}
"os_config": {..},
"package_config": {...},
"deployed_os_config": {},
"deployed_package_config": {},
"owner": "xxx"
}
"""
cluster_info = cluster_db.get_cluster(user, cluster_id)
# convert roles retrieved from db into a list of role names
roles_info = cluster_info.setdefault(
const.FLAVOR, {}).setdefault(const.ROLES, [])
cluster_info[const.FLAVOR][const.ROLES] = \
ActionHelper._get_role_names(roles_info)
# get cluster config info
cluster_config = cluster_db.get_cluster_config(user, cluster_id)
cluster_info.update(cluster_config)
deploy_config = cluster_db.get_cluster_deployed_config(user,
cluster_id)
cluster_info.update(deploy_config)
return cluster_info
@staticmethod
def get_hosts_info(cluster_id, hosts_id_list, user):
"""Get hosts information. Return a dictionary as below,
{
"hosts": {
1($host_id): {
"reinstall_os": True,
"mac": "xxx",
"name": "xxx",
"roles": [xxx, yyy]
},
"networks": {
"eth0": {
"ip": "192.168.1.1",
"netmask": "255.255.255.0",
"is_mgmt": True,
"is_promiscuous": False,
"subnet": "192.168.1.0/24"
},
"eth1": {...}
},
"os_config": {},
"package_config": {},
"deployed_os_config": {},
"deployed_package_config": {}
},
2: {...},
....
}
}
"""
hosts_info = {}
for host_id in hosts_id_list:
info = cluster_db.get_cluster_host(user, cluster_id, host_id)
logging.debug("checking on info %r %r" % (host_id, info))
info[const.ROLES] = ActionHelper._get_role_names(info[const.ROLES])
# TODO(grace): Is following line necessary??
info.setdefault(const.ROLES, [])
config = cluster_db.get_cluster_host_config(user,
cluster_id,
host_id)
info.update(config)
networks = info[const.NETWORKS]
networks_dict = {}
# Convert networks from list to dictionary format
for entry in networks:
nic_info = {}
nic_info = {
entry[const.NIC]: {
const.IP_ADDR: entry[const.IP_ADDR],
const.NETMASK: entry[const.NETMASK],
const.MGMT_NIC_FLAG: entry[const.MGMT_NIC_FLAG],
const.PROMISCUOUS_FLAG: entry[const.PROMISCUOUS_FLAG],
const.SUBNET: entry[const.SUBNET]
}
}
networks_dict.update(nic_info)
info[const.NETWORKS] = networks_dict
hosts_info[host_id] = info
return hosts_info
@staticmethod
def save_deployed_config(deployed_config, user):
cluster_config = deployed_config[const.CLUSTER]
cluster_id = cluster_config[const.ID]
del cluster_config[const.ID]
cluster_db.update_cluster_deployed_config(user, cluster_id,
**cluster_config)
hosts_id_list = deployed_config[const.HOSTS].keys()
for host_id in hosts_id_list:
config = deployed_config[const.HOSTS][host_id]
cluster_db.update_cluster_host_deployed_config(user,
cluster_id,
host_id,
**config)
@staticmethod
def update_state(cluster_id, host_id_list, user):
# update all clusterhosts state
for host_id in host_id_list:
cluster_db.update_cluster_host_state(user, cluster_id, host_id,
state='INSTALLING')
# update cluster state
cluster_db.update_cluster_state(user, cluster_id, state='INSTALLING')
@staticmethod
def get_machine_IPMI(machine_id, user):
machine_info = machine_db.get_machine(user, machine_id)
return machine_info[const.IPMI_CREDS]

View File

@ -21,8 +21,11 @@ import redis
from contextlib import contextmanager
from compass.db.api import database
from compass.db import models
from compass.db.api import adapter_holder as adapter_db
from compass.db.api import cluster as cluster_db
from compass.db.api import host as host_db
from compass.db.api import machine as machine_db
from compass.deployment.utils import constants as const
@contextmanager
@ -53,3 +56,223 @@ def lock(lock_name, blocking=True, timeout=10):
logging.debug('released lock %s', lock_name)
else:
logging.debug('nothing to release %s', lock_name)
class ActionHelper(object):
    """Helpers shared by the action modules: translate db layer objects
    into the plain dict structures consumed by the deploy manager, and
    drop cluster/host database records after hosts are removed."""

    @staticmethod
    def get_adapter_info(adapter_id, cluster_id, user):
        """Get adapter information. Return a dictionary as below,
        {
            "id": 1,
            "name": "xxx",
            "flavors": [
                {
                    "flavor_name": "xxx",
                    "roles": ['xxx', 'yyy', ...],
                    "template": "xxx.tmpl"
                },
                ...
            ],
            "metadata": {
                "os_config": {
                    ...
                },
                "package_config": {
                    ...
                }
            },
            "os_installer": {
                "name": "cobbler",
                "settings": {....}
            },
            "pk_installer": {
                "name": "chef",
                "settings": {....}
            },
            ...
        }
        To view a complete output, please refer to backend doc.
        """
        adapter_info = adapter_db.get_adapter(user, adapter_id)
        metadata = cluster_db.get_cluster_metadata(user, cluster_id)
        adapter_info.update({const.METADATA: metadata})
        # Flatten each flavor's role dicts to plain role names.
        for flavor_info in adapter_info[const.FLAVORS]:
            roles = flavor_info[const.ROLES]
            flavor_info[const.ROLES] = ActionHelper._get_role_names(roles)
        return adapter_info

    @staticmethod
    def _get_role_names(roles):
        # Reduce a list of role dicts to a list of their names.
        return [role[const.NAME] for role in roles]

    @staticmethod
    def get_cluster_info(cluster_id, user):
        """Get cluster information.Return a dictionary as below,
        {
            "id": 1,
            "adapter_id": 1,
            "os_version": "CentOS-6.5-x86_64",
            "name": "cluster_01",
            "flavor": {
                "flavor_name": "zzz",
                "template": "xx.tmpl",
                "roles": [...]
            }
            "os_config": {..},
            "package_config": {...},
            "deployed_os_config": {},
            "deployed_package_config": {},
            "owner": "xxx"
        }
        """
        cluster_info = cluster_db.get_cluster(user, cluster_id)
        # convert roles retrieved from db into a list of role names
        roles_info = cluster_info.setdefault(
            const.FLAVOR, {}).setdefault(const.ROLES, [])
        cluster_info[const.FLAVOR][const.ROLES] = \
            ActionHelper._get_role_names(roles_info)
        # get cluster config info
        cluster_config = cluster_db.get_cluster_config(user, cluster_id)
        cluster_info.update(cluster_config)
        deploy_config = cluster_db.get_cluster_deployed_config(user,
                                                               cluster_id)
        cluster_info.update(deploy_config)
        return cluster_info

    @staticmethod
    def get_hosts_info(cluster_id, hosts_id_list, user):
        """Get hosts information. Return a dictionary as below,
        {
            "hosts": {
                1($host_id): {
                    "reinstall_os": True,
                    "mac": "xxx",
                    "name": "xxx",
                    "roles": [xxx, yyy]
                },
                "networks": {
                    "eth0": {
                        "ip": "192.168.1.1",
                        "netmask": "255.255.255.0",
                        "is_mgmt": True,
                        "is_promiscuous": False,
                        "subnet": "192.168.1.0/24"
                    },
                    "eth1": {...}
                },
                "os_config": {},
                "package_config": {},
                "deployed_os_config": {},
                "deployed_package_config": {}
            },
            2: {...},
            ....
        }
        }
        """
        hosts_info = {}
        for host_id in hosts_id_list:
            info = cluster_db.get_cluster_host(user, cluster_id, host_id)
            logging.debug("checking on info %r %r" % (host_id, info))
            info[const.ROLES] = ActionHelper._get_role_names(info[const.ROLES])
            # TODO(grace): Is following line necessary??
            info.setdefault(const.ROLES, [])
            config = cluster_db.get_cluster_host_config(user,
                                                        cluster_id,
                                                        host_id)
            info.update(config)
            networks = info[const.NETWORKS]
            networks_dict = {}
            # Convert networks from list to dictionary format
            for entry in networks:
                nic_info = {}
                nic_info = {
                    entry[const.NIC]: {
                        const.IP_ADDR: entry[const.IP_ADDR],
                        const.NETMASK: entry[const.NETMASK],
                        const.MGMT_NIC_FLAG: entry[const.MGMT_NIC_FLAG],
                        const.PROMISCUOUS_FLAG: entry[const.PROMISCUOUS_FLAG],
                        const.SUBNET: entry[const.SUBNET]
                    }
                }
                networks_dict.update(nic_info)
            info[const.NETWORKS] = networks_dict
            hosts_info[host_id] = info
        return hosts_info

    @staticmethod
    def save_deployed_config(deployed_config, user):
        """Persist deployed cluster and per-host configs back to the db."""
        cluster_config = deployed_config[const.CLUSTER]
        cluster_id = cluster_config[const.ID]
        # The id key is consumed here; the rest is stored as config.
        del cluster_config[const.ID]
        cluster_db.update_cluster_deployed_config(user, cluster_id,
                                                  **cluster_config)
        hosts_id_list = deployed_config[const.HOSTS].keys()
        for host_id in hosts_id_list:
            config = deployed_config[const.HOSTS][host_id]
            cluster_db.update_cluster_host_deployed_config(user,
                                                           cluster_id,
                                                           host_id,
                                                           **config)

    @staticmethod
    def update_state(cluster_id, host_id_list, user):
        """Mark the cluster and the given hosts as INSTALLING."""
        # update all clusterhosts state
        for host_id in host_id_list:
            cluster_db.update_cluster_host_state(user, cluster_id, host_id,
                                                 state='INSTALLING')
        # update cluster state
        cluster_db.update_cluster_state(user, cluster_id, state='INSTALLING')

    @staticmethod
    def delete_cluster(
        cluster_id, host_id_list, user, delete_underlying_host=False
    ):
        """Drop the cluster record (and optionally its host records)."""
        if delete_underlying_host:
            for host_id in host_id_list:
                host_db.del_host_from_database(
                    user, host_id
                )
        cluster_db.del_cluster_from_database(
            user, cluster_id
        )

    @staticmethod
    def delete_cluster_host(
        cluster_id, host_id, user, delete_underlying_host=False
    ):
        """Drop one clusterhost record (and optionally the host record)."""
        if delete_underlying_host:
            host_db.del_host_from_database(
                user, host_id
            )
        cluster_db.del_cluster_host_from_database(
            user, cluster_id, host_id
        )

    @staticmethod
    def delete_host(host_id, user):
        """Drop a host record from the database."""
        host_db.del_host_from_database(
            user, host_id
        )

    @staticmethod
    def get_machine_IPMI(machine_id, user):
        """Return the IPMI credentials stored for a machine."""
        machine_info = machine_db.get_machine(user, machine_id)
        return machine_info[const.IPMI_CREDS]

View File

@ -1346,7 +1346,7 @@ def delete_cluster(cluster_id):
"""Delete cluster."""
data = _get_request_data()
return utils.make_json_response(
200,
202,
cluster_api.del_cluster(
current_user, cluster_id, **data
)
@ -1613,7 +1613,7 @@ def delete_cluster_host(cluster_id, host_id):
"""Delete cluster host."""
data = _get_request_data()
return utils.make_json_response(
200,
202,
cluster_api.del_cluster_host(
current_user, cluster_id, host_id, **data
)
@ -1630,7 +1630,7 @@ def delete_clusterhost(clusterhost_id):
"""Delete cluster host."""
data = _get_request_data()
return utils.make_json_response(
200,
202,
cluster_api.del_clusterhost(
current_user, clusterhost_id, **data
)
@ -1918,7 +1918,7 @@ def delete_host(host_id):
"""Delete host."""
data = _get_request_data()
return utils.make_json_response(
200,
202,
host_api.del_host(
current_user, host_id, **data
)

View File

@ -292,9 +292,14 @@ def update_cluster(session, updater, cluster_id, **kwargs):
@user_api.check_user_permission_in_session(
permission.PERMISSION_DEL_CLUSTER
)
@utils.wrap_to_dict(RESP_FIELDS)
@utils.wrap_to_dict(
['status', 'cluster', 'hosts'],
cluster=RESP_FIELDS,
hosts=RESP_CLUSTERHOST_FIELDS
)
def del_cluster(session, deleter, cluster_id, **kwargs):
"""Delete a cluster."""
from compass.tasks import client as celery_client
cluster = utils.get_db_object(
session, models.Cluster, id=cluster_id
)
@ -302,6 +307,32 @@ def del_cluster(session, deleter, cluster_id, **kwargs):
session, cluster, deleter,
reinstall_distributed_system_set=True
)
clusterhosts = []
for clusterhost in cluster.clusterhosts:
clusterhosts.append(clusterhost)
celery_client.celery.send_task(
'compass.tasks.delete_cluster',
(
deleter.email, cluster_id,
[clusterhost.host_id for clusterhost in clusterhosts]
)
)
return {
'status': 'delete action sent',
'cluster': cluster,
'hosts': clusterhosts
}
@database.run_in_session()
@user_api.check_user_permission_in_session(
permission.PERMISSION_DEL_CLUSTER
)
def del_cluster_from_database(session, deleter, cluster_id):
cluster = utils.get_db_object(
session, models.Cluster, id=cluster_id
)
return utils.del_db_object(session, cluster)
@ -518,10 +549,13 @@ def add_clusterhost_internal(
session, models.Host, False, id=machine_id
)
if host:
if host_api.is_host_editable(
session, host, cluster.creator,
reinstall_os_set=kwargs.get('reinstall_os', False),
exception_when_not_editable=False
if (
host_dict and
host_api.is_host_editable(
session, host, cluster.creator,
reinstall_os_set=kwargs.get('reinstall_os', False),
exception_when_not_editable=False
)
):
if 'name' in host_dict:
hostname = host_dict['name']
@ -688,6 +722,38 @@ def add_cluster_host(
)
@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
def _update_clusterhost(session, updater, clusterhost, **kwargs):
clusterhost_dict = {}
host_dict = {}
for key, value in kwargs.items():
if key in UPDATED_HOST_FIELDS:
host_dict[key] = value
else:
clusterhost_dict[key] = value
if host_dict:
from compass.db.api import host as host_api
host = clusterhost.host
if host_api.is_host_editable(
session, host, clusterhost.cluster.creator,
reinstall_os_set=kwargs.get('reinstall_os', False),
exception_when_not_editable=False
):
if 'name' in host_dict:
hostname = host_dict['name']
host_by_name = utils.get_db_object(
session, models.Host, False, name=hostname
)
if host_by_name and host_by_name.id != host.id:
raise exception.InvalidParameter(
'host name %s exists in host %s' % (
hostname, host_by_name.id
)
)
utils.update_db_object(
session, host,
**host_dict
)
def roles_validates(roles):
cluster_roles = []
cluster = clusterhost.cluster
@ -728,7 +794,7 @@ def _update_clusterhost(session, updater, clusterhost, **kwargs):
@utils.supported_filters(
optional_support_keys=UPDATED_CLUSTERHOST_FIELDS,
optional_support_keys=(UPDATED_HOST_FIELDS + UPDATED_CLUSTERHOST_FIELDS),
ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@ -744,7 +810,7 @@ def update_cluster_host(
@utils.supported_filters(
optional_support_keys=UPDATED_CLUSTERHOST_FIELDS,
optional_support_keys=(UPDATED_HOST_FIELDS + UPDATED_CLUSTERHOST_FIELDS),
ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@ -802,9 +868,10 @@ def patch_clusterhost(
@user_api.check_user_permission_in_session(
permission.PERMISSION_DEL_CLUSTER_HOST
)
@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
@utils.wrap_to_dict(['status', 'host'], host=RESP_CLUSTERHOST_FIELDS)
def del_cluster_host(session, deleter, cluster_id, host_id, **kwargs):
"""Delete cluster host."""
from compass.tasks import client as celery_client
clusterhost = utils.get_db_object(
session, models.ClusterHost,
cluster_id=cluster_id, host_id=host_id
@ -813,9 +880,27 @@ def del_cluster_host(session, deleter, cluster_id, host_id, **kwargs):
session, clusterhost.cluster, deleter,
reinstall_distributed_system_set=True
)
return utils.del_db_object(
session, clusterhost
celery_client.celery.send_task(
'compass.tasks.delete_cluster_host',
(
deleter.email, cluster_id, host_id
)
)
return {
'status': 'delete action sent',
'host': clusterhost,
}
@database.run_in_session()
@user_api.check_user_permission_in_session(
permission.PERMISSION_DEL_CLUSTER_HOST
)
def del_cluster_host_from_database(session, deleter, cluster_id, host_id):
clusterhost = utils.get_db_object(
session, models.ClusterHost, id=cluster_id, host_id=host_id
)
return utils.del_db_object(session, clusterhost)
@utils.supported_filters([])
@ -823,9 +908,10 @@ def del_cluster_host(session, deleter, cluster_id, host_id, **kwargs):
@user_api.check_user_permission_in_session(
permission.PERMISSION_DEL_CLUSTER_HOST
)
@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
@utils.wrap_to_dict(['status', 'host'], host=RESP_CLUSTERHOST_FIELDS)
def del_clusterhost(session, deleter, clusterhost_id, **kwargs):
"""Delete cluster host."""
from compass.tasks import client as celery_client
clusterhost = utils.get_db_object(
session, models.ClusterHost,
clusterhost_id=clusterhost_id
@ -834,9 +920,27 @@ def del_clusterhost(session, deleter, clusterhost_id, **kwargs):
session, clusterhost.cluster, deleter,
reinstall_distributed_system_set=True
)
return utils.del_db_object(
session, clusterhost
celery_client.celery.send_task(
'compass.tasks.delete_cluster_host',
(
deleter.email, clusterhost.cluster_id, clusterhost.host_id
)
)
return {
'status': 'delete action sent',
'host': clusterhost,
}
@database.run_in_session()
@user_api.check_user_permission_in_session(
permission.PERMISSION_DEL_CLUSTER_HOST
)
def del_clusterhost_from_database(session, deleter, clusterhost_id):
clusterhost = utils.get_db_object(
session, models.ClusterHost, clusterhost_id=clusterhost_id
)
return utils.del_db_object(session, clusterhost)
@utils.supported_filters([])

View File

@ -317,9 +317,11 @@ def update_hosts(session, updater, data=[]):
@user_api.check_user_permission_in_session(
permission.PERMISSION_DEL_HOST
)
@utils.wrap_to_dict(RESP_FIELDS)
@utils.wrap_to_dict(['status', 'host'], host=RESP_FIELDS)
def del_host(session, deleter, host_id, **kwargs):
"""Delete a host."""
from compass.db.api import cluster as cluster_api
from compass.tasks import client as celery_client
host = utils.get_db_object(
session, models.Host, id=host_id
)
@ -327,6 +329,34 @@ def del_host(session, deleter, host_id, **kwargs):
session, host, deleter,
reinstall_os_set=True
)
cluster_ids = []
for clusterhost in host.clusterhosts:
cluster_api.is_cluster_editable(
session, clusterhost.cluster, deleter,
reinstall_distributed_system_set=True
)
cluster_ids.append(clusterhost.cluster_id)
celery_client.celery.send_task(
'compass.tasks.delete_host',
(
deleter.email, host_id, cluster_ids
)
)
return {
'status': 'delete action sent',
'host': host,
}
@database.run_in_session()
@user_api.check_user_permission_in_session(
permission.PERMISSION_DEL_HOST
)
def del_host_from_database(session, deleter, host_id):
host = utils.get_db_object(
session, models.Host, id=host_id
)
return utils.del_db_object(session, host)

View File

@ -128,6 +128,13 @@ def patch_machine(session, updater, machine_id, **kwargs):
def del_machine(session, deleter, machine_id, **kwargs):
    """Delete a machine.

    Refuses to delete a machine that still has a host deployed on it,
    raising NotAcceptable in that case.
    """
    machine = utils.get_db_object(session, models.Machine, id=machine_id)
    attached_host = machine.host
    if attached_host:
        # a machine with a live host must be cleaned up first
        raise exception.NotAcceptable(
            'machine %s has host %s on it' % (
                machine.mac, attached_host.name
            )
        )
    return utils.del_db_object(session, machine)

View File

@ -301,17 +301,28 @@ def _validate_self(
metadata, whole_check,
**kwargs
):
logging.debug('validate config self %s', config_path)
if '_self' not in metadata:
if isinstance(config, dict):
_validate_config(
config_path, config, metadata, whole_check, **kwargs
)
return
field_type = metadata['_self'].get('field_type', 'basestring')
field_type = metadata['_self'].get('field_type', basestring)
if not isinstance(config, field_type):
raise exception.InvalidParameter(
'%s config type is not %s' % (config_path, field_type)
)
is_required = metadata['_self'].get(
'is_required', False
)
required_in_whole_config = metadata['_self'].get(
'required_in_whole_config', False
)
if isinstance(config, basestring):
if config == '' and not is_required and not required_in_whole_config:
# ignore empty config when it is optional
return
required_in_options = metadata['_self'].get(
'required_in_options', False
)
@ -333,6 +344,7 @@ def _validate_self(
'%s config is not in %s' % (config_path, options)
)
validator = metadata['_self'].get('validator', None)
logging.debug('validate by validator %s', validator)
if validator:
if not validator(config_key, config, **kwargs):
raise exception.InvalidParameter(
@ -348,6 +360,7 @@ def _validate_config(
config_path, config, metadata, whole_check,
**kwargs
):
logging.debug('validate config %s', config_path)
generals = {}
specified = {}
for key, value in metadata.items():

View File

@ -125,4 +125,18 @@ def del_subnet(session, deleter, subnet_id, **kwargs):
subnet = utils.get_db_object(
session, models.Subnet, id=subnet_id
)
if subnet.host_networks:
host_networks = [
'%s:%s=%s' % (
host_network.host.name, host_network.interface,
host_network.ip
)
for host_network in subnet.host_networks
]
raise exception.NotAcceptable(
'subnet %s contains host networks %s' % (
subnet.subnet, host_networks
)
)
return utils.del_db_object(session, subnet)

View File

@ -462,12 +462,12 @@ def get_db_object(session, table, exception_when_missing=True, **kwargs):
with session.begin(subtransactions=True):
logging.debug(
'session %s get db object %s from table %s',
session, kwargs, table.__name__)
id(session), kwargs, table.__name__)
db_object = model_filter(
model_query(session, table), table, **kwargs
).first()
logging.debug(
'session %s got db object %s', session, db_object
'session %s got db object %s', id(session), db_object
)
if db_object:
return db_object
@ -488,7 +488,7 @@ def add_db_object(session, table, exception_when_existing=True,
with session.begin(subtransactions=True):
logging.debug(
'session %s add object %s atributes %s to table %s',
session, args, kwargs, table.__name__)
id(session), args, kwargs, table.__name__)
argspec = inspect.getargspec(table.__init__)
arg_names = argspec.args[1:]
arg_defaults = argspec.defaults
@ -526,7 +526,7 @@ def add_db_object(session, table, exception_when_existing=True,
db_object.initialize()
db_object.validate()
logging.debug(
'session %s db object %s added', session, db_object
'session %s db object %s added', id(session), db_object
)
return db_object
@ -536,7 +536,7 @@ def list_db_objects(session, table, order_by=[], **filters):
with session.begin(subtransactions=True):
logging.debug(
'session %s list db objects by filters %s in table %s',
session, filters, table.__name__
id(session), filters, table.__name__
)
db_objects = model_order_by(
model_filter(
@ -549,7 +549,7 @@ def list_db_objects(session, table, order_by=[], **filters):
).all()
logging.debug(
'session %s got listed db objects: %s',
session, db_objects
id(session), db_objects
)
return db_objects
@ -559,7 +559,7 @@ def del_db_objects(session, table, **filters):
with session.begin(subtransactions=True):
logging.debug(
'session %s delete db objects by filters %s in table %s',
session, filters, table.__name__
id(session), filters, table.__name__
)
query = model_filter(
model_query(session, table), table, **filters
@ -567,7 +567,7 @@ def del_db_objects(session, table, **filters):
db_objects = query.all()
query.delete(synchronize_session=False)
logging.debug(
'session %s db objects %s deleted', session, db_objects
'session %s db objects %s deleted', id(session), db_objects
)
return db_objects
@ -577,7 +577,7 @@ def update_db_objects(session, table, **filters):
with session.begin(subtransactions=True):
logging.debug(
'session %s update db objects by filters %s in table %s',
session, filters, table.__name__)
id(session), filters, table.__name__)
db_objects = model_filter(
model_query(session, table), table, **filters
).all()
@ -587,7 +587,8 @@ def update_db_objects(session, table, **filters):
db_object.update()
db_object.validate()
logging.debug(
'session %s db objects %s updated', session, db_objects
'session %s db objects %s updated',
id(session), db_objects
)
return db_objects
@ -597,7 +598,7 @@ def update_db_object(session, db_object, **kwargs):
with session.begin(subtransactions=True):
logging.debug(
'session %s update db object %s by value %s',
session, db_object, kwargs
id(session), db_object, kwargs
)
for key, value in kwargs.items():
setattr(db_object, key, value)
@ -605,7 +606,8 @@ def update_db_object(session, db_object, **kwargs):
db_object.update()
db_object.validate()
logging.debug(
'session %s db object %s updated', session, db_object
'session %s db object %s updated',
id(session), db_object
)
return db_object
@ -615,12 +617,12 @@ def del_db_object(session, db_object):
with session.begin(subtransactions=True):
logging.debug(
'session %s delete db object %s',
session, db_object
id(session), db_object
)
session.delete(db_object)
logging.debug(
'session %s db object %s deleted',
session, db_object
id(session), db_object
)
return db_object

View File

@ -486,6 +486,9 @@ class HostNetwork(BASE, TimestampMixin, HelperMixin):
self.interface = interface
super(HostNetwork, self).__init__(**kwargs)
def __str__(self):
return 'HostNetwork[%s=%s]' % (self.interface, self.ip)
@property
def ip(self):
return str(netaddr.IPAddress(self.ip_int))
@ -558,6 +561,11 @@ class ClusterHostLogHistory(BASE, LogHistoryMixin):
self.filename = filename
super(ClusterHostLogHistory, self).__init__(**kwargs)
def __str__(self):
return 'ClusterHostLogHistory[%s:%s]' % (
self.clusterhost_id, self.filename
)
def initialize(self):
self.cluster_id = self.clusterhost.cluster_id
self.host_id = self.clusterhost.host_id
@ -580,6 +588,9 @@ class HostLogHistory(BASE, LogHistoryMixin):
self.filename = filename
super(HostLogHistory, self).__init__(**kwargs)
def __str__(self):
return 'HostLogHistory[%s:%s]' % (self.id, self.filename)
class ClusterHostState(BASE, StateMixin):
"""ClusterHost state table."""
@ -594,6 +605,11 @@ class ClusterHostState(BASE, StateMixin):
primary_key=True
)
def __str__(self):
return 'ClusterHostState[%s state %s percentage %s]' % (
self.id, self.state, self.percentage
)
def update(self):
super(ClusterHostState, self).update()
host_state = self.clusterhost.host.state
@ -655,6 +671,9 @@ class ClusterHost(BASE, TimestampMixin, HelperMixin):
self.state = ClusterHostState()
super(ClusterHost, self).__init__(**kwargs)
def __str__(self):
return 'ClusterHost[%s:%s]' % (self.clusterhost_id, self.name)
def update(self):
if self.host.reinstall_os:
if self.state in ['SUCCESSFUL', 'ERROR']:
@ -662,7 +681,8 @@ class ClusterHost(BASE, TimestampMixin, HelperMixin):
self.state.state = 'INITIALIZED'
else:
self.state.state = 'UNINITIALIZED'
self.state.update()
self.state.update()
super(ClusterHost, self).update()
@property
def name(self):
@ -866,6 +886,11 @@ class HostState(BASE, StateMixin):
primary_key=True
)
def __str__(self):
return 'HostState[%s state %s percentage %s]' % (
self.id, self.state, self.percentage
)
def update(self):
super(HostState, self).update()
host = self.host
@ -944,6 +969,9 @@ class Host(BASE, TimestampMixin, HelperMixin):
backref=backref('host')
)
def __str__(self):
return 'Host[%s:%s]' % (self.id, self.name)
@hybrid_property
def mac(self):
machine = self.machine
@ -1004,6 +1032,7 @@ class Host(BASE, TimestampMixin, HelperMixin):
self.os_name = os.name
else:
self.os_name = None
self.state.update()
super(Host, self).update()
def validate(self):
@ -1089,6 +1118,11 @@ class ClusterState(BASE, StateMixin):
default=0
)
def __str__(self):
return 'ClusterState[%s state %s percentage %s]' % (
self.id, self.state, self.percentage
)
def to_dict(self):
dict_info = super(ClusterState, self).to_dict()
dict_info['status'] = {
@ -1199,8 +1233,8 @@ class Cluster(BASE, TimestampMixin, HelperMixin):
self.state = ClusterState()
super(Cluster, self).__init__(**kwargs)
def initialize(self):
super(Cluster, self).initialize()
def __str__(self):
return 'Cluster[%s:%s]' % (self.id, self.name)
def update(self):
creator = self.creator
@ -1233,6 +1267,7 @@ class Cluster(BASE, TimestampMixin, HelperMixin):
self.flavor_name = flavor.name
else:
self.flavor_name = None
self.state.update()
super(Cluster, self).update()
def validate(self):
@ -1375,6 +1410,9 @@ class UserPermission(BASE, HelperMixin, TimestampMixin):
self.user_id = user_id
self.permission_id = permission_id
def __str__(self):
return 'UserPermission[%s:%s]' % (self.id, self.name)
@hybrid_property
def name(self):
return self.permission.name
@ -1404,6 +1442,9 @@ class Permission(BASE, HelperMixin, TimestampMixin):
self.name = name
super(Permission, self).__init__(**kwargs)
def __str__(self):
return 'Permission[%s:%s]' % (self.id, self.name)
class UserToken(BASE, HelperMixin):
"""user token table."""
@ -1497,6 +1538,9 @@ class User(BASE, HelperMixin, TimestampMixin):
self.email = email
super(User, self).__init__(**kwargs)
def __str__(self):
return 'User[%s]' % self.email
def validate(self):
super(User, self).validate()
if not self.crypted_password:
@ -1528,12 +1572,6 @@ class User(BASE, HelperMixin, TimestampMixin):
]
return dict_info
def __str__(self):
return '%s[email:%s,is_admin:%s,active:%s]' % (
self.__class__.__name__,
self.email, self.is_admin, self.active
)
class SwitchMachine(BASE, HelperMixin, TimestampMixin):
"""Switch Machine table."""
@ -1560,6 +1598,11 @@ class SwitchMachine(BASE, HelperMixin, TimestampMixin):
self.machine_id = machine_id
super(SwitchMachine, self).__init__(**kwargs)
def __str__(self):
return 'SwitchMachine[%s port %s]' % (
self.switch_machine_id, self.port
)
def validate(self):
super(SwitchMachine, self).validate()
if not self.switch:
@ -1709,6 +1752,9 @@ class Machine(BASE, HelperMixin, TimestampMixin):
self.mac = mac
super(Machine, self).__init__(**kwargs)
def __str__(self):
return 'Machine[%s:%s]' % (self.id, self.mac)
def validate(self):
super(Machine, self).validate()
try:
@ -1789,6 +1835,9 @@ class Switch(BASE, HelperMixin, TimestampMixin):
backref=backref('switch')
)
def __str__(self):
return 'Switch[%s:%s]' % (self.id, self.ip)
@classmethod
def parse_filters(cls, filters):
if isinstance(filters, basestring):
@ -2094,6 +2143,9 @@ class OperatingSystem(BASE, HelperMixin):
self.name = name
super(OperatingSystem, self).__init__()
def __str__(self):
return 'OperatingSystem[%s:%s]' % (self.id, self.name)
@property
def root_metadatas(self):
return [
@ -2199,6 +2251,9 @@ class AdapterFlavor(BASE, HelperMixin):
UniqueConstraint('name', 'adapter_id', name='constraint'),
)
def __str__(self):
return 'AdapterFlavor[%s:%s]' % (self.id, self.name)
@property
def ordered_flavor_roles(self):
flavor_roles = dict([
@ -2285,6 +2340,9 @@ class AdapterRole(BASE, HelperMixin):
self.adapter_id = adapter_id
super(AdapterRole, self).__init__(**kwargs)
def __str__(self):
return 'AdapterRole[%s:%s]' % (self.id, self.name)
def initialize(self):
if not self.description:
self.description = self.name
@ -2592,6 +2650,9 @@ class DistributedSystem(BASE, HelperMixin):
self.name = name
super(DistributedSystem, self).__init__()
def __str__(self):
return 'DistributedSystem[%s:%s]' % (self.id, self.name)
class OSInstaller(BASE, InstallerMixin):
"""OS installer table."""
@ -2612,6 +2673,9 @@ class OSInstaller(BASE, InstallerMixin):
self.alias = alias
super(OSInstaller, self).__init__(**kwargs)
def __str__(self):
return 'OSInstaller[%s:%s]' % (self.id, self.alias)
class PackageInstaller(BASE, InstallerMixin):
"""package installer table."""
@ -2628,6 +2692,9 @@ class PackageInstaller(BASE, InstallerMixin):
self.alias = alias
super(PackageInstaller, self).__init__(**kwargs)
def __str__(self):
return 'PackageInstaller[%s:%s]' % (self.id, self.alias)
class Subnet(BASE, TimestampMixin, HelperMixin):
"""network table."""
@ -2648,6 +2715,9 @@ class Subnet(BASE, TimestampMixin, HelperMixin):
self.subnet = subnet
super(Subnet, self).__init__(**kwargs)
def __str__(self):
return 'Subnet[%s:%s]' % (self.id, self.subnet)
def to_dict(self):
dict_info = super(Subnet, self).to_dict()
if not self.name:

View File

@ -13,6 +13,7 @@
# limitations under the License.
"""Validator methods."""
import logging
import netaddr
import re
import socket
@ -23,87 +24,162 @@ from compass.utils import util
def is_valid_ip(name, ip_addr, **kwargs):
    """Validate the format of an IP address (or a list of them)."""
    if isinstance(ip_addr, list):
        # every member of the list must itself be a valid IP
        return all(
            is_valid_ip(name, member, **kwargs) for member in ip_addr
        )
    try:
        netaddr.IPAddress(ip_addr)
        return True
    except Exception:
        logging.debug('%s invalid ip addr %s', name, ip_addr)
        return False
def is_valid_network(name, ip_network, **kwargs):
    """Validate the format of an IP network (CIDR string).

    :param name: config field name, used only for log messages.
    :param ip_network: a network string (or list of them) to validate.
    :returns: True if valid, False otherwise.
    """
    if isinstance(ip_network, list):
        return all([
            is_valid_network(name, item, **kwargs) for item in ip_network
        ])
    try:
        netaddr.IPNetwork(ip_network)
    except Exception:
        logging.debug('%s invalid network %s', name, ip_network)
        return False
    # fixed: a stray duplicate `return False` here made the function
    # always reject valid networks; valid input must return True.
    return True
def is_valid_netmask(name, ip_addr, **kwargs):
    """Validate the format of a netmask (or a list of them).

    :param name: config field name, used only for log messages.
    :param ip_addr: a netmask string (or list of them) to validate.
    :returns: True if valid, False otherwise.
    """
    if isinstance(ip_addr, list):
        return all([
            is_valid_netmask(name, item, **kwargs) for item in ip_addr
        ])
    # fixed: is_valid_ip requires the `name` argument; calling it with
    # only ip_addr raised TypeError for every scalar input.
    if not is_valid_ip(name, ip_addr, **kwargs):
        return False
    ip = netaddr.IPAddress(ip_addr)
    if ip.is_netmask():
        return True
    logging.debug('%s invalid netmask %s', name, ip_addr)
    return False
def is_valid_gateway(name, ip_addr, **kwargs):
    """Validate the format of a gateway address (or a list of them).

    :param name: config field name, used only for log messages.
    :param ip_addr: a gateway IP string (or list of them) to validate.
    :returns: True if valid, False otherwise.
    """
    if isinstance(ip_addr, list):
        return all([
            is_valid_gateway(name, item, **kwargs) for item in ip_addr
        ])
    # fixed: is_valid_ip requires the `name` argument; calling it with
    # only ip_addr raised TypeError for every scalar input.
    if not is_valid_ip(name, ip_addr, **kwargs):
        return False
    ip = netaddr.IPAddress(ip_addr)
    # a usable gateway must be a routable (private or public) address
    if ip.is_private() or ip.is_public():
        return True
    logging.debug('%s invalid gateway %s', name, ip_addr)
    return False
def is_valid_dns(name, dns, **kwargs):
    """Validate the format of a DNS entry (IP address or hostname).

    :param name: config field name, used only for log messages.
    :param dns: an IP/hostname string (or list of them) to validate.
    :returns: True if valid, False otherwise.
    """
    if isinstance(dns, list):
        return all([is_valid_dns(name, item, **kwargs) for item in dns])
    # fixed: is_valid_ip requires the `name` argument; calling it with
    # only dns raised TypeError for every scalar input.
    if is_valid_ip(name, dns, **kwargs):
        return True
    # not an IP literal: fall back to resolving it as a hostname
    try:
        socket.gethostbyname_ex(dns)
    except Exception:
        logging.debug('%s invalid dns name %s', name, dns)
        return False
    return True
def is_valid_url(name, url, **kwargs):
    """Validate the format of a URL (or a list of them)."""
    if isinstance(url, list):
        return all(
            is_valid_url(name, member, **kwargs) for member in url
        )
    pattern = (
        r'^(http|https|ftp)://([0-9A-Za-z_-]+)(\.[0-9a-zA-Z_-]+)*'
        r'(:\d+)?(/[0-9a-zA-Z_-]+)*$'
    )
    if re.match(pattern, url) is None:
        logging.debug(
            '%s invalid url %s', name, url
        )
        return False
    return True
def is_valid_domain(name, domain, **kwargs):
    """Validate the format of a domain name (or a list of them)."""
    if isinstance(domain, list):
        return all(
            is_valid_domain(name, member, **kwargs) for member in domain
        )
    matched = re.match(
        r'^([0-9a-zA-Z_-]+)(\.[0-9a-zA-Z_-]+)*$',
        domain
    )
    if matched is None:
        logging.debug(
            '%s invalid domain %s', name, domain
        )
        return False
    return True
def is_valid_username(name, username, **kwargs):
    """Validate that the username is non-empty.

    :param name: config field name, used only for log messages.
    :param username: the username value to check.
    :returns: True if non-empty, False otherwise.
    """
    if username:
        return True
    logging.debug(
        '%s username is empty', name
    )
    # fixed: the function previously fell off the end and returned None;
    # validators must return an explicit boolean.
    return False
def is_valid_password(name, password, **kwargs):
    """Validate that the password is non-empty.

    :param name: config field name, used only for log messages.
    :param password: the password value to check.
    :returns: True if non-empty, False otherwise.
    """
    # fixed: removed the dead `return bool(password)` left over above the
    # logging version of this check.
    if password:
        return True
    logging.debug('%s password is empty', name)
    return False
def is_valid_partition(name, partition, **kwargs):
    """Validate the format of a partition entry.

    :param name: the partition name; must be 'swap' or start with '/'.
    :param partition: dict describing the partition; must contain either
        a 'size' or a 'percentage' key.
    :returns: True if valid, False otherwise.
    """
    if name != 'swap' and not name.startswith('/'):
        logging.debug(
            '%s is not started with / or swap', name
        )
        return False
    if 'size' not in partition and 'percentage' not in partition:
        # fixed log message typo: "sie" -> "size"
        logging.debug(
            '%s partition does not contain size or percentage',
            name
        )
        return False
    return True
def is_valid_percentage(name, percentage, **kwargs):
    """Validate that the percentage lies in [0, 100].

    :param name: config field name, used only for log messages.
    :param percentage: the numeric value to check.
    :returns: True if valid, False otherwise.
    """
    if 0 <= percentage <= 100:
        return True
    logging.debug('%s invalid percentage %s', name, percentage)
    # fixed: the function previously fell off the end and returned None;
    # validators must return an explicit boolean.
    return False
def is_valid_port(name, port, **kwargs):
    """Validate that the port is in the usable TCP/UDP range (1-65535).

    :param name: config field name, used only for log messages.
    :param port: the numeric port to check.
    :returns: True if valid, False otherwise.
    """
    if 0 < port < 65536:
        return True
    logging.debug('%s invalid port %s', name, port)
    # fixed: the function previously fell off the end and returned None;
    # validators must return an explicit boolean.
    return False
def is_valid_size(name, size, **kwargs):
    """Validate a storage size string of the form '<digits><K|M|G|T>'.

    :param name: config field name, used only for log messages.
    :param size: the size string to check, e.g. '100G'.
    :returns: True if valid, False otherwise.
    """
    # anchored pattern with a mandatory unit; the earlier unanchored
    # r'(\d+)(K|M|G|T)?' accepted any string containing digits.
    if re.match(r'^(\d+)(K|M|G|T)$', size):
        return True
    logging.debug('%s invalid size %s', name, size)
    return False

View File

@ -140,13 +140,13 @@ class DeployManager(object):
self.redeploy_os()
self.redeploy_target_system()
def remove_hosts(self, package_only=False, delete_cluster=False):
    """Remove hosts from the OS and/or package installer server side.

    :param package_only: when True, skip the OS installer cleanup and
        only remove hosts from the package installer.
    :param delete_cluster: forwarded to the package installer so it can
        also tear down the cluster-level artifacts.
    """
    # fixed: collapsed the duplicated old/new `def` lines left over from
    # a bad merge into the single current signature; also fixed the
    # "installlers" docstring typo.
    if self.os_installer and not package_only:
        self.os_installer.delete_hosts()
    if self.pk_installer:
        self.pk_installer.delete_hosts(delete_cluster=delete_cluster)
def _get_hosts_for_os_installation(self, hosts_info):
"""Get info of hosts which need to install/reinstall OS."""

View File

@ -302,8 +302,10 @@ class CobblerInstaller(OSInstaller):
def delete_hosts(self):
hosts_id_list = self.config_manager.get_host_id_list()
logging.debug('delete hosts %s', hosts_id_list)
for host_id in hosts_id_list:
self.delete_single_host(host_id)
self._sync()
def delete_single_host(self, host_id):
"""Delete the host from cobbler server and clean up the installation
@ -311,11 +313,12 @@ class CobblerInstaller(OSInstaller):
"""
hostname = self.config_manager.get_hostname(host_id)
try:
log_dir_prefix = compass_setting.INSTALLATION_LOGDIR[self.NAME]
log_dir_prefix = compass_setting.INSTALLATION_LOGDIR[NAME]
self._clean_system(hostname)
self._clean_log(log_dir_prefix, hostname)
except Exception as ex:
logging.info("Deleting host got exception: %s", ex.message)
logging.error("Deleting host got exception: %s", ex)
logging.exception(ex)
def _get_host_tmpl_vars_dict(self, host_id, global_vars_dict, **kwargs):
"""Generate template variables dictionary.

View File

@ -135,10 +135,20 @@ class ChefInstaller(PKInstaller):
return node
def delete_hosts(self, delete_cluster=False):
    """Delete all configured hosts from the chef server.

    :param delete_cluster: when True, also delete the chef environment
        backing the whole cluster after the nodes are removed.
    """
    # fixed: collapsed the duplicated old/new `def` lines left over from
    # a bad merge into the single current signature.
    hosts_id_list = self.config_manager.get_host_id_list()
    for host_id in hosts_id_list:
        self.delete_node(host_id)
    if delete_cluster:
        self.delete_environment()
def delete_environment(self):
    """Delete the chef environment associated with this cluster."""
    env_name = self.get_env_name(
        self.config_manager.get_adapter_name(),
        self.config_manager.get_clustername()
    )
    env = self.get_create_environment(env_name)
    if env:
        self._delete_environment(env)
def delete_node(self, host_id):
fullname = self.config_manager.get_host_fullname(host_id)
@ -146,6 +156,20 @@ class ChefInstaller(PKInstaller):
if node:
self._delete_node(node)
def _delete_environment(self, env):
    """Delete a chef environment for the target system.

    :param env: the chef Environment object to delete; must not be None.
    :raises Exception: if env is None.
    """
    # fixed: removed the unused `import chef` and the "bnone" typo in
    # the error message.
    if env is None:
        raise Exception("env is None, cannot delete a none env.")
    env_name = env.name
    try:
        env.delete()
    except Exception as error:
        # best-effort deletion: log and continue on chef server errors
        logging.debug(
            'failed to delete env %s, error: %s',
            env_name, error
        )
def _delete_node(self, node):
"""clean node attributes about target system."""
import chef
@ -525,7 +549,7 @@ class ChefInstaller(PKInstaller):
def _clean_log(self, log_dir_prefix, node_name):
log_dir = os.path.join(log_dir_prefix, node_name)
shutil.rmtree(log_dir, False)
shutil.rmtree(log_dir, True)
def get_supported_dist_systems(self):
"""get target systems from chef. All target_systems for compass will

View File

@ -101,11 +101,12 @@ class HDManager(object):
:return a tuple (vendor, switch_state, error)
"""
switch_lists = util.load_configs(setting.SWITCH_LIST_DIR)
switch_lists = util.load_configs(setting.MACHINE_LIST_DIR)
switch_list = []
for item in switch_lists:
if item and 'SWITCH_LIST' in item and item['SWITCH_LIST']:
switch_list.extend(item['SWITCH_LIST'])
for items in switch_lists:
for item in items['MACHINE_LIST']:
for k, v in item.items():
switch_list.append(k)
if host in switch_list:
return ("appliance", "Found", "")

View File

@ -27,7 +27,9 @@ class Mac(base.BaseSnmpMacPlugin):
"""Processes MAC address."""
def __init__(self, host, credential):
return
self.host = host
#self.credential = credential
#return
def scan(self):
"""Implemnets the scan method in BasePlugin class.
@ -36,8 +38,11 @@ class Mac(base.BaseSnmpMacPlugin):
Dummy scan function for compass appliance.
Returns fixed mac addresses.
"""
mac_lists = util.load_configs(setting.MAC_LIST_DIR)
mac_list = None
for item in mac_lists:
mac_list = item['MAC_LIST']
machine_lists = util.load_configs(setting.MACHINE_LIST_DIR)
for items in machine_lists:
for item in items['MACHINE_LIST']:
for k, v in item.items():
if k == self.host:
mac_list = v
return mac_list

View File

@ -21,6 +21,8 @@ import logging
from celery.signals import celeryd_init
from celery.signals import setup_logging
from compass.actions import clean
from compass.actions import delete
from compass.actions import deploy
from compass.actions import poll_switch
from compass.actions import update_progress
@ -82,8 +84,10 @@ def pollswitch(
def deploy_cluster(deployer_email, cluster_id, clusterhost_ids):
    """Deploy the given cluster.

    :param deployer_email: email of the user triggering the deploy.
    :param cluster_id: id of the cluster
    :type cluster_id: int
    :param clusterhost_ids: the id of the hosts in the cluster
    :type clusterhost_ids: list of int
    """
    try:
        deploy.deploy(cluster_id, clusterhost_ids, deployer_email)
    except Exception as error:
        logging.exception(error)
@ -95,10 +99,100 @@ def deploy_cluster(deployer_email, cluster_id, clusterhost_ids):
def reinstall_cluster(installer_email, cluster_id, clusterhost_ids):
    """Reinstall the given cluster.

    :param installer_email: email of the user triggering the reinstall.
    :param cluster_id: id of the cluster
    :type cluster_id: int
    :param clusterhost_ids: the id of the hosts in the cluster
    :type clusterhost_ids: list of int
    """
    # fixed: removed the dead `pass` statement (old stub body left over
    # from a bad merge) that preceded the real redeploy call, and the
    # stale :param cluster_hosts: docstring entries.
    try:
        deploy.redeploy(cluster_id, clusterhost_ids, installer_email)
    except Exception as error:
        logging.exception(error)
@celery.task(name='compass.tasks.delete_cluster')
def delete_cluster(
    deleter_email, cluster_id, clusterhost_ids,
    delete_underlying_host=False
):
    """Delete the given cluster.

    :param deleter_email: email of the user requesting the deletion.
    :param cluster_id: id of the cluster
    :type cluster_id: int
    :param clusterhost_ids: the id of the hosts in the cluster
    :type clusterhost_ids: list of int
    :param delete_underlying_host: when True, also remove the underlying
        hosts, not just the cluster membership.
    """
    try:
        delete.delete_cluster(
            cluster_id,
            clusterhost_ids,
            deleter_email,
            delete_underlying_host=delete_underlying_host,
        )
    except Exception as error:
        # tasks must never propagate; log and return
        logging.exception(error)
@celery.task(name='compass.tasks.delete_cluster_host')
def delete_cluster_host(
    deleter_email, cluster_id, host_id,
    delete_underlying_host=False
):
    """Delete the given cluster host.

    :param deleter_email: email of the user requesting the deletion.
    :param cluster_id: id of the cluster
    :type cluster_id: int
    :param host_id: id of the host
    :type host_id: int
    :param delete_underlying_host: when True, also remove the underlying
        host, not just its cluster membership.
    """
    try:
        delete.delete_cluster_host(
            cluster_id,
            host_id,
            deleter_email,
            delete_underlying_host=delete_underlying_host,
        )
    except Exception as error:
        # tasks must never propagate; log and return
        logging.exception(error)
@celery.task(name='compass.tasks.delete_host')
def delete_host(deleter_email, host_id, cluster_ids):
    """Delete the given host from the listed clusters.

    :param deleter_email: email of the user requesting the deletion.
    :param host_id: id of the host
    :type host_id: int
    :param cluster_ids: list of cluster id
    :type cluster_ids: list of int
    """
    try:
        delete.delete_host(host_id, deleter_email, cluster_ids)
    except Exception as error:
        # tasks must never propagate; log and return
        logging.exception(error)
@celery.task(name='compass.tasks.clean_os_installer')
def clean_os_installer(
    os_installer_name, os_installer_settings
):
    """Clean os installer.

    :param os_installer_name: name of the OS installer to clean.
    :param os_installer_settings: settings dict for that installer.
    """
    try:
        clean.clean_os_installer(
            os_installer_name, os_installer_settings
        )
    except Exception as error:
        # fixed: was logging.excception (typo), which raised
        # AttributeError and masked the original failure.
        logging.exception(error)
@celery.task(name='compass.tasks.clean_package_installer')
def clean_package_installer(
    package_installer_name, package_installer_settings
):
    """Clean package installer.

    :param package_installer_name: name of the package installer to clean.
    :param package_installer_settings: settings dict for that installer.
    """
    try:
        clean.clean_package_installer(
            package_installer_name, package_installer_settings
        )
    except Exception as error:
        # fixed: was logging.excception (typo), which raised
        # AttributeError and masked the original failure.
        logging.exception(error)
@celery.task(name='compass.tasks.poweron_host')

View File

@ -72,6 +72,8 @@ class ApiTestCase(unittest2.TestCase):
self.test_client = application.test_client()
celery.current_app.send_task = mock.Mock()
from compass.tasks import client as celery_client
celery_client.celery.send_task = mock.Mock()
url = '/users/token'
data = self.USER_CREDENTIALS
request_data = json.dumps(data)
@ -385,7 +387,7 @@ class TestClusterAPI(ApiTestCase):
# delete a cluster sucessfully
url = '/clusters/1'
return_value = self.delete(url)
self.assertEqual(return_value.status_code, 200)
self.assertEqual(return_value.status_code, 202)
def test_list_cluster_hosts(self):
# list cluster_hosts successfully
@ -451,7 +453,7 @@ class TestClusterAPI(ApiTestCase):
# delete a cluster_host successfully
url = '/clusters/1/hosts/1'
return_value = self.delete(url)
self.assertEqual(return_value.status_code, 200)
self.assertEqual(return_value.status_code, 202)
# give a non-existed cluster_id
url = '/clusters/99/hosts/1'
@ -862,7 +864,7 @@ class TestHostAPI(ApiTestCase):
# delete a host successfully
url = '/hosts/2'
return_value = self.delete(url)
self.assertEqual(return_value.status_code, 200)
self.assertEqual(return_value.status_code, 202)
# give a non-existed id
url = '/hosts/99'

View File

@ -383,15 +383,13 @@ class TestDelCluster(ClusterTestCase):
super(TestDelCluster, self).setUp()
def test_del_cluster(self):
cluster.del_cluster(
from compass.tasks import client as celery_client
celery_client.celery.send_task = mock.Mock()
del_cluster = cluster.del_cluster(
self.user_object,
self.cluster_id
)
del_clusters = cluster.list_clusters(self.user_object)
cluster_ids = []
for del_cluster in del_clusters:
cluster_ids.append(del_cluster['id'])
self.assertNotIn(self.cluster_id, cluster_ids)
self.assertIsNotNone(del_cluster['status'])
def test_is_cluster_editable(self):
#state is INSTALLING
@ -907,19 +905,14 @@ class TestDelClusterHost(ClusterTestCase):
super(TestDelClusterHost, self).tearDown()
def test_del_cluster_host(self):
cluster.del_cluster_host(
from compass.tasks import client as celery_client
celery_client.celery.send_task = mock.Mock()
del_clusterhost = cluster.del_cluster_host(
self.user_object,
self.cluster_id,
self.host_id[0]
)
del_cluster_host = cluster.list_cluster_hosts(
self.user_object,
self.cluster_id
)
result = []
for item in del_cluster_host:
result.append(item['hostname'])
self.assertNotIn('newname1', result)
self.assertIsNotNone(del_clusterhost)
def test_is_cluster_editable(self):
cluster.update_cluster_state(
@ -946,15 +939,13 @@ class TestDelClusterhost(ClusterTestCase):
super(TestDelClusterhost, self).tearDown()
def test_del_clusterhost(self):
cluster.del_clusterhost(
from compass.tasks import client as celery_client
celery_client.celery.send_task = mock.Mock()
del_clusterhost = cluster.del_clusterhost(
self.user_object,
self.clusterhost_id[0]
)
del_clusterhost = cluster.list_clusterhosts(self.user_object)
result = []
for item in del_clusterhost:
result.append(item['hostname'])
self.assertNotIn('newname1', result)
self.assertIsNotNone(del_clusterhost)
def test_is_cluster_editable(self):
cluster.update_cluster_state(

View File

@ -267,11 +267,11 @@ class TestListMachinesOrHosts(HostTestCase):
self.assertIn(item, ['newname1', 'newname2'])
def test_list_machines(self):
host.del_host(
host.del_host_from_database(
self.user_object,
self.host_ids[0]
)
host.del_host(
host.del_host_from_database(
self.user_object,
self.host_ids[1]
)
@ -327,7 +327,7 @@ class TestGetMachineOrHost(HostTestCase):
self.assertEqual(get_host['mac'], '28:6e:d4:46:c4:25')
def test_get_machine(self):
host.del_host(
host.del_host_from_database(
self.user_object,
self.host_ids[0]
)
@ -448,17 +448,13 @@ class TestDelHost(HostTestCase):
super(TestDelHost, self).tearDown()
def test_del_host(self):
host.del_host(
from compass.tasks import client as celery_client
celery_client.celery.send_task = mock.Mock()
del_host = host.del_host(
self.user_object,
self.host_ids[0]
)
del_host = host.list_hosts(
self.user_object
)
ids = []
for item in del_host:
ids.append(item['id'])
self.assertNotIn(self.host_ids[0], ids)
self.assertIsNotNone(del_host['status'])
def test_is_host_editable(self):
host.update_host_state(

View File

@ -116,11 +116,8 @@ CALLBACK_DIR = lazypy.delay(
TMPL_DIR = lazypy.delay(
lambda: os.path.join(CONFIG_DIR, 'templates')
)
MAC_LIST_DIR = lazypy.delay(
lambda: os.path.join(CONFIG_DIR, 'mac_list')
)
SWITCH_LIST_DIR = lazypy.delay(
lambda: os.path.join(CONFIG_DIR, 'switch_list')
MACHINE_LIST_DIR = lazypy.delay(
lambda: os.path.join(CONFIG_DIR, 'machine_list')
)
PROXY_URL_PREFIX = 'http://10.145.81.205:5000'

View File

@ -0,0 +1,46 @@
MACHINE_LIST = [
{
'127.0.0.1': [
{
'port': '200',
'mac': '80:fb:06:35:8c:85',
'vlan': 0,
},
{
'port': '201',
'mac': '70:7b:e8:75:71:dc',
'vlan': 0,
},
{
'port': '202',
'mac': '80:fb:06:35:8c:a0',
'vlan': 0,
},
{
'port': '203',
'mac': '70:7b:e8:75:71:d3',
'vlan': 0,
},
{
'port': '204',
'mac': '70:7b:e8:75:72:21',
'vlan': 0,
},
{
'port': '205',
'mac': '70:7b:e8:75:71:37',
'vlan': 0,
},
{
'port': '206',
'mac': '70:fb:e8:75:71:d6',
'vlan': 0,
},
{
'port': '207',
'mac': '70:7b:e8:75:71:d9',
'vlan': 0,
}
]
},
]

View File

@ -0,0 +1,2 @@
NAME = 'domain'
VALIDATOR = is_valid_domain

View File

@ -0,0 +1,3 @@
NAME = 'ip_list'
FIELD_TYPE = list
VALIDATOR = is_valid_ip

2
conf/os_field/url.conf Normal file
View File

@ -0,0 +1,2 @@
NAME = 'url'
VALIDATOR = is_valid_url

View File

@ -29,7 +29,7 @@ METADATA = {
},
'http_proxy': {
'_self': {
'field': 'general',
'field': 'url',
'default_callback': default_proxy,
'options_callback': proxy_options,
'mapping_to': 'http_proxy'
@ -37,7 +37,7 @@ METADATA = {
},
'https_proxy': {
'_self': {
'field': 'general',
'field': 'url',
'default_callback': default_proxy,
'options_callback': proxy_options,
'mapping_to': 'https_proxy'
@ -55,7 +55,7 @@ METADATA = {
'ntp_server': {
'_self': {
'is_required': True,
'field': 'general',
'field': 'ip',
'default_callback': default_ntp_server,
'options_callback': ntp_server_options,
'mapping_to': 'ntp_server'
@ -64,7 +64,7 @@ METADATA = {
'dns_servers': {
'_self': {
'is_required': True,
'field': 'general_list',
'field': 'ip_list',
'default_callback': default_dns_servers,
'options_callback': dns_servers_options,
'mapping_to': 'nameservers'
@ -72,7 +72,7 @@ METADATA = {
},
'domain': {
'_self': {
'field': 'general',
'field': 'domain',
'is_required' : True,
'default_callback': default_domain,
'options_callback': domain_options,
@ -96,7 +96,7 @@ METADATA = {
},
'local_repo': {
'_self': {
'field': 'general',
'field': 'url',
'default_callback': default_localrepo,
'mapping_to': 'local_repo'
}

View File

@ -11,6 +11,11 @@
},
"json_class": "Chef::Environment",
"chef_type": "environment",
"override_attributes": {
"compass": {
"cluster_id": "$id"
}
},
"default_attributes": {
"local_repo": "",
"mysql": {

View File

@ -73,6 +73,11 @@
},
"json_class": "Chef::Environment",
"chef_type": "environment",
"override_attributes": {
"compass": {
"cluster_id": "$id"
}
},
"default_attributes": {
"local_repo": "",
"mysql": {

View File

@ -195,6 +195,7 @@ else
fi
# create centos repo
sudo rm -rf /var/lib/cobbler/repo_mirror/centos_ppa_repo
sudo mkdir -p /var/lib/cobbler/repo_mirror/centos_ppa_repo
found_centos_ppa_repo=0
for repo in $(cobbler repo list); do
@ -220,12 +221,17 @@ cd /var/lib/cobbler/repo_mirror/centos_ppa_repo/
centos_ppa_repo_packages="
ntp-4.2.6p5-1.${CENTOS_IMAGE_TYPE_OTHER}${CENTOS_IMAGE_VERSION_MAJOR}.${CENTOS_IMAGE_TYPE,,}.${CENTOS_IMAGE_ARCH}.rpm
openssh-clients-5.3p1-94.${CENTOS_IMAGE_TYPE_OTHER}${CENTOS_IMAGE_VERSION_MAJOR}.${CENTOS_IMAGE_ARCH}.rpm
openssh-5.3p1-94.${CENTOS_IMAGE_TYPE_OTHER}${CENTOS_IMAGE_VERSION_MAJOR}.${CENTOS_IMAGE_ARCH}.rpm
iproute-2.6.32-31.${CENTOS_IMAGE_TYPE_OTHER}${CENTOS_IMAGE_VERSION_MAJOR}.${CENTOS_IMAGE_ARCH}.rpm
wget-1.12-1.8.${CENTOS_IMAGE_TYPE_OTHER}${CENTOS_IMAGE_VERSION_MAJOR}.${CENTOS_IMAGE_ARCH}.rpm
ntpdate-4.2.6p5-1.${CENTOS_IMAGE_TYPE_OTHER}${CENTOS_IMAGE_VERSION_MAJOR}.${CENTOS_IMAGE_TYPE,,}.${CENTOS_IMAGE_ARCH}.rpm
yum-plugin-priorities-1.1.30-14.${CENTOS_IMAGE_TYPE_OTHER}${CENTOS_IMAGE_VERSION_MAJOR}.noarch.rpm"
for f in $centos_ppa_repo_packages; do
download ftp://rpmfind.net/linux/${CENTOS_IMAGE_TYPE,,}/${CENTOS_IMAGE_VERSION_MAJOR}/os/${CENTOS_IMAGE_ARCH}/Packages/$f $f copy /var/lib/cobbler/repo_mirror/centos_ppa_repo/ || exit $?
if [ "$REGION" == "asia" ]; then
download http://mirrors.yun-idc.com/${CENTOS_IMAGE_TYPE,,}/${CENTOS_IMAGE_VERSION}/os/${CENTOS_IMAGE_ARCH}/Packages/$f $f copy /var/lib/cobbler/repo_mirror/centos_ppa_repo/ || exit $?
else
download ftp://rpmfind.net/linux/${CENTOS_IMAGE_TYPE,,}/${CENTOS_IMAGE_VERSION}/os/${CENTOS_IMAGE_ARCH}/Packages/$f $f copy /var/lib/cobbler/repo_mirror/centos_ppa_repo/ || exit $?
fi
done
centos_ppa_repo_rsyslog_packages="
@ -235,7 +241,7 @@ libgt-0.3.11-1.${CENTOS_IMAGE_TYPE_OTHER}${CENTOS_IMAGE_VERSION_MAJOR}.${CENTOS_
liblogging-1.0.4-1.${CENTOS_IMAGE_TYPE_OTHER}${CENTOS_IMAGE_VERSION_MAJOR}.${CENTOS_IMAGE_ARCH}.rpm
rsyslog-7.6.3-1.${CENTOS_IMAGE_TYPE_OTHER}${CENTOS_IMAGE_VERSION_MAJOR}.${CENTOS_IMAGE_ARCH}.rpm"
for f in $centos_ppa_repo_rsyslog_packages; do
download http://rpms.adiscon.com/v7-stable/epel-{CENTOS_IMAGE_VERSION_MAJOR}/${CENTOS_IMAGE_ARCH}/RPMS/$f $f copy /var/lib/cobbler/repo_mirror/centos_ppa_repo/ || exit $?
download http://rpms.adiscon.com/v7-stable/epel-${CENTOS_IMAGE_VERSION_MAJOR}/${CENTOS_IMAGE_ARCH}/RPMS/$f $f copy /var/lib/cobbler/repo_mirror/centos_ppa_repo/ || exit $?
done
# download chef client for centos ppa repo
@ -252,6 +258,7 @@ else
fi
# create ubuntu repo
sudo rm -rf /var/lib/cobbler/repo_mirror/ubuntu_ppa_repo
sudo mkdir -p /var/lib/cobbler/repo_mirror/ubuntu_ppa_repo
found_ubuntu_ppa_repo=0
for repo in $(cobbler repo list); do
@ -313,7 +320,11 @@ fi
# import cobbler distro
sudo mkdir -p /var/lib/cobbler/iso
download "$CENTOS_IMAGE_SOURCE" ${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}.iso copy /var/lib/cobbler/iso/ || exit $?
if [ "$REGION" == "asia" ]; then
download "$CENTOS_IMAGE_SOURCE_ASIA" ${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}.iso copy /var/lib/cobbler/iso/ || exit $?
else
download "$CENTOS_IMAGE_SOURCE" ${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}.iso copy /var/lib/cobbler/iso/ || exit $?
fi
sudo mkdir -p /mnt/${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}
if [ $(mount | grep -c "/mnt/${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH} ") -eq 0 ]; then
sudo mount -o loop /var/lib/cobbler/iso/${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}.iso /mnt/${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}
@ -326,8 +337,11 @@ if [ $(mount | grep -c "/mnt/${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH} ") -eq 0
else
echo "/mnt/${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH} has already mounted"
fi
download "$UBUNTU_IMAGE_SOURCE" ${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}.iso copy /var/lib/cobbler/iso/ || exit $?
if [ "$REGION" == "asia" ]; then
download "$UBUNTU_IMAGE_SOURCE_ASIA" ${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}.iso copy /var/lib/cobbler/iso/ || exit $?
else
download "$UBUNTU_IMAGE_SOURCE" ${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}.iso copy /var/lib/cobbler/iso/ || exit $?
fi
sudo mkdir -p /mnt/${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}
if [ $(mount | grep -c "/mnt/${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH} ") -eq 0 ]; then
sudo mount -o loop /var/lib/cobbler/iso/${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}.iso /mnt/${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}

View File

@ -87,6 +87,7 @@ sudo mkdir -p /var/lib/redis/
sudo chown -R redis:root /var/lib/redis
sudo mkdir -p /var/run/redis
sudo chown -R redis:root /var/run/redis
killall -9 redis-server
sudo service redis restart
echo "Checking if redis is running"
sudo service redis status
@ -153,8 +154,8 @@ else
echo "compass-progress-updated has already started"
fi
#compass check
#if [[ "$?" != "0" ]]; then
# echo "compass check failed"
# exit 1
#fi
compass check
if [[ "$?" != "0" ]]; then
echo "compass check failed"
# exit 1
fi

View File

@ -22,7 +22,7 @@ sudo cp -rf $WEB_HOME/v2 /var/www/compass_web/
if [[ $LOCAL_REPO = "y" ]]; then
echo "setting up local repo"
mkdir -p /tmp/repo
download -f "$COMPASS_LOCAL_REPO" $COMPASS_LOCAL_REPO_FILE unzip /tmp/repo || exit $?
download https://s3-us-west-1.amazonaws.com/compass-local-repo/local_repo.tar.gz local_repo.tar.gz unzip /tmp/repo || exit $?
mv -f /tmp/repo/local_repo/* /var/www/compass_web/v2/
if [[ "$?" != "0" ]]; then
echo "failed to setup local repo"

View File

@ -47,8 +47,8 @@ export CENTOS_IMAGE_VERSION_MINOR=${CENTOS_IMAGE_VERSION_MINOR:-"5"}
export CENTOS_IMAGE_VERSION=${CENTOS_IMAGE_VERSION:-"${CENTOS_IMAGE_VERSION_MAJOR}.${CENTOS_IMAGE_VERSION_MINOR}"}
export CENTOS_IMAGE_NAME=${CENTOS_IMAGE_NAME:-"${CENTOS_IMAGE_TYPE}-${CENTOS_IMAGE_VERSION}"}
export CENTOS_IMAGE_ARCH=${CENTOS_IMAGE_ARCH:-"x86_64"}
export CENTOS_IMAGE_SOURCE=${CENTOS_IMAGE_SOURCE:-"http://mirror.rackspace.com/${CENTOS_IMAGE_TYPE,,}/${CENTOS_IMAGE_VERSION_MAJOR}/isos/${CENTOS_IMAGE_ARCH}/${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}-minimal.iso"}
export CENTOS_IMAGE_SOURCE=${CENTOS_IMAGE_SOURCE:-"http://mirror.rackspace.com/${CENTOS_IMAGE_TYPE,,}/${CENTOS_IMAGE_VERSION}/isos/${CENTOS_IMAGE_ARCH}/${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}-minimal.iso"}
export CENTOS_IMAGE_SOURCE_ASIA=${CENTOS_IMAGE_SOURCE_ASIA:-"http://mirrors.yun-idc.com/${CENTOS_IMAGE_TYPE,,}/${CENTOS_IMAGE_VERSION}/isos/${CENTOS_IMAGE_ARCH}/${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}-minimal.iso"}
export UBUNTU_IMAGE_TYPE=${UBUNTU_IMAGE_TYPE:-"Ubuntu"}
export UBUNTU_IMAGE_VERSION_MAJOR=${UBUNTU_IMAGE_VERSION_MAJOR:-"12"}
export UBUNTU_IMAGE_VERSION_MINOR=${UBUNTU_IMAGE_VERSION_MINOR:-"04"}
@ -58,10 +58,7 @@ export UBUNTU_IMAGE_NAME=${UBUNTU_IMAGE_NAME:-"${UBUNTU_IMAGE_TYPE}-${UBUNTU_IMA
export UBUNTU_IMAGE_ARCH=${UBUNTU_IMAGE_ARCH:-"x86_64"}
export UBUNTU_IMAGE_ARCH_OTHER=${UBUNTU_IMAGE_ARCH_OTHER:-"amd64"}
export UBUNTU_IMAGE_SOURCE=${UBUNTU_IMAGE_SOURCE:-"http://releases.ubuntu.com/${UBUNTU_IMAGE_VERSION}/${UBUNTU_IMAGE_TYPE,,}-${UBUNTU_IMAGE_VERSION}${UBUNTU_IMAGE_PATCH_VERSION}-server-${UBUNTU_IMAGE_ARCH_OTHER}.iso"}
# Compass local repo
export COMPASS_LOCAL_REPO_FILE=${COMPASS_LOCAL_REPO_FILE:-"local_repo.tar.gz"}
export COMPASS_LOCAL_REPO=${COMPASS_LOCAL_REPO:-"http://s3-us-west-1.amazonaws.com/compass-local-repo/${COMPASS_LOCAL_REPO_FILE}"}
export UBUNTU_IMAGE_SOURCE_ASIA=${UBUNTU_IMAGE_SOURCE_ASIA:-"http://mirros.ustc.edu.cn/ubuntu-releases/${UBUNTU_IMAGE_VERSION}/${UBUNTU_IMAGE_TYPE,,}-${UBUNTU_IMAGE_VERSION}${UBUNTU_IMAGE_PATCH_VERSION}-server-${UBUNTU_IMAGE_ARCH_OTHER}.iso"}
export COBBLER_PASSWORD=${COBBLER_PASSWORD:-"cobbler"}

View File

@ -51,7 +51,7 @@ export CENTOS_IMAGE_VERSION_MINOR=${CENTOS_IMAGE_VERSION_MINOR:-"5"}
export CENTOS_IMAGE_VERSION=${CENTOS_IMAGE_VERSION:-"${CENTOS_IMAGE_VERSION_MAJOR}.${CENTOS_IMAGE_VERSION_MINOR}"}
export CENTOS_IMAGE_NAME=${CENTOS_IMAGE_NAME:-"${CENTOS_IMAGE_TYPE}-${CENTOS_IMAGE_VERSION}"}
export CENTOS_IMAGE_ARCH=${CENTOS_IMAGE_ARCH:-"x86_64"}
export CENTOS_IMAGE_SOURCE=${CENTOS_IMAGE_SOURCE:-"http://mirror.rackspace.com/${CENTOS_IMAGE_TYPE,,}/${CENTOS_IMAGE_VERSION_MAJOR}/isos/${CENTOS_IMAGE_ARCH}/${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}-minimal.iso"}
export CENTOS_IMAGE_SOURCE=${CENTOS_IMAGE_SOURCE:-"http://mirror.rackspace.com/${CENTOS_IMAGE_TYPE,,}/${CENTOS_IMAGE_VERSION}/isos/${CENTOS_IMAGE_ARCH}/${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}-minimal.iso"}
export UBUNTU_IMAGE_TYPE=${UBUNTU_IMAGE_TYPE:-"Ubuntu"}
export UBUNTU_IMAGE_VERSION_MAJOR=${UBUNTU_IMAGE_VERSION_MAJOR:-"12"}

View File

@ -158,6 +158,9 @@ loadvars()
done
fi
}
echo -e "\x1b[32mAvailable Regions are asia/america, other REGIONs please use default: america\x1b[37m"
loadvars REGION "america"
loadvars NIC "eth0"
sudo ifconfig $NIC

View File

@ -41,21 +41,18 @@ copy2dir()
git clean -x -f
git checkout $git_branch
git reset --hard remotes/origin/$git_branch
git clean -x -f -d -q
cd -
else
echo "create $destdir"
mkdir -p $destdir
git clone $repo $destdir
git clone $repo $destdir -b $git_branch
if [ $? -ne 0 ]; then
echo "failed to git clone $repo $destdir"
exit 1
else
echo "git clone $repo $destdir suceeded"
fi
cd $destdir
git checkout $git_branch
git reset --hard remotes/origin/$git_branch
cd -
fi
cd $destdir
if [[ -z $ZUUL_PROJECT ]]; then
@ -154,9 +151,9 @@ download()
if [[ "$downloaded" == "0" ]]; then
echo "downloading $url to /tmp/${package}"
if [[ -f /tmp/${package} || -L /tmp/${package} ]]; then
curl -L -z /tmp/${package} -o /tmp/${package}.tmp $url
curl -f -L -z /tmp/${package} -o /tmp/${package}.tmp $url
else
curl -L -o /tmp/${package}.tmp $url
curl -f -L -o /tmp/${package}.tmp $url
fi
if [[ "$?" != "0" ]]; then
echo "failed to download $package"

View File

@ -125,6 +125,12 @@ else
fi
cd $SCRIPT_DIR
remote_branch=$(git rev-parse --abbrev-ref --symbolic-full-name @{u})
if [[ "$?" != "0" ]]; then
remote_branch="origin/master"
fi
local_branch=$(echo ${remote_branch} | sed -e 's/origin\///g')
if [ -z $WEB_SOURCE ]; then
echo "web source $WEB_SOURCE is not set"
exit 1
@ -135,7 +141,7 @@ if [ -z $ADAPTERS_SOURCE ]; then
echo "adpaters source $ADAPTERS_SOURCE is not set"
exit 1
fi
copy2dir "$ADAPTERS_SOURCE" "$ADAPTERS_HOME" "stackforge/compass-adapters" || exit $?
copy2dir "$ADAPTERS_SOURCE" "$ADAPTERS_HOME" "stackforge/compass-adapters" ${local_branch} || exit $?
if [ "$tempest" == "true" ]; then
echo "download tempest packages"
@ -168,6 +174,7 @@ if [ "$tempest" == "true" ]; then
mkvirtualenv tempest
fi
workon tempest
rm -rf ${WORKON_HOME}/tempest/build
cd /tmp/tempest
pip install -e .
pip install sqlalchemy
@ -187,6 +194,7 @@ if ! lsvirtualenv |grep compass-core>/dev/null; then
fi
cd $COMPASSDIR
workon compass-core
rm -rf ${WORKON_HOME}/compass-core/build
echo "install compass requirements"
pip install -U -r requirements.txt
if [[ "$?" != "0" ]]; then
@ -214,7 +222,11 @@ ntpdate-4.2.6p5-1.${CENTOS_IMAGE_TYPE_OTHER}${CENTOS_IMAGE_VERSION_MAJOR}.${CENT
yum-plugin-priorities-1.1.30-14.${CENTOS_IMAGE_TYPE_OTHER}${CENTOS_IMAGE_VERSION_MAJOR}.noarch.rpm"
for f in $centos_ppa_repo_packages; do
download http://rpmfind.net/linux/${IMAGE_TYPE,,}/${IMAGE_VERSION_MAJOR}/os/${IMAGE_ARCH}/Packages/$f $f || exit $?
if [ "$REGION" == "asia" ]; then
download http://mirrors.yun-idc.com/${CENTOS_IMAGE_TYPE,,}/${CENTOS_IMAGE_VERSION}/os/${CENTOS_IMAGE_ARCH}/Packages/$f $f || exit $?
else
download http://rpmfind.net/linux/${CENTOS_IMAGE_TYPE,,}/${CENTOS_IMAGE_VERSION}/os/${CENTOS_IMAGE_ARCH}/Packages/$f $f || exit $?
fi
done
centos_ppa_repo_rsyslog_packages="
@ -225,7 +237,7 @@ liblogging-1.0.4-1.${CENTOS_IMAGE_TYPE_OTHER}${CENTOS_IMAGE_VERSION_MAJOR}.${CEN
rsyslog-7.6.3-1.${CENTOS_IMAGE_TYPE_OTHER}${CENTOS_IMAGE_VERSION_MAJOR}.${CENTOS_IMAGE_ARCH}.rpm"
for f in $centos_ppa_repo_rsyslog_packages; do
download http://rpms.adiscon.com/v7-stable/epel-6/${IMAGE_ARCH}/RPMS/$f $f || exit $?
download http://rpms.adiscon.com/v7-stable/epel-${CENTOS_IMAGE_VERSION_MAJOR}/${CENTOS_IMAGE_ARCH}/RPMS/$f $f || exit $?
done
download $CHEF_CLIENT `basename $CHEF_CLIENT` || exit $?
@ -236,11 +248,18 @@ download $UBUNTU_CHEF_CLIENT `basename $UBUNTU_CHEF_CLIENT` || exit $?
download $CHEF_SRV chef-server || exit $?
# download os images
download "$CENTOS_IMAGE_SOURCE" ${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}.iso || exit $?
download "$UBUNTU_IMAGE_SOURCE" ${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}.iso || exit $?
if [ "$REGION" == "asia" ]; then
download "$CENTOS_IMAGE_SOURCE_ASIA" ${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}.iso || exit $?
download "$UBUNTU_IMAGE_SOURCE_ASIA" ${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}.iso || exit $?
else
download "$CENTOS_IMAGE_SOURCE" ${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}.iso || exit $?
download "$UBUNTU_IMAGE_SOURCE" ${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}.iso || exit $?
fi
# download local repo
download -f "$COMPASS_LOCAL_REPO" $COMPASS_LOCAL_REPO_FILE || exit $?
if [[ $LOCAL_REPO = "y" ]]; then
download https://s3-us-west-1.amazonaws.com/compass-local-repo/local_repo.tar.gz local_repo.tar.gz || exit $?
fi
# Install net-snmp
echo "install snmp config"

View File

@ -10,5 +10,6 @@ NEXTSERVER=\${NEXTSERVER:-$NEXTSERVER}
IP_START=\${IP_START:-$IP_START}
IP_END=\${IP_END:-$IP_END}
LOCAL_REPO=\${LOCAL_REPO:-$LOCAL_REPO}
REGION=\${REGION:-$REGION}
EOF
chmod ugo+x $SCRIPT_DIR/env.conf

View File

@ -79,19 +79,31 @@ for i in `seq $VIRT_NUM`; do
exit 1
fi
echo "make pxe${i} reboot if installation failing."
sed -i "/<boot dev='hd'\/>/ a\ <bios useserial='yes' rebootTimeout='0'\/>" /etc/libvirt/qemu/pxe${i}.xml
echo "check pxe${i} state"
state=$(virsh domstate pxe${i})
echo "pxe${i} state is ${state}"
if [[ "$state" == "running" ]]; then
echo "pxe${i} is already running"
virsh destroy pxe${i}
if [[ "$?" != "0" ]]; then
echo "detroy intsance pxe${i} failed"
exit 1
else
echo "pxe${i} is detroyed"
fi
fi
echo "make pxe${i} reboot if installation failing."
sed -i "/<boot dev='hd'\/>/ a\ <bios useserial='yes' rebootTimeout='0'\/>" /etc/libvirt/qemu/pxe${i}.xml
virsh define /etc/libvirt/qemu/pxe${i}.xml
virsh dumpxml pxe${i} | grep "<bios useserial='yes' rebootTimeout='0'\/>"
if [[ "$?" != "0" ]]; then
echo "pxe${i} auto reboot is not enabled"
exit 1
else
echo "pxe${i} auto reboot is enabled"
fi
echo "start pxe${i}"
virsh start pxe${i}
if [[ "$?" != "0" ]]; then

View File

@ -31,7 +31,7 @@ commands = python setup.py testr --coverage --testr-args='{posargs}'
downloadcache = ~/cache/pip
[flake8]
ignore = H302,H233,H803,F401
ignore = H302,H304,H233,H803,F401
show-source = true
builtins = _
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,tools,build