prepared: filters for nodes

This commit is contained in:
adobdin 2016-03-04 13:22:12 +00:00
parent a4fe3aaaee
commit 9d2e0e83b6
7 changed files with 108 additions and 74 deletions

49
conf.py Normal file
View File

@@ -0,0 +1,49 @@
import yaml
import logging
import sys
from nodefilter import NodeFilter
class Conf(object):
    """Configuration parameters for a run, with defaults.

    Class attributes hold the defaults; ``load_conf`` overrides them with
    values read from a YAML file.  ``hard_filter`` and ``soft_filter``
    arrive from YAML as plain dicts and are wrapped in ``NodeFilter``.
    """

    hard_filter = None
    soft_filter = NodeFilter()
    ssh = {'opts': '-oConnectTimeout=2 -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oLogLevel=error -lroot -oBatchMode=yes',
           'vars': 'OPENRC=/root/openrc IPTABLES_STR="iptables -nvL"'}
    cluster = None
    fuelip = 'localhost'
    outdir = '/tmp/timmy-gen/info'
    timeout = 15
    logs_archive = '/tmp/timmy-logs.tar'
    rqdir = './rq'
    compress_timeout = 3600
    find = {'template': "-name '*.gz' -o -name '*.log' -o -name '*-[0-9]4'",
            'path': '/var/log/'}

    def __init__(self, **entries):
        self.__dict__.update(entries)
        # The filters come from YAML as dicts and must be wrapped.  Guard
        # against the class-level default, which is already a NodeFilter
        # instance: unpacking it with ** would raise TypeError.
        if self.hard_filter and not isinstance(self.hard_filter, NodeFilter):
            self.hard_filter = NodeFilter(**self.hard_filter)
        if self.soft_filter and not isinstance(self.soft_filter, NodeFilter):
            self.soft_filter = NodeFilter(**self.soft_filter)

    @staticmethod
    def load_conf(filename):
        """Build a Conf from the YAML file *filename*; exit(1) on failure."""
        try:
            with open(filename, 'r') as f:
                # safe_load: a config file must not be able to construct
                # arbitrary Python objects (plain yaml.load allows that).
                conf = yaml.safe_load(f)
        except IOError as e:
            logging.error("I/O error(%s): %s" % (e.errno, e.strerror))
            sys.exit(1)
        except yaml.YAMLError as e:
            logging.error("Could not parse %s: %s" % (filename, e))
            sys.exit(1)
        except Exception:
            # Boundary handler: log and exit instead of the original bare
            # 'except:', which also swallowed SystemExit/KeyboardInterrupt.
            logging.error("Unexpected error: %s" % sys.exc_info()[0])
            sys.exit(1)
        # An empty YAML file loads as None; fall back to the defaults.
        conf = conf or {}
        logging.info(conf)
        return Conf(**conf)
# Smoke-test entry point: load the sample config and dump it back as YAML.
if __name__ == '__main__':
    conf = Conf.load_conf('config.yaml')
    print(yaml.dump(conf))

View File

@@ -4,12 +4,13 @@ ssh:
fuelip: 127.0.0.1
rqdir: ./rq
logdir: ./logs
out-dir: ../timmy-ng/info
node-status: ['ready']
outdir: ../timmy-ng/info
soft_filter:
status: ['ready']
timeout: 15
find:
template: -name '*.log'
log-files:
log_files:
default: -name '*.log'
by-role:
compute: -name '*.log'

View File

@@ -1,17 +1,18 @@
ssh:
opts: -oConnectTimeout=2 -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oLogLevel=error -lroot -oBatchMode=yes
vars: OPENRC=/root/openrc IPTABLES_STR="iptables -nvL"
#cluster: 0
fuelip: localhost
out-dir: /tmp/timmy-gen/info
timeout: 15
node-status: ['ready', 'discover']
find:
template: -name '*.gz' -o -name '*.log' -o -name '*-[0-9]4'
path: /var/log/
logs-archive: /tmp/timmy-logs.tar
compress-timeout: 3600
log-files:
template: -name '*.gz' -o -name '*.log' -o -name '*-[0-9]4'
by-role: compute, controller, mongo, ceph
by-node-id: 0
conf:
ssh:
opts: -oConnectTimeout=2 -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oLogLevel=error -lroot -oBatchMode=yes
vars: OPENRC=/root/openrc IPTABLES_STR="iptables -nvL"
#cluster: 0
fuelip: localhost
out-dir: /tmp/timmy-gen/info
timeout: 15
node-status: ['ready', 'discover']
find:
template: -name '*.gz' -o -name '*.log' -o -name '*-[0-9]4'
path: /var/log/
logs-archive: /tmp/timmy-logs.tar
compress-timeout: 3600
log-files:
template: -name '*.gz' -o -name '*.log' -o -name '*-[0-9]4'
by-role: compute, controller, mongo, ceph
by-node-id: 0

View File

@ -1,28 +0,0 @@
import yaml
import logging
import sys
def load_conf(filename):
    """Merge default.yaml with *filename* and return the resulting dict.

    NOTE(review): this is the pre-class loader that Conf.load_conf replaces
    in this commit; it layers the user config on top of default.yaml.
    """
    try:
        with open('default.yaml', 'r') as f:
            conf = yaml.load(f)
        with open(filename, 'r') as f:
            nc = yaml.load(f)
        # user-supplied values win over the defaults (shallow merge only)
        conf.update(nc)
    except IOError as e:
        logging.error("I/O error(%s): %s" % (e.errno, e.strerror))
        sys.exit(1)
    except ValueError:
        logging.error("Could not convert data")
        sys.exit(1)
    except:
        # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt
        logging.error("Unexpected error: %s" % sys.exc_info()[0])
        sys.exit(1)
    logging.info(conf)
    return conf
# Smoke-test entry point: load the sample config and print the merged dict.
if __name__ == '__main__':
    conf = load_conf('config.yaml')
    print(conf)

9
nodefilter.py Normal file
View File

@@ -0,0 +1,9 @@
class NodeFilter(object):
    """Criteria for selecting nodes (status, online state, roles, ids).

    Class attributes provide the defaults; keyword arguments passed to the
    constructor override them.
    """

    status = ['ready', 'discover']
    online = True
    roles = []
    node_ids = []

    def __init__(self, **entries):
        # Copy the mutable defaults onto the instance first, so mutating
        # one filter's lists cannot leak into every other NodeFilter
        # (shared-mutable-class-attribute pitfall); entries then override.
        self.status = list(self.status)
        self.roles = list(self.roles)
        self.node_ids = list(self.node_ids)
        self.__dict__.update(entries)

View File

@@ -239,20 +239,20 @@ class Nodes(object):
"""Class nodes """
def __init__(self, cluster, extended, conf, destdir, filename=None):
self.dirname = conf['rqdir'].rstrip('/')
self.dirname = conf.rqdir.rstrip('/')
if (not os.path.exists(self.dirname)):
logging.error("directory %s doesn't exist" % (self.dirname))
sys.exit(1)
self.files = get_dir_structure(conf['rqdir'])[os.path.basename(self.dirname)]
self.fuelip = conf['fuelip']
self.sshopts = conf['ssh']['opts']
self.sshvars = conf['ssh']['vars']
self.timeout = conf['timeout']
self.files = get_dir_structure(conf.rqdir)[os.path.basename(self.dirname)]
self.fuelip = conf.fuelip
self.sshopts = conf.ssh['opts']
self.sshvars = conf.ssh['vars']
self.timeout = conf.timeout
self.conf = conf
self.destdir = destdir
self.get_version()
self.cluster = cluster
self.logdir = conf['logdir']
self.logdir = conf.logdir
self.extended = extended
logging.info('extended: %s' % self.extended)
if filename is not None:
@@ -290,6 +290,8 @@ class Nodes(object):
ip=self.fuelip)
self.nodes = {self.fuelip: node}
for node in self.njdata:
if self.conf.hard_filter:
pass
node_roles = node.get('roles')
if not node_roles:
roles = ['None']
@@ -379,7 +381,7 @@ class Nodes(object):
if (self.cluster and str(self.cluster) != str(node.cluster) and
node.cluster != 0):
continue
if node.status in self.conf['node-status'] and node.online:
if node.status in self.conf.soft_filter.status and node.online:
t = threading.Thread(target=node.exec_cmd,
args=(label,
self.sshvars,
@@ -400,7 +402,7 @@ class Nodes(object):
if (self.cluster and str(self.cluster) != str(node.cluster) and
node.cluster != 0):
continue
if node.status in self.conf['node-status'] and node.online:
if node.status in self.conf.soft_filter.status and node.online:
t = threading.Thread(target=node.du_logs,
args=(label,
self.sshopts,
@@ -493,7 +495,7 @@ class Nodes(object):
if (self.cluster and str(self.cluster) != str(node.cluster) and
node.cluster != 0):
continue
if node.status in self.conf['node-status'] and node.online:
if node.status in self.conf.soft_filter.status and node.online:
t = threading.Thread(target=node.get_files,
args=(label,
self.logdir,
@@ -520,7 +522,7 @@ class Nodes(object):
if (self.cluster and str(self.cluster) != str(node.cluster) and
node.cluster != 0):
continue
if (node.status in self.conf['node-status'] and
if (node.status in self.conf.soft_filter.status and
node.online and str(node.node_id) != '0'):
t = threading.Thread(target=node.get_files,
args=(label,
@@ -603,7 +605,7 @@ def main(argv=None):
destdir=args.dest_dir)
# nodes.print_nodes()
nodes.get_node_file_list()
nodes.calculate_log_size(conf['find']['template'])
nodes.calculate_log_size(conf.find['template'])
if nodes.is_enough_space():
nodes.get_log_files(args.out_dir)
nodes.launch_ssh(args.out_dir)

View File

@@ -19,7 +19,7 @@ import argparse
import nodes
import logging
import sys
import loadconf
from conf import Conf
import flock
def main(argv=None):
@@ -60,32 +60,32 @@ def main(argv=None):
loglevel = logging.INFO
logging.basicConfig(level=loglevel,
format='%(asctime)s %(levelname)s %(message)s')
conf = loadconf.load_conf(args.config)
n = nodes.Nodes(conf=conf,
config = Conf.load_conf(args.config)
n = nodes.Nodes(conf=config,
extended=args.extended,
cluster=args.cluster,
destdir=args.dest_file)
# nodes.print_nodes()
if not args.only_logs:
n.get_node_file_list()
n.launch_ssh(conf['out-dir'])
n.get_conf_files(conf['out-dir'])
n.create_archive_general(conf['out-dir'], '/tmp/timmy-gen.tar.bz2', 60)
n.launch_ssh(config.outdir)
n.get_conf_files(config.outdir)
n.create_archive_general(config.outdir, '/tmp/timmy-gen.tar.bz2', 60)
if args.only_logs or args.getlogs:
lock = flock.FLock('/tmp/timmy-logs.lock')
if not lock.lock():
logging.warning('Unable to obtain lock, skipping "logs"-part')
return 1
n.get_node_file_list()
n.calculate_log_size(conf['find']['template'])
n.calculate_log_size(config.find['template'])
if n.is_enough_space():
n.get_log_files(conf['out-dir'])
n.create_archive_logs(conf['find']['template'],
conf['logs-archive'],
conf['compress-timeout'])
n.add_logs_archive(conf['out-dir'], nodes.lkey,
conf['logs-archive'], 120)
n.compress_archive(conf['logs-archive'], conf['compress-timeout'])
n.get_log_files(config.outdir)
n.create_archive_logs(config.find['template'],
config.logs_archive,
config.compress_timeout)
n.add_logs_archive(config.outdir, nodes.lkey,
config.logs_archive, 120)
n.compress_archive(config.logs_archive, config.compress_timeout)
n.print_nodes()
return 0