Merge master version 1.26.3 into stable/newton

Change-Id: Id0f1c869ba13f161004fbc800a8033103d70ba79

parent a1ad8685b9
commit cc8b3e0868

setup.py | 5
@@ -38,7 +38,10 @@ setup(name=pname,
                    'operations: two-way data transfer, log collection, '
                    'remote command execution'),
       long_description=open('README.md').read(),
-      packages=[pname, '%s.modules' % pname, '%s_data' % pname],
+      packages=[pname,
+                '%s.analyze_modules' % pname,
+                '%s.modules' % pname,
+                '%s_data' % pname],
       install_requires=['pyyaml'],
       include_package_data=True,
       entry_points={'console_scripts': ['%s=%s.cli:main' % (pname, pname)]},
@@ -4,7 +4,7 @@
 %global pypi_name timmy
 
 Name: python-%{pypi_name}
-Version: 1.25.0
+Version: 1.26.3
 Release: 1%{?dist}~mos0
 Summary: Log collector tool for OpenStack Fuel
@@ -107,6 +107,33 @@ popd
 
 
 %changelog
+* Wed Dec 21 2016 Dmitry Sutyagin <dsutyagin@mirantis.com> - 1.26.3
+- Fix: scripts_all_pairs sometimes uses same node
+
+* Tue Dec 20 2016 Dmitry Sutyagin <dsutyagin@mirantis.com> - 1.26.2
+- Add: rabbitmq analysis module
+
+* Tue Dec 20 2016 Aleksandr Dobdin <adobdin@mirantis.com> - 1.26.1
+- Fix: download network templates for 9.1
+
+* Thu Dec 15 2016 Alexander Lemeshko <oliemieshko@mirantis.com> - 1.26.0
+- Add: scripts_all_pairs 'one way' mode
+
+* Wed Dec 14 2016 Dmitry Sutyagin <dsutyagin@mirantis.com> - 1.25.5
+- Fix: RuntimeWarning when using analyze
+
+* Mon Dec 12 2016 Dmitry Sutyagin <dsutyagin@mirantis.com> - 1.25.4
+- Change: compact analyze output, minor refactor
+
+* Mon Dec 12 2016 Dmitry Sutyagin <dsutyagin@mirantis.com> - 1.25.3
+- Add: dynamic import of analyze modules
+
+* Thu Dec 9 2016 Aleksandr Dobdin <adobdin@mirantis.com> - 1.25.2
+- Add: fuel network template download script
+
+* Fri Dec 9 2016 Dmitry Sutyagin <dsutyagin@mirantis.com> - 1.25.1
+- Fix: IOError if no-clean + outdir missing
+
 * Fri Dec 9 2016 Alexander Lemeshko <oliemieshko@mirantis.com> - 1.25.0
 - Add: ability to analyze old results

timmy/analyze.py | 131
@@ -15,80 +15,34 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-from timmy.env import project_name
 import logging
+import os
 import sys
+import timmy
+from timmy.analyze_health import GREEN, UNKNOWN, YELLOW, RED
+from timmy.env import project_name
 
 
 logger = logging.getLogger(project_name)
 
 
 def analyze(node_manager):
-    col_msg = 'Column "%s" not found in output of "%s" from node "%s"'
-    green = 0
-    unknown = 1
-    yellow = 2
-    red = 3
+    def is_module(f):
+        return f.endswith('.py') and not f.startswith('__')
 
-    def parse_df_m(data, script, node):
-        column_use = "Use%"
-        full = 100
-        near_full = 80
-        health = green
-        details = []
-        if column_use not in data[0]:
-            logger.warning(col_msg % (column_use, script, node.repr))
-            health = unknown
-        index = data[0].split().index(column_use)
-        prepend_str = ''  # workaround for data which spans 2 lines
-        index_shift = 0
-        for line in data[2:]:
-            if len(line.split()) <= index:
-                prepend_str = line.rstrip()
-                index_shift = len(line.split())
-                continue
-            value = int(line.split()[index - index_shift][:-1])
-            if value >= full:
-                health = red
-                details.append(prepend_str + line)
-            elif value >= near_full:
-                health = yellow if health < yellow else health
-                details.append(prepend_str + line)
-            prepend_str = ''
-            index_shift = 0
-        return health, details
+    fn_mapping = {}
+    modules_dir = 'analyze_modules'
+    scan_path = os.path.join(os.path.dirname(__file__), modules_dir)
+    base_path = os.path.split(timmy.__path__[0])[0]
+    for item in os.walk(scan_path):
+        for module_path in [m for m in item[2] if is_module(m)]:
+            module_full_path = os.path.join(scan_path, module_path)
+            module_rel_path = os.path.relpath(module_full_path, base_path)
+            module_rel_path_noext = os.path.splitext(module_rel_path)[0]
+            module_name = module_rel_path_noext.replace(os.path.sep, '.')
+            module = __import__(module_name, fromlist=[project_name])
+            module.register(fn_mapping)
 
-    def parse_df_i(data, script, node):
-        column_use = "IUse%"
-        full = 100
-        near_full = 80
-        health = green
-        details = []
-        if column_use not in data[0]:
-            logger.warning(col_msg % (column_use, script, node.repr))
-            health = unknown
-        index = data[0].split().index(column_use)
-        prepend_str = ''  # workaround for data which spans 2 lines
-        index_shift = 0
-        for line in data[2:]:
-            if len(line.split()) <= index:
-                prepend_str = line.rstrip()
-                index_shift = len(line.split())
-                continue
-            if "%" in line.split()[index - index_shift]:
-                value = int(line.split()[index - index_shift][:-1])
-                if value >= full:
-                    health = red
-                    details.append(prepend_str + line)
-                elif value >= near_full:
-                    health = yellow if health < yellow else health
-                    details.append(prepend_str + line)
-            prepend_str = ''
-        return health, details
-
-    fn_mapping = {"df-m": parse_df_m,
-                  "df-i": parse_df_i}
     results = {}
     for node in node_manager.nodes.values():
         if not node.mapscr:
@@ -96,50 +50,47 @@ def analyze(node_manager):
         for script, param in node.mapscr.items():
             if script in fn_mapping:
                 if not os.path.exists(param['output_path']):
-                    logger.warning("File %s does not exist"
+                    logger.warning('File %s does not exist'
                                    % param['output_path'])
                     continue
-                with open(param['output_path'], "r") as f:
+                with open(param['output_path'], 'r') as f:
                     data = [l.rstrip() for l in f.readlines()]
                 health, details = fn_mapping[script](data, script, node)
                 if node.repr not in results:
                     results[node.repr] = []
-                results[node.repr].append({"script": script,
-                                           "output_file": param['output_path'],
-                                           "health": health,
-                                           "details": details})
+                results[node.repr].append({'script': script,
+                                           'output_file': param['output_path'],
+                                           'health': health,
+                                           'details': details})
     node_manager.analyze_results = results
 
 
 def analyze_print_results(node_manager):
-    code_colors = {3: ["RED", "\033[91m"],
-                   2: ["YELLOW", "\033[93m"],
-                   0: ["GREEN", "\033[92m"],
-                   1: ["BLUE", "\033[94m"]}
-    color_end = "\033[0m"
-    print("Nodes health analysis:")
+    code_colors = {GREEN: ['GREEN', '\033[92m'],
+                   UNKNOWN: ['UNKNOWN', '\033[94m'],
+                   YELLOW: ['YELLOW', '\033[93m'],
+                   RED: ['RED', '\033[91m']}
+    color_end = '\033[0m'
+    print('Nodes health analysis:')
     for node, result in node_manager.analyze_results.items():
-        node_health = max([x["health"] for x in result])
+        node_health = max([x['health'] for x in result])
         node_color = code_colors[node_health][1]
        health_repr = code_colors[node_health][0]
-        print(" %s%s: %s%s" % (node_color, node, health_repr, color_end))
+        print(' %s%s: %s%s' % (node_color, node, health_repr, color_end))
         if node_health == 0:
             continue
         for r in result:
             if r['health'] == 0:
                 continue
-            color = code_colors[r["health"]][1]
+            color = code_colors[r['health']][1]
             sys.stdout.write(color)
-            for key, value in r.items():
-                if key == "health":
-                    value = code_colors[value][0]
-                if key == "details" and len(value) > 0:
-                    if len(value) > 1:
-                        print(" details:")
-                        for d in value:
-                            print(" - %s" % d)
-                    else:
-                        print(" details: %s" % value[0])
-                elif key != "details":
-                    print(" %s: %s" % (key, value))
+            health_repr = code_colors[r['health']][0]
+            print(' %s: %s' % (r['script'], health_repr))
+            print(' %s: %s' % ('output_file', r['output_file']))
+            if len(r['details']) > 1:
+                print(' details:')
+                for d in r['details']:
+                    print(' - %s' % d)
+            else:
+                print(' details: %s' % r['details'][0])
             sys.stdout.write(color_end)
timmy/analyze_health.py | 21 (new file)

@@ -0,0 +1,21 @@
#!/usr/bin/env python2
# -*- coding: utf-8 -*-

# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

GREEN = 0
UNKNOWN = 1
YELLOW = 2
RED = 3
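
The numeric values are ordered by severity on purpose: analyze() and analyze_print_results() aggregate per-script statuses with max(), so the worst status wins. A minimal sketch of that idiom (sample values invented):

from timmy.analyze_health import GREEN, UNKNOWN, YELLOW, RED

script_healths = [GREEN, YELLOW, GREEN]   # per-script results for one node
node_health = max(script_healths)         # -> YELLOW, the worst status seen
assert GREEN < UNKNOWN < YELLOW < RED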
timmy/analyze_modules/__example__.py | 76 (new file)

@@ -0,0 +1,76 @@
#!/usr/bin/env python2
# -*- coding: utf-8 -*-

# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

'''
please import and use health constants from analyze_health
GREEN - no issues
UNKNOWN - cannot determine / cannot parse output
YELLOW - condition is bad but not critical / impactful
RED - critical / impactful condition


if you want to write log messages, add the following lines:
from timmy.env import project_name
import logging


logger = logging.getLogger(project_name)
'''
from timmy.analyze_health import GREEN, UNKNOWN, YELLOW, RED
from timmy.env import project_name
import logging


logger = logging.getLogger(project_name)


def register(function_mapping):
    '''
    this function is mandatory and its name must be "register"
    it should have 1 argument which is a dict
    it should update the dict with a relation between script names and
    analyzing functions
    more than one script can be mapped by a single module
    see script names in timmy_data/rq/scripts folder
    '''
    function_mapping['script-basename'] = parsing_function


def parsing_function(data, script, node):
    '''
    each analyzing function should have 3 arguments:
    data - list of strings acquired by reading the output file
    script - path to the script file
    node - node object

    return should contain 2 values:
    health - set to one of the imported constants according to the analysis
    details - a list of strings - an explanatory message or
              lines which were indicative of the issue
    '''
    health = UNKNOWN
    line = data[0]  # in this example we only look at the first line
    details = [line]
    if line.find('error') != -1:
        health = RED
        details.append('This is very bad! Do something NOW!!!')
    elif line.find('warning') != -1:
        health = YELLOW
        details.append('Who cares if it is not RED, right? :)')
    elif line.find('ok') != -1:
        health = GREEN
    return health, details
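
Note that is_module() in analyze.py skips filenames starting with '__', so this example module is never auto-loaded. A rough sketch of how the loader consumes a module such as this one; the stub node is hypothetical, since only node.repr is ever read (and only for log messages):

from timmy.analyze_modules import __example__ as example


class StubNode(object):
    repr = 'node-1'  # stand-in; analyze() passes a real timmy node object


fn_mapping = {}
example.register(fn_mapping)   # maps 'script-basename' to parsing_function
data = ['status: error occurred']   # as if read from the script's output file
health, details = fn_mapping['script-basename'](data, 'script-basename',
                                                StubNode())
# health == RED; details holds the line plus the explanatory message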
timmy/analyze_modules/__init__.py | 0 (new empty file)
timmy/analyze_modules/df.py | 88 (new file)

@@ -0,0 +1,88 @@
#!/usr/bin/env python2
# -*- coding: utf-8 -*-

# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from timmy.analyze_health import GREEN, UNKNOWN, YELLOW, RED
from timmy.env import project_name
import logging


logger = logging.getLogger(project_name)

col_msg = 'Column "%s" not found in output of "%s" from node "%s"'


def register(function_mapping):
    function_mapping['df-m'] = parse_df_m
    function_mapping['df-i'] = parse_df_i


def parse_df_m(data, script, node):
    column_use = "Use%"
    full = 100
    near_full = 80
    health = GREEN
    details = []
    if column_use not in data[0]:
        logger.warning(col_msg % (column_use, script, node.repr))
        health = UNKNOWN
    index = data[0].split().index(column_use)
    prepend_str = ''  # workaround for data which spans 2 lines
    index_shift = 0
    for line in data[2:]:
        if len(line.split()) <= index:
            prepend_str = line.rstrip()
            index_shift = len(line.split())
            continue
        value = int(line.split()[index - index_shift][:-1])
        if value >= full:
            health = RED
            details.append(prepend_str + line)
        elif value >= near_full:
            health = YELLOW if health < YELLOW else health
            details.append(prepend_str + line)
        prepend_str = ''
        index_shift = 0
    return health, details


def parse_df_i(data, script, node):
    column_use = "IUse%"
    full = 100
    near_full = 80
    health = GREEN
    details = []
    if column_use not in data[0]:
        logger.warning(col_msg % (column_use, script, node.repr))
        health = UNKNOWN
    index = data[0].split().index(column_use)
    prepend_str = ''  # workaround for data which spans 2 lines
    index_shift = 0
    for line in data[2:]:
        if len(line.split()) <= index:
            prepend_str = line.rstrip()
            index_shift = len(line.split())
            continue
        if "%" in line.split()[index - index_shift]:
            value = int(line.split()[index - index_shift][:-1])
            if value >= full:
                health = RED
                details.append(prepend_str + line)
            elif value >= near_full:
                health = YELLOW if health < YELLOW else health
                details.append(prepend_str + line)
        prepend_str = ''
    return health, details
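
To make the column parsing concrete, here is parse_df_m() on an invented 'df -m' capture. The loop starts at data[2:], so the row right after the header is never inspected, and a stub node suffices because node.repr is only read on the warning path:

from timmy.analyze_health import YELLOW
from timmy.analyze_modules.df import parse_df_m


class StubNode(object):
    repr = 'node-1'  # hypothetical stand-in for a timmy node object


data = ['Filesystem 1M-blocks Used Available Use% Mounted on',
        'udev 16000 0 16000 0% /dev',   # skipped: iteration starts at data[2:]
        '/dev/sda1 50000 45000 5000 90% /']
health, details = parse_df_m(data, 'df-m', StubNode())
assert health == YELLOW   # 90 >= near_full (80) but below full (100)
assert details == ['/dev/sda1 50000 45000 5000 90% /']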
timmy/analyze_modules/rabbitmq.py | 195 (new file)

@@ -0,0 +1,195 @@
#!/usr/bin/env python2
# -*- coding: utf-8 -*-

# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from timmy.analyze_health import GREEN, UNKNOWN, YELLOW, RED
from timmy.env import project_name
import logging
import re
import yaml


logger = logging.getLogger(project_name)


def register(function_mapping):
    function_mapping['rabbitmqctl-list-queues'] = parse_list_queues
    function_mapping['rabbitmqctl-status'] = parse_status


def parse_list_queues(data, script, node):
    warning = 100
    error = 1000
    health = GREEN
    details = []
    for line in data[1:]:
        elements = line.rstrip().split()
        if len(elements) < 2:
            logger.warning('no value in list_queues: "%s"' % line.rstrip())
        else:
            count = int(elements[1])
            if count < error and count >= warning:
                health = max(health, YELLOW)
                details.append(line)
    return health, details


def prepare_status(data):
    bad_yaml = ''.join(data[1:])
    # quoting string elements
    bad_yaml = re.sub(r'([,{])([a-z_A-Z]+)([,}])', r'\1"\2"\3', bad_yaml)
    # changing first element into a key - replacing , with :
    bad_yaml = re.sub(r'({[^,]+),', r'\1:', bad_yaml)
    bad_yaml_list = list(bad_yaml)
    good_yaml, _ = fix_dicts(bad_yaml_list, 0)
    status_list = yaml.load(''.join(good_yaml))
    status_dict = squash_dicts(status_list)
    return status_dict


def fix_dicts(json_str_list, pos):
    '''recursively puts all comma-separated values into square
    brackets to make data look like normal 'key: value' dicts
    '''
    quoted_string = False
    value = True
    value_pos = 0
    commas = False
    is_list = False
    in_list = 0
    while pos < len(json_str_list):
        if not quoted_string:
            if json_str_list[pos] == '{':
                json_str_list, pos = fix_dicts(json_str_list, pos+1)
            elif json_str_list[pos] == '"':
                quoted_string = True
            elif json_str_list[pos] == ':':
                value = True
                value_pos = pos + 1
            elif json_str_list[pos] == '[':
                if value and not commas:
                    is_list = True
                in_list += 1
            elif json_str_list[pos] == ']':
                in_list -= 1
            elif json_str_list[pos] == ',':
                commas = True
                if not in_list:
                    is_list = False
            elif json_str_list[pos] == '}':
                if not is_list and commas:
                    json_str_list = (json_str_list[:value_pos] + ['['] +
                                     json_str_list[value_pos:pos] + [']'] +
                                     json_str_list[pos:])
                    pos += 2
                return json_str_list, pos
        elif json_str_list[pos] == '"':
            quoted_string = False
        pos += 1
    return json_str_list, pos


def squash_dicts(input_data):
    # recursively converts [{a:1},{b:2},{c:3}...] into {a:1, b:2, c:3}
    if type(input_data) is list:
        for i in range(len(input_data)):
            input_data[i] = squash_dicts(input_data[i])
        if all([type(i) is dict for i in input_data]):
            kv_list = [(k, v) for i in input_data for k, v in i.items()]
            input_data = dict(kv_list)
    elif type(input_data) is dict:
        for k, v in input_data.items():
            input_data[k] = squash_dicts(v)
    return input_data


def parse_status(data, script, node):
    status = prepare_status(data)
    health = GREEN
    details = []

    # disk free check
    try:
        dfree = int(status['disk_free'])
        dlimit = int(status['disk_free_limit'])
        dfree_ok = 10**9  # 1GB
        if dfree > dlimit and dfree < dfree_ok:
            health = max(health, YELLOW)
            details.append('disk_free: %s, disk_free_limit: %s'
                           % (dfree, dlimit))
        elif dfree <= dlimit:
            health = max(health, RED)
            details.append('disk_free: %s, disk_free_limit: %s'
                           % (dfree, dlimit))
    except ValueError:
        details.append('cannot convert disk_free* to int')
        health = max(health, UNKNOWN)
    except KeyError:
        details.append('disk_free* not present')
        health = max(health, UNKNOWN)

    # process limit check
    try:
        pused = float(status['processes']['used'])
        plimit = float(status['processes']['limit'])
        ok_ratio = 0.9
        if pused < plimit and pused/plimit > ok_ratio:
            health = max(health, YELLOW)
            details.append('processes used: %s, processes limit: %s'
                           % (pused, plimit))
        elif pused >= plimit:
            health = max(health, RED)
            details.append('processes used: %s, processes limit: %s'
                           % (pused, plimit))
    except ValueError:
        details.append('cannot convert processes* to numbers')
        health = max(health, UNKNOWN)
    except KeyError:
        details.append('processes* not present')
        health = max(health, UNKNOWN)

    # fd check
    try:
        sused = float(status['file_descriptors']['sockets_used'])
        slimit = float(status['file_descriptors']['sockets_limit'])
        ok_ratio = 0.9
        if sused < slimit and sused/slimit > ok_ratio:
            health = max(health, YELLOW)
            details.append('sockets used: %s, sockets limit: %s'
                           % (sused, slimit))
        elif sused >= slimit:
            health = max(health, RED)
            details.append('sockets used: %s, sockets limit: %s'
                           % (sused, slimit))
        fdused = float(status['file_descriptors']['total_used'])
        fdlimit = float(status['file_descriptors']['total_limit'])
        ok_ratio = 0.9
        if fdused < fdlimit and fdused/fdlimit > ok_ratio:
            health = max(health, YELLOW)
            details.append('fd total used: %s, fd total limit: %s'
                           % (fdused, fdlimit))
        elif fdused >= fdlimit:
            health = max(health, RED)
            details.append('fd total used: %s, fd total limit: %s'
                           % (fdused, fdlimit))
    except ValueError:
        details.append('cannot convert file_descriptors* to numbers')
        health = max(health, UNKNOWN)
    except KeyError:
        details.append('file_descriptors* not present')
        health = max(health, UNKNOWN)

    return health, details
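
prepare_status() exists because rabbitmqctl status prints an Erlang proplist rather than anything YAML-ready: the first regex quotes bare atoms, the second turns each tuple's first comma into a colon, fix_dicts() wraps remaining comma-separated values in brackets, and squash_dicts() collapses the resulting list of one-key dicts. Roughly, on a heavily trimmed, invented sample:

raw = ['Status of node rabbit@node-1 ...',   # data[0] is discarded
       '[{pid,12345},',
       ' {disk_free_limit,50000000},',
       ' {disk_free,9000000000}]']
status = prepare_status(raw)
# after the regexes: [{"pid":12345}, {"disk_free_limit":50000000}, ...]
# after yaml.load + squash_dicts, approximately:
#   {'pid': 12345, 'disk_free_limit': 50000000, 'disk_free': 9000000000}
# parse_status() then compares disk_free against disk_free_limit and dfree_ok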
@@ -86,6 +86,11 @@ def parser_init(add_help=False):
                               ' a path specified by "rqdir" configuration'
                               ' parameter. For help on shell mode, read'
                               ' timmy/conf.py.') % Node.skey)
+    parser.add_argument('--one-way', action='store_true',
+                        help=('When executing scripts_all_pairs (if defined),'
+                              ' for each pair of nodes [A, B] run client'
+                              ' script only on A (A->B connection).'
+                              ' Default is to run both A->B and B->A.'))
     parser.add_argument('-P', '--put', nargs=2, action='append',
                         metavar=('SOURCE', 'DESTINATION'),
                         help=('Enables shell mode. Can be specified multiple'
@@ -300,6 +305,8 @@ def main(argv=None):
         conf['analyze'] = True
     if args.offline:
         conf['offline'] = True
+    if args.one_way:
+        conf['scripts_all_pairs_one_way'] = True
     logger.info('Using rqdir: %s, rqfile: %s' %
                 (conf['rqdir'], conf['rqfile']))
     nm = pretty_run(args.quiet, 'Initializing node data',
@@ -379,5 +386,6 @@ def main(argv=None):
                not args.quiet, not conf['offline']]):
         print('Archives available in "%s".' % nm.conf['archive_dir'])
 
+
 if __name__ == '__main__':
     sys.exit(main(sys.argv))
@@ -74,6 +74,7 @@ def init_default_conf():
     conf['clean'] = True
     conf['analyze'] = False
     conf['offline'] = False  # mark all nodes as offline
+    conf['scripts_all_pairs_one_way'] = False
     return conf
 
@@ -16,7 +16,7 @@
 # under the License.
 
 project_name = 'timmy'
-version = '1.25.0'
+version = '1.26.3'
 
 if __name__ == '__main__':
     import sys
@@ -96,6 +96,7 @@ class FLock:
         # lockfile can be erased and everything still works normally.
         pass
 
+
 # Test main routine
 if __name__ == '__main__':
     import time
@@ -548,7 +548,7 @@ class NodeManager(object):
             conf['archive_dir'] += timestamp_str
         if conf['clean']:
             shutil.rmtree(conf['outdir'], ignore_errors=True)
-            tools.mdir(conf['outdir'])
+        tools.mdir(conf['outdir'])
         version_filename = '%s_version.txt' % project_name
         version_filepath = os.path.join(conf['outdir'], version_filename)
         with open(version_filepath, 'a') as f:
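
The dedent in the hunk above is the fix recorded as 1.25.1 in the changelog ('Fix: IOError if no-clean + outdir missing'): tools.mdir(conf['outdir']) now runs unconditionally, so the open(version_filepath, 'a') that follows can no longer fail just because the cleanup branch, previously the only place the directory was created, was skipped under --no-clean.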
@@ -902,13 +902,14 @@ class NodeManager(object):
 
     @run_with_lock
     def run_scripts_all_pairs(self, maxthreads, fake=False):
-        if len(self.selected_nodes) < 2:
+        nodes = self.selected_nodes.values()
+        if len(nodes) < 2:
             self.logger.warning('less than 2 nodes are available, '
                                 'skipping paired scripts')
             return
         run_server_start_items = []
         run_server_stop_items = []
-        for n in self.selected_nodes.values():
+        for n in nodes:
             start_args = {'phase': 'server_start', 'fake': fake}
             run_server_start_items.append(tools.RunItem(target=n.exec_pair,
                                                         args=start_args,
@@ -920,7 +921,8 @@ class NodeManager(object):
                                 dict_result=True)
         for key in result:
             self.nodes[key].scripts_all_pairs = result[key]
-        for pairset in tools.all_pairs(self.selected_nodes.values()):
+        one_way = self.conf['scripts_all_pairs_one_way']
+        for pairset in tools.all_pairs(nodes, one_way=one_way):
             run_client_items = []
             self.logger.info(['%s->%s' % (p[0].ip, p[1].ip) for p in pairset])
             for pair in pairset:
@@ -953,5 +955,6 @@ class NodeManager(object):
 def main(argv=None):
     return 0
 
+
 if __name__ == '__main__':
     sys.exit(main(sys.argv))
@@ -408,29 +408,31 @@ def w_list(value):
     return value if type(value) == list else [value]
 
 
-def all_pairs(items):
-    def incomplete(i_set, p_dict):
-        for i, p_set in p_dict.items():
-            not_paired = i_set.difference(p_set).difference([i])
-            if not_paired:
-                return not_paired
+def all_pairs(items, one_way=False):
+    def incomplete(items_set, paired_dict):
+        for paired_set in paired_dict.values():
+            if items_set.difference(paired_set):
+                return True
 
     items_set = set(items)
     pairs = []
     paired = {}
     for i in items_set:
-        paired[i] = set()
+        paired[i] = set([i])
     while incomplete(items_set, paired):
         busy = set()
         current_pairs = []
-        for i in [i for i in items if items_set.difference(paired[i])]:
-            can_pair = incomplete(items_set.difference(busy), {i: paired[i]})
-            if i not in busy and can_pair:
-                pair_i = next(iter(can_pair))
-                current_pairs.append([i, pair_i])
-                busy.add(i)
-                busy.add(pair_i)
-                paired[i].add(pair_i)
+        for i in items_set:
+            if items_set.difference(paired[i]) and i not in busy:
+                can_pair = items_set.difference(busy).difference(paired[i])
+                if can_pair:
+                    pair_i = can_pair.pop()
+                    current_pairs.append([i, pair_i])
+                    busy.add(i)
+                    busy.add(pair_i)
+                    paired[i].add(pair_i)
+                    if one_way:
+                        paired[pair_i].add(i)
         pairs.append(current_pairs)
     return pairs
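
The contract is easier to see on a small input: all_pairs() returns rounds of disjoint [client, server] pairs, so every pair in a round can run concurrently, and across rounds each ordered pair occurs exactly once — or each unordered pair once when one_way=True. A quick self-check with invented node names:

from timmy.tools import all_pairs

nodes = ['a', 'b', 'c', 'd']
rounds = all_pairs(nodes)
assert sum(len(r) for r in rounds) == 4 * 3       # both a->b and b->a run
one_way_rounds = all_pairs(nodes, one_way=True)
assert sum(len(r) for r in one_way_rounds) == 6   # each pair runs once
for r in rounds:                                  # no node twice per round
    flat = [n for pair in r for n in pair]
    assert len(flat) == len(set(flat))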
@@ -78,13 +78,13 @@ scripts:
       controller: [nova-manage-vm-list]
   '7.0':
     by_roles:
-      fuel: [fuel-notifications, fuel-dockerctl-list, fuel-docker-ps, fuel-dockerctl-check, docker-images]
+      fuel: [fuel-notifications, fuel-dockerctl-list, fuel-docker-ps, fuel-dockerctl-check, docker-images, fuel-all-network-templates-combined-yaml]
   '8.0':
     by_roles:
-      fuel: [fuel-notifications, fuel-dockerctl-list, fuel-docker-ps, fuel-dockerctl-check, docker-images, fuel-bootstrap-list]
+      fuel: [fuel-notifications, fuel-dockerctl-list, fuel-docker-ps, fuel-dockerctl-check, docker-images, fuel-bootstrap-list, fuel-all-network-templates-combined-yaml]
   '9.0':
     by_roles:
-      fuel: [fuel-notifications, fuel-bootstrap-list, shotgun2-report]
+      fuel: [fuel-notifications, fuel-bootstrap-list, shotgun2-report, fuel-all-network-templates-combined-yaml]
   by_roles:
     fuel: [fuel-release, fuel-task-list, fuel-environment-list, fuel-postgres-dump, fuel-node-json]
     cinder: [ovs-vsctl-show, cinder-manage]
timmy_data/rq/scripts/fuel-all-network-templates-combined-yaml | 26 (new executable file)

@@ -0,0 +1,26 @@
#!/bin/bash
exec /usr/bin/env python - << EOF
#!/usr/bin/env python2
# -*- coding: utf-8 -*-

from fuelclient.v1.environment import EnvironmentClient
import subprocess
import json
import yaml


class CEnvironmentClient(EnvironmentClient):
    def download_network_template(self, environment_id):
        env = self._entity_wrapper(environment_id)
        template_data = env.get_network_template_data()
        return template_data


ec = CEnvironmentClient()
p = subprocess.Popen(['fuel', 'env', 'list', '--json'],
                     stdout=subprocess.PIPE,
                     stderr=subprocess.PIPE)
out, err = p.communicate()
clusters_dict = json.loads(out)
for cluster in clusters_dict:
    clid = int(cluster['id'])
    print('---\n# cluster id:%s\n%s' % (clid, yaml.dump(yaml.load(json.dumps(ec.download_network_template(clid))))))
EOF