Merge "fix: conf_assign_once can assign to skipped nodes"

commit 851e279185
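The bug named in the title: the assign-once configuration logic walked self.nodes, so a value could be claimed by a node that the soft filter had already marked as skipped, and the nodes actually selected for the run never received it. Below is a minimal, hypothetical sketch of that failure mode and of the fix pattern used in this change; DemoNode and DemoManager are invented for illustration and are not timmy code.

# Hypothetical, simplified illustration (DemoNode/DemoManager are invented
# for this sketch and are not timmy code).

class DemoNode(object):
    def __init__(self, ip, skipped=False):
        self.ip = ip
        self.skipped = skipped      # set by the soft filter in the real tool
        self.once_value = None


class DemoManager(object):
    def __init__(self, nodes):
        self.nodes = nodes          # dict: ip -> DemoNode

    @property
    def selected_nodes(self):
        # Same idea as the property added in this commit: skipped nodes are
        # filtered in one place instead of at every call site.
        return dict((ip, n) for ip, n in self.nodes.items() if not n.skipped)

    def assign_once_buggy(self, value):
        # Old behaviour: every node is a candidate, so the single assignment
        # can land on a skipped node and never reach a selected one.
        for node in self.nodes.values():
            node.once_value = value
            return

    def assign_once_fixed(self, value):
        # New behaviour: only selected (non-skipped) nodes are candidates.
        for node in self.selected_nodes.values():
            node.once_value = value
            return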
@@ -445,10 +445,9 @@ class NodeManager(BaseNodeManager):
 
     def get_release_cli(self):
         run_items = []
-        for key, node in self.nodes.items():
-            if not node.filtered_out:
-                run_items.append(tools.RunItem(target=node.get_release,
-                                               key=key))
+        for key, node in self.selected_nodes.items():
+            run_items.append(tools.RunItem(target=node.get_release,
+                                           key=key))
         result = tools.run_batch(run_items, 100, dict_result=True)
         if result:
             for key in result:
@@ -464,9 +463,8 @@ class NodeManager(BaseNodeManager):
 
     def nodes_get_roles_hiera(self, maxthreads=100):
         run_items = []
-        for key, node in self.nodes.items():
-            if all([not node.filtered_out, not node.roles,
-                    node.status != 'discover']):
+        for key, node in self.selected_nodes.items():
+            if node.status != 'discover' and not node.roles:
                 run_items.append(tools.RunItem(target=node.get_roles_hiera,
                                                key=key))
         result = tools.run_batch(run_items, maxthreads, dict_result=True)
@@ -477,8 +475,8 @@ class NodeManager(BaseNodeManager):
     def nodes_get_cluster_ids(self, maxthreads=100):
         self.logger.debug('getting cluster ids from nodes')
         run_items = []
-        for key, node in self.nodes.items():
-            if not node.filtered_out and not node.cluster:
+        for key, node in self.selected_nodes.items():
+            if not node.cluster:
                 run_items.append(tools.RunItem(target=node.get_cluster_id,
                                                key=key))
         result = tools.run_batch(run_items, maxthreads, dict_result=True)
@@ -82,7 +82,7 @@ class Node(object):
         self.name = name
         self.fqdn = fqdn
         self.accessible = True
-        self.filtered_out = False
+        self.skipped = False
         self.outputs_timestamp = False
         self.outputs_timestamp_dir = None
         self.apply_conf(conf)
@@ -97,10 +97,10 @@ class Node(object):
         return self.pt.format(*fields)
 
     def print_table(self):
-        if not self.filtered_out:
+        if not self.skipped:
             my_id = self.id
         else:
-            my_id = str(self.id) + ' [skipped]'
+            my_id = '%s [skipped]' % self.id
         return [str(my_id), str(self.cluster), str(self.ip), str(self.mac),
                 self.os_platform, ','.join(self.roles),
                 str(self.online), str(self.accessible), str(self.status),
@@ -542,7 +542,7 @@ class NodeManager(object):
         # apply soft-filter on all nodes
         for node in self.nodes.values():
             if not self.filter(node, self.conf['soft_filter']):
-                node.filtered_out = True
+                node.skipped = True
 
     def post_init(self):
         self.nodes_reapply_conf()
@@ -663,7 +663,7 @@ class NodeManager(object):
             attr_name = k[len(once_p):]
             assigned = dict((k, None) for k in self.conf[k])
             for ak in assigned:
-                for node in self.nodes.values():
+                for node in self.selected_nodes.values():
                     if hasattr(node, attr_name) and not assigned[ak]:
                         attr = w_list(getattr(node, attr_name))
                         for v in attr:
@@ -681,8 +681,8 @@ class NodeManager(object):
 
     def nodes_get_os(self, maxthreads=100):
         run_items = []
-        for key, node in self.nodes.items():
-            if not node.filtered_out and not node.os_platform:
+        for key, node in self.selected_nodes.items():
+            if not node.os_platform:
                 run_items.append(tools.RunItem(target=node.get_os, key=key))
         result = tools.run_batch(run_items, maxthreads, dict_result=True)
         for key in result:
@@ -692,10 +692,9 @@ class NodeManager(object):
     def nodes_check_access(self, maxthreads=100):
         self.logger.debug('checking if nodes are accessible')
         run_items = []
-        for key, node in self.nodes.items():
-            if not node.filtered_out:
-                run_items.append(tools.RunItem(target=node.check_access,
-                                               key=key))
+        for key, node in self.selected_nodes.items():
+            run_items.append(tools.RunItem(target=node.check_access,
+                                           key=key))
         result = tools.run_batch(run_items, maxthreads, dict_result=True)
         for key in result:
             self.nodes[key].accessible = result[key]
@@ -730,11 +729,10 @@ class NodeManager(object):
     @run_with_lock
     def run_commands(self, timeout=15, fake=False, maxthreads=100):
         run_items = []
-        for key, node in self.nodes.items():
-            if not node.filtered_out:
-                run_items.append(tools.RunItem(target=node.exec_cmd,
-                                               args={'fake': fake},
-                                               key=key))
+        for key, node in self.selected_nodes.items():
+            run_items.append(tools.RunItem(target=node.exec_cmd,
+                                           args={'fake': fake},
+                                           key=key))
         result = tools.run_batch(run_items, maxthreads, dict_result=True)
         for key in result:
             self.nodes[key].mapcmds = result[key][0]
@@ -743,15 +741,14 @@ class NodeManager(object):
     def calculate_log_size(self, timeout=15, maxthreads=100):
         total_size = 0
         run_items = []
-        for key, node in self.nodes.items():
-            if not node.filtered_out:
-                run_items.append(tools.RunItem(target=node.logs_populate,
-                                               args={'timeout': timeout},
-                                               key=key))
+        for key, node in self.selected_nodes.items():
+            run_items.append(tools.RunItem(target=node.logs_populate,
+                                           args={'timeout': timeout},
+                                           key=key))
         result = tools.run_batch(run_items, maxthreads, dict_result=True)
         for key in result:
             self.nodes[key].logs = result[key]
-        for node in self.nodes.values():
+        for node in self.selected_nodes.values():
             total_size += sum(node.logs_dict().values())
         self.logger.info('Full log size on nodes(with fuel): %d bytes' %
                          total_size)
@@ -803,7 +800,7 @@ class NodeManager(object):
 
     def find_adm_interface_speed(self):
         '''Returns interface speed through which logs will be dowloaded'''
-        for node in self.nodes.values():
+        for node in self.selected_nodes.values():
             if not (node.ip == 'localhost' or node.ip.startswith('127.')):
                 cmd = ("%s$(/sbin/ip -o route get %s | cut -d' ' -f3)/speed" %
                        ('cat /sys/class/net/', node.ip))
@@ -831,7 +828,7 @@ class NodeManager(object):
         py_slowpipe = tools.slowpipe % speed
         limitcmd = "| python -c '%s'; exit ${PIPESTATUS}" % py_slowpipe
         run_items = []
-        for node in [n for n in self.nodes.values() if not n.filtered_out]:
+        for node in self.selected_nodes.values():
             if not node.logs_dict():
                 self.logger.info(("%s: no logs to collect") % node.repr)
                 continue
@@ -860,26 +857,26 @@ class NodeManager(object):
     @run_with_lock
     def get_files(self, timeout=15):
         run_items = []
-        for n in [n for n in self.nodes.values() if not n.filtered_out]:
-            run_items.append(tools.RunItem(target=n.get_files))
+        for node in self.selected_nodes.values():
+            run_items.append(tools.RunItem(target=node.get_files))
         tools.run_batch(run_items, 10)
 
     @run_with_lock
     def put_files(self):
         run_items = []
-        for n in [n for n in self.nodes.values() if not n.filtered_out]:
-            run_items.append(tools.RunItem(target=n.put_files))
+        for node in self.selected_nodes.values():
+            run_items.append(tools.RunItem(target=node.put_files))
         tools.run_batch(run_items, 10)
 
     @run_with_lock
     def run_scripts_all_pairs(self, maxthreads, fake=False):
-        if len(self.selected_nodes()) < 2:
+        if len(self.selected_nodes) < 2:
             self.logger.warning('less than 2 nodes are available, '
                                 'skipping paired scripts')
             return
         run_server_start_items = []
         run_server_stop_items = []
-        for n in self.selected_nodes():
+        for n in self.selected_nodes.values():
             start_args = {'phase': 'server_start', 'fake': fake}
             run_server_start_items.append(tools.RunItem(target=n.exec_pair,
                                                         args=start_args,
@@ -891,7 +888,7 @@ class NodeManager(object):
                                  dict_result=True)
         for key in result:
             self.nodes[key].scripts_all_pairs = result[key]
-        for pairset in tools.all_pairs(self.selected_nodes()):
+        for pairset in tools.all_pairs(self.selected_nodes.values()):
             run_client_items = []
             self.logger.info(['%s->%s' % (p[0].ip, p[1].ip) for p in pairset])
             for pair in pairset:
@@ -916,8 +913,9 @@ class NodeManager(object):
                 nodes[k].append(n)
         return nodes
 
+    @property
     def selected_nodes(self):
-        return [n for n in self.nodes.values() if not n.filtered_out]
+        return dict([(ip, n) for ip, n in self.nodes.items() if not n.skipped])
 
 
 def main(argv=None):
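A note on the last hunk: selected_nodes changes from a method returning a list to a property returning a dict keyed like self.nodes, which is why the call sites above drop the parentheses and can use len(), .items() and .values() directly. Continuing the hypothetical DemoManager/DemoNode sketch from before the diff (illustrative usage only, not timmy code):

# mgr behaves like the manager sketched above; only non-skipped nodes appear.
mgr = DemoManager({'10.0.0.1': DemoNode('10.0.0.1', skipped=True),
                   '10.0.0.2': DemoNode('10.0.0.2')})
print(len(mgr.selected_nodes))                 # property access, no call
for ip, node in mgr.selected_nodes.items():    # mirrors mgr.nodes.items()
    print(ip, node.skipped)                    # prints only 10.0.0.2, False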