Change output format and introduce the new report

Execution results are now written as a flat list of records, where each
record is attributed with the scenario name, test name, concurrency
level, node hostname and agent id.
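
For illustration only, a raw record in the new flat format might look
roughly like this (values are hypothetical; the attribution keys match
the ones added in executor.py and server.py below):

    # hypothetical example of one flat record; values are made up
    record = {
        'type': 'raw',                   # vs. 'agg_concurrency' / 'agg_test'
        'scenario': 'networking/full_l2',
        'test': 'iperf_tcp',
        'concurrency': 2,
        'node': 'compute-1',
        'agent_id': 'agent_002',
        'command': 'iperf --client 10.0.0.2 --time 60',
        'stderr': '',
        'meta': [['time', 's'], ['bandwidth', 'bps']],
        'samples': [[0, 35000000], [1, 36000000]],
    }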

The report is restyled completely. The new approach is to show a single
slice of data at a time: tree-like navigation is replaced by selectors
that allow drilling down and browsing the data from different points of
view.
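
The drill-down itself amounts to filtering that flat list; a rough
Python sketch of the selector model (function name hypothetical):

    def select_slice(records, scenario=None, test=None, concurrency=None):
        # records matching the chosen selectors form one slice;
        # omitted selectors act as wildcards
        wanted = {'scenario': scenario, 'test': test,
                  'concurrency': concurrency}
        active = {k: v for k, v in wanted.items() if v is not None}
        return [r for r in records
                if all(r.get(k) == v for k, v in active.items())]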

Change-Id: Ic760bb48b1dabd8e05ccb05b72da1a1d9f6a2441
Ilya Shakhat 2015-04-10 19:35:58 +03:00
parent d79f932d98
commit 1bed81b604
9 changed files with 710 additions and 815 deletions

View File

@@ -18,11 +18,11 @@ class BaseAggregator(object):
def __init__(self, test_definition):
self.test_definition = test_definition
def test_summary(self, test_data):
def test_summary(self, records):
pass
def iteration_summary(self, iteration_data):
def concurrency_summary(self, records):
pass
def agent_summary(self, agent_data):
def record_summary(self, record):
pass

View File

@@ -14,7 +14,6 @@
# limitations under the License.
import collections
import uuid
from oslo_log import log as logging
@@ -43,25 +42,25 @@ class TrafficAggregator(base.BaseAggregator):
def __init__(self, test_definition):
super(TrafficAggregator, self).__init__(test_definition)
def test_summary(self, test_data):
def test_summary(self, records):
chart = []
xs = []
mean_v = collections.defaultdict(list)
for iteration in test_data['results_per_iteration']:
xs.append(len(iteration['results_per_agent']))
for k, v in iteration['stats'].items():
for record in records:
xs.append(record['concurrency'])
for k, v in record['stats'].items():
mean_v[k].append(v['mean'])
for k in mean_v.keys():
chart.append(['Mean %s' % k] + mean_v[k])
chart.append(['x'] + xs)
test_data.update({
return {
'chart': chart,
})
}
def iteration_summary(self, iteration_data):
def concurrency_summary(self, records):
max_v = collections.defaultdict(list)
min_v = collections.defaultdict(list)
mean_v = collections.defaultdict(list)
@@ -69,11 +68,11 @@ class TrafficAggregator(base.BaseAggregator):
chart = []
nodes = []
for one in iteration_data['results_per_agent']:
nodes.append(one['agent']['node'])
chart += one['chart']
for record in records:
nodes.append(record['node'])
chart += record['chart']
for k, v in one['stats'].items():
for k, v in record['stats'].items():
max_v[k].append(v['max'])
min_v[k].append(v['min'])
mean_v[k].append(v['mean'])
@@ -91,39 +90,38 @@ class TrafficAggregator(base.BaseAggregator):
node_chart.append(['Max %s' % k] + max_v[k])
node_chart.append(['Min %s' % k] + min_v[k])
iteration_data.update({
'uuid': uuid.uuid4(),
return {
'stats': stats,
'x-chart': chart,
'node_chart': node_chart,
})
}
def agent_summary(self, agent_data):
def record_summary(self, record):
# convert bps to Mbps
for idx, item_meta in enumerate(agent_data.get('meta', [])):
for idx, item_meta in enumerate(record.get('meta', [])):
if item_meta[1] == 'bps':
for row in agent_data.get('samples'):
for row in record.get('samples'):
if row[idx]:
row[idx] = float(row[idx]) / 1024 / 1024
item_meta[1] = 'Mbps'
# calculate stats
agent_data['stats'] = dict()
agent_data['chart'] = []
record['stats'] = dict()
record['chart'] = []
for idx, item_meta in enumerate(agent_data.get('meta', [])):
column = [row[idx] for row in agent_data.get('samples')]
for idx, item_meta in enumerate(record.get('meta', [])):
column = [row[idx] for row in record.get('samples')]
item_title = item_meta[0]
if item_title != 'time':
agent_data['stats'][item_title] = {
record['stats'][item_title] = {
'max': safe_max(column),
'min': safe_min(column),
'mean': mean(column),
'unit': item_meta[1],
}
agent_data['chart'].append([item_title] + column)
record['chart'].append([item_title] + column)
# drop stdout
if 'stdout' in agent_data:
del agent_data['stdout']
if 'stdout' in record:
del record['stdout']

View File

@@ -95,8 +95,8 @@ SERVER_OPTS = [
REPORT_OPTS = [
cfg.StrOpt('report-template',
default=(utils.env('SHAKER_REPORT_TEMPLATE') or
'shaker/resources/report_template.jinja2'),
help='Report template in Jinja format'),
'shaker/resources/report_template.html'),
help='Report template file name'),
cfg.StrOpt('report',
default=utils.env('SHAKER_REPORT'),
help='Report file name, defaults to env[SHAKER_REPORT]. '),

View File

@@ -61,9 +61,7 @@ class BaseExecutor(object):
self.test_definition, self.agent, message)
return dict(stdout=message.get('stdout'),
stderr=message.get('stderr'),
command=self.get_command(),
agent=self.agent)
command=self.get_command())
def process_failure(self):
return dict(command=self.get_command(),
agent=self.agent)
return dict(command=self.get_command())

View File

@@ -31,62 +31,73 @@ from shaker.engine import utils
LOG = logging.getLogger(__name__)
def calculate_stats(data):
for test_result in data.get('result', []):
aggregator = aggregators.get_aggregator(test_result['definition'])
def calculate_stats(records, tests):
aggregates = []
# scenario -> test -> concurrency -> [record]
rec_map = collections.defaultdict(
functools.partial(collections.defaultdict,
functools.partial(collections.defaultdict, list)))
for iteration_result in test_result['results_per_iteration']:
for agent_result in iteration_result['results_per_agent']:
aggregator.agent_summary(agent_result)
for record in records:
aggregator = aggregators.get_aggregator(tests[record['test']])
aggregator.record_summary(record)
aggregator.iteration_summary(iteration_result)
rec_map[record['scenario']][record['test']][
record['concurrency']].append(record)
aggregator.test_summary(test_result)
for scenario, per_scenario in rec_map.items():
for test, per_test in per_scenario.items():
aggregator = aggregators.get_aggregator(tests[test])
concurrency_aggregates = []
for concurrency, per_concurrency in per_test.items():
summary = aggregator.concurrency_summary(per_concurrency)
if summary:
summary.update(dict(scenario=scenario, test=test,
concurrency=concurrency,
type='agg_concurrency'))
aggregates.append(summary)
concurrency_aggregates.append(summary)
per_test_summary = aggregator.test_summary(concurrency_aggregates)
if per_test_summary:
per_test_summary.update(dict(scenario=scenario, test=test,
type='agg_test'))
aggregates.append(per_test_summary)
return aggregates
SLARecord = collections.namedtuple('SLARecord',
['sla', 'status', 'location', 'stats'])
def _verify_stats_against_sla(sla, stats, location):
def _verify_stats_against_sla(sla, record, location):
res = []
for term in sla:
status = utils.eval_expr(term, stats)
status = utils.eval_expr(term, record['stats'])
sla_record = SLARecord(sla=term, status=status,
location=location, stats=stats)
location=location, stats=record['stats'])
res.append(sla_record)
LOG.debug('SLA: %s', sla_record)
return res
def verify_sla(data):
res = []
for test_result in data.get('result', []):
test_name = (test_result['definition'].get('title') or
test_result['definition'].get('class'))
sla = test_result['definition'].get('sla')
if not sla:
continue
def verify_sla(records, tests):
sla_results = []
# test -> [sla]
sla_map = dict((test_id, test['sla'])
for test_id, test in tests.items() if 'sla' in test)
for iteration_result in test_result['results_per_iteration']:
size = str(len(iteration_result['results_per_agent']))
sla_info = _verify_stats_against_sla(
sla, iteration_result['stats'],
'%s.%s' % (test_name, size))
res += sla_info
iteration_result['sla_info'] = sla_info
for agent_result in iteration_result['results_per_agent']:
agent_id = agent_result['agent']['id']
sla_info = _verify_stats_against_sla(
sla, agent_result['stats'],
'%s.%s.%s' % (test_name, size, agent_id))
res += sla_info
agent_result['sla_info'] = sla_info
return res
for record in records:
if (record['test'] in sla_map) and ('stats' in record):
sla = sla_map[record['test']]
path = [str(record[key])
for key in ['test', 'concurrency', 'node', 'agent_id']
if key in record]
info = _verify_stats_against_sla(sla, record, '.'.join(path))
sla_results += info
record['sla_info'] = info
return sla_results
def save_to_subunit(sla_res, subunit_filename):
@@ -122,14 +133,18 @@ def generate_report(data, report_template, report_filename, subunit_filename):
LOG.debug('Generating report, template: %s, output: %s',
report_template, report_filename or '<dummy>')
calculate_stats(data)
sla_res = verify_sla(data)
data['records'] += calculate_stats(data['records'], data['tests'])
sla_res = verify_sla(data['records'], data['tests'])
if subunit_filename:
save_to_subunit(sla_res, subunit_filename)
# add more filters to jinja
jinja_env = jinja2.Environment()
jinja_env = jinja2.Environment(variable_start_string='[[[',
variable_end_string=']]]',
comment_start_string='[[#',
comment_end_string='#]]')
jinja_env.filters['json'] = json.dumps
jinja_env.filters['yaml'] = functools.partial(yaml.safe_dump, indent=2,
default_flow_style=False)
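
The non-default delimiters above presumably keep the server-side Jinja
pass from clashing with brace-based syntax used by the client-side code
in the new report page (an assumption based on this hunk alone). A
minimal, self-contained sketch of rendering with such an environment
(template string hypothetical):

    import functools
    import json

    import jinja2
    import yaml

    # bracket delimiters leave {{ ... }} untouched for client-side code
    jinja_env = jinja2.Environment(variable_start_string='[[[',
                                   variable_end_string=']]]',
                                   comment_start_string='[[#',
                                   comment_end_string='#]]')
    jinja_env.filters['json'] = json.dumps
    jinja_env.filters['yaml'] = functools.partial(yaml.safe_dump, indent=2,
                                                  default_flow_style=False)
    template = jinja_env.from_string('var records = [[[ records|json ]]];')
    print(template.render(records=[{'test': 'iperf_tcp', 'concurrency': 1}]))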

View File

@@ -17,7 +17,7 @@ import copy
import json
import multiprocessing
import os
import uuid
import re
from oslo_config import cfg
from oslo_log import log as logging
@@ -47,6 +47,11 @@ def _extend_agents(agents_map):
return extended_agents
def _make_test_title(test):
s = test.get('title') or test.get('class')
return re.sub(r'[^\x21-\x7e\x80-\xff]+', '_', s).lower()
def _pick_agents(agents, size):
# slave agents do not execute any tests
agents = [a for a in agents.values() if a.get('mode') != 'slave']
@@ -68,41 +73,38 @@ def _pick_agents(agents, size):
def execute(quorum, execution, agents):
agents = _extend_agents(agents)
result = []
records = []
for test in execution['tests']:
LOG.debug('Running test %s on all agents', test)
test_title = _make_test_title(test)
results_per_iteration = []
for selected_agents in _pick_agents(agents, execution.get('size')):
executors = dict((a['id'], executors_classes.get_executor(test, a))
for a in selected_agents)
execution_result = quorum.execute(executors)
values = execution_result.values()
for v in values:
v['uuid'] = str(uuid.uuid4())
results_per_iteration.append({
'agents': selected_agents,
'results_per_agent': values,
})
test['uuid'] = str(uuid.uuid4())
result.append({
'results_per_iteration': results_per_iteration,
'definition': test,
})
for agent_id, data in execution_result.items():
data.update(dict(
agent_id=agent_id,
node=agents[agent_id].get('node'),
concurrency=len(selected_agents),
test=test_title,
executor=test.get('class'),
type='raw',
))
records.append(data)
LOG.info('Execution is done')
return result
return records
def play_scenario(scenario):
deployment = None
output = dict(scenario=scenario)
output = dict(scenario=scenario, records=[], agents={})
output['tests'] = dict((_make_test_title(test), test)
for test in scenario['execution']['tests'])
try:
deployment = deploy.Deployment(cfg.CONF.server_endpoint)
@@ -117,6 +119,7 @@ def play_scenario(scenario):
agents = deployment.deploy(scenario['deployment'],
base_dir=os.path.dirname(cfg.CONF.scenario))
agents = _extend_agents(agents)
output['agents'] = agents
LOG.debug('Deployed agents: %s', agents)
@ -139,14 +142,18 @@ def play_scenario(scenario):
quorum.join(set(agents.keys()))
execution_result = execute(quorum, scenario['execution'], agents)
output['result'] = execution_result
for record in execution_result:
record['scenario'] = (scenario.get('title') or
scenario.get('file_name'))
output['records'] = execution_result
except BaseException as e:
if isinstance(e, KeyboardInterrupt):
LOG.info('Caught SIGINT. Terminating')
else:
error_msg = 'Error while executing scenario: %s' % e
LOG.error(error_msg)
output['error'] = error_msg
LOG.exception(e)
output['scenario']['error'] = error_msg
finally:
if deployment:
deployment.cleanup()
@@ -160,13 +167,22 @@ def main():
config.REPORT_OPTS
)
scenario = utils.read_yaml_file(cfg.CONF.scenario)
scenario['file_name'] = cfg.CONF.scenario
output = dict(records=[], agents={}, scenarios={}, tests={})
output = play_scenario(scenario)
for scenario_file_name in [cfg.CONF.scenario]:
scenario = utils.read_yaml_file(scenario_file_name)
scenario['title'] = scenario.get('title') or scenario_file_name
scenario['file_name'] = cfg.CONF.scenario
play_output = play_scenario(scenario)
output['scenarios'][scenario['title']] = play_output['scenario']
output['records'] += play_output['records']
output['agents'].update(play_output['agents'])
output['tests'].update(play_output['tests'])
if cfg.CONF.output:
utils.write_file(json.dumps(output), cfg.CONF.output)
utils.write_file(json.dumps(output, indent=2), cfg.CONF.output)
if cfg.CONF.no_report_on_error and 'error' in output:
LOG.info('Skipped report generation due to errors and '

File diff suppressed because one or more lines are too long

View File

@@ -1,586 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Shaker Report</title>
<!-- Latest compiled and minified CSS -->
<link rel="stylesheet"
href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.2/css/bootstrap.min.css">
<!-- Optional theme -->
<link rel="stylesheet"
href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.2/css/bootstrap-theme.min.css">
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.2/jquery.min.js"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.2/js/bootstrap.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.5/d3.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/c3/0.4.9/c3.min.js"></script>
<script type="text/javascript" src="https://cdn.datatables.net/1.10.2/js/jquery.dataTables.min.js"></script>
<link rel=stylesheet type=text/css href="https://cdn.datatables.net/1.10.2/css/jquery.dataTables.min.css">
<style type="text/css">
body {
padding-top: 50px;
}
.navbar-fixed-top {
border: 0 none;
}
.navbar-inverse .navbar-brand {
color: white;
}
.sidebar {
display: none;
}
@media (min-width: 768px) {
.sidebar {
background-color: #f5f5f5;
border-right: 1px solid #ccc;
bottom: 0;
display: block;
left: 0;
overflow-x: hidden;
overflow-y: auto;
padding: 12px;
position: fixed;
top: 51px;
z-index: 1000;
}
}
.nav-sidebar {
margin-bottom: 10px;
margin-left: -20px;
margin-right: -21px;
}
.nav-sidebar > li > a {
padding: 5px 20px;
}
.nav-sidebar > .active > a, .nav-sidebar > .active > a:hover, .nav-sidebar > .active > a:focus {
background-color: #428bca;
color: #fff;
}
.main {
padding: 10px;
}
@media (min-width: 768px) {
.main {
padding-left: 40px;
padding-right: 40px;
}
}
h5 {
font-weight: bold;
}
/*-- Chart --*/
.c3 svg {
font: 10px sans-serif;
}
.c3 path, .c3 line {
fill: none;
stroke: #000;
}
.c3 text {
-webkit-user-select: none;
-moz-user-select: none;
user-select: none;
}
.c3-legend-item-tile,
.c3-xgrid-focus,
.c3-ygrid,
.c3-event-rect,
.c3-bars path {
shape-rendering: crispEdges;
}
.c3-chart-arc path {
stroke: #fff;
}
.c3-chart-arc text {
fill: #fff;
font-size: 13px;
}
/*-- Axis --*/
.c3-axis-x .tick {
}
.c3-axis-x-label {
}
.c3-axis-y .tick {
}
.c3-axis-y-label {
}
.c3-axis-y2 .tick {
}
.c3-axis-y2-label {
}
/*-- Grid --*/
.c3-grid line {
stroke: #aaa;
}
.c3-grid text {
fill: #aaa;
}
.c3-xgrid, .c3-ygrid {
stroke-dasharray: 3 3;
}
.c3-xgrid-focus {
}
/*-- Text on Chart --*/
.c3-text {
}
.c3-text.c3-empty {
fill: #808080;
font-size: 2em;
}
/*-- Line --*/
.c3-line {
stroke-width: 1px;
}
/*-- Point --*/
.c3-circle._expanded_ {
stroke-width: 1px;
stroke: white;
}
.c3-selected-circle {
fill: white;
stroke-width: 2px;
}
/*-- Bar --*/
.c3-bar {
stroke-width: 0;
}
.c3-bar._expanded_ {
fill-opacity: 0.75;
}
/*-- Arc --*/
.c3-chart-arcs-title {
font-size: 1.3em;
}
/*-- Focus --*/
.c3-target.c3-focused {
opacity: 1;
}
.c3-target.c3-focused path.c3-line, .c3-target.c3-focused path.c3-step {
stroke-width: 2px;
}
.c3-target.c3-defocused {
opacity: 0.3 !important;
}
/*-- Region --*/
.c3-region {
fill: steelblue;
fill-opacity: .1;
}
/*-- Brush --*/
.c3-brush .extent {
fill-opacity: .1;
}
/*-- Select - Drag --*/
.c3-dragarea {
}
/*-- Legend --*/
.c3-legend-item {
font-size: 12px;
}
.c3-legend-item-hidden {
opacity: 0.15;
}
.c3-legend-background {
opacity: 0.75;
fill: white;
stroke: lightgray;
stroke-width: 1
}
/*-- Tooltip --*/
.c3-tooltip-container {
z-index: 10;
}
.c3-tooltip {
border-collapse:collapse;
border-spacing:0;
background-color:#fff;
empty-cells:show;
-webkit-box-shadow: 7px 7px 12px -9px rgb(119,119,119);
-moz-box-shadow: 7px 7px 12px -9px rgb(119,119,119);
box-shadow: 7px 7px 12px -9px rgb(119,119,119);
opacity: 0.9;
}
.c3-tooltip tr {
border:1px solid #CCC;
}
.c3-tooltip th {
background-color: #aaa;
font-size:14px;
padding:2px 5px;
text-align:left;
color:#FFF;
}
.c3-tooltip td {
font-size:13px;
padding: 3px 6px;
background-color:#fff;
border-left:1px dotted #999;
}
.c3-tooltip td > span {
display: inline-block;
width: 10px;
height: 10px;
margin-right: 6px;
}
.c3-tooltip td.value{
text-align: right;
}
.c3-area {
stroke-width: 0;
opacity: 0.2;
}
.c3-chart-arcs .c3-chart-arcs-background {
fill: #e0e0e0;
stroke: none;
}
.c3-chart-arcs .c3-chart-arcs-gauge-unit {
fill: #000;
font-size: 16px;
}
.c3-chart-arcs .c3-chart-arcs-gauge-max {
fill: #777;
}
.c3-chart-arcs .c3-chart-arcs-gauge-min {
fill: #777;
}
.c3-chart-arc .c3-gauge-value {
fill: #000;
/* font-size: 28px !important;*/
}
</style>
<script type="application/javascript">
$(document).ready(function () {
$("#scenario_agents").dataTable({
"autoWidth": true,
"paging": false,
"ordering": false,
"info": false
});
});
</script>
</head>
<body>
<nav class="navbar navbar-inverse navbar-fixed-top">
<div class="container-fluid">
<div class="navbar-header">
<button type="button" class="navbar-toggle collapsed"
data-toggle="collapse" data-target="#navbar"
aria-expanded="false" aria-controls="navbar">
<span class="sr-only">Toggle navigation</span>
</button>
<a class="navbar-brand">Shaker Report</a>
</div>
<div id="navbar" class="navbar-collapse collapse">
<ul class="nav navbar-nav navbar-right">
<li><a href="https://github.com/stackforge/shaker">About</a>
</li>
</ul>
</div>
</div>
</nav>
<div class="container-fluid">
<div class="row tabbable">
<div class="col-sm-3 col-md-2 sidebar">
<ul class="nav nav-sidebar">
<li class="active"><a href="#scenario" data-toggle="tab">Overview</a>
</li>
{% set tests = report.result %}
{% for test in tests %}
<li><a href="#test-{{ test.definition.uuid }}" data-toggle="tab">
{% if test.definition.title %}
{{ test.definition.title }}
{% else %}
Test {{ test.definition }}
{% endif %}
</a></li>
{% set iterations = test.results_per_iteration|length %}
{% for result_per_iteration in test.results_per_iteration %}
{% set cnt = result_per_iteration.agents|length %}
<li class="col-md-offset-1"><a href="#test-{{ test.definition.uuid }}-{{ cnt }}" data-toggle="tab">
{% if iterations == 1 %}
Details
{% else %}
{{ cnt }} Threads
{% endif %}
</a></li>
{% endfor %}
{% endfor %}
</ul>
</div>
<div class="col-sm-9 col-sm-offset-3 col-md-10 col-md-offset-2 main">
<div class="tab-content">
<div id="scenario" class="tab-pane active">
<h3>Scenario:</h3>
<pre>{{ report.scenario|yaml }}</pre>
{% if report.error %}
<h3 class="text-danger">Error</h3>
<pre class="bg-danger">{{ report.error }}</pre>
{% endif %}
<h3>Agents:</h3>
<table id="scenario_agents">
<thead><tr><th>Agent</th><th>Mode</th><th>IP</th><th>Node</th><th>Instance Name</th></tr></thead>
<tbody>
{% for agent in report.agents.values() %}
{% if agent.mode != 'slave' %}
<tr>
<td>{{ agent.id }}</td>
<td>{{ agent.mode }}</td>
<td>{{ agent.ip }}</td>
<td>{{ agent.node }}</td>
<td>{{ agent.instance_name }}</td>
</tr>
{% if agent.slave %}
<tr style="background-color: #efefef">
<td style="padding-left: 2em;">{{ agent.slave.id }}</td>
<td>{{ agent.slave.mode }}</td>
<td>{{ agent.slave.ip }}</td>
<td>{{ agent.slave.node }}</td>
<td>{{ agent.slave.instance_name }}</td>
</tr>
{% endif %}
{% endif %}
{% endfor %}
</tbody>
</table>
</div>
{% set tests = report.result %}
{% for test in tests %}
<div id="test-{{ test.definition.uuid }}" class="tab-pane">
<h3>Test Case Specification</h3>
<pre>{{ test.definition|yaml }}</pre>
{# show summary only of number of iterations > 1 #}
{% set iteration_cnt = test.results_per_iteration|length %}
{% if iteration_cnt > 1 %}
{% if test.chart %}
<div id="chart-{{ test.definition.uuid }}"></div>
<script type="application/javascript">
$(document).ready(function () {
c3.generate({
bindto: '#chart-{{ test.definition.uuid }}',
data: {
x: 'x',
columns: {{ test.chart|json }},
type: 'step'
},
axis: {
x: { label: '# threads' },
y: { label: 'Bandwidth, Mbits/s', min: 0 }
}
});
});
</script>
{% endif %}
{% endif %}
</div>
{% for result_per_iteration in test.results_per_iteration %}
{% set cnt = result_per_iteration.agents|length %}
<div id="test-{{ test.definition.uuid }}-{{ cnt }}" class="tab-pane">
<h3>Iteration Summary</h3>
{# show summary only of number of agents > 1 #}
{% set agent_cnt = result_per_iteration.results_per_agent|length %}
{% if agent_cnt > 1 %}
{% if result_per_iteration.stats %}
<div class="row">
{% for stat_title, stat_values in result_per_iteration.stats.items() %}
<div class="col-md-4">
<h5>Stats for {{ stat_title }}</h5>
<dl class="dl-horizontal">
<dt>Max</dt><dd>{{ stat_values.max|round(2) }} {{ stat_values.unit }}</dd>
<dt>Min</dt><dd>{{ stat_values.min|round(2) }} {{ stat_values.unit }}</dd>
<dt>Mean</dt><dd>{{ stat_values.mean|round(2) }} {{ stat_values.unit }}</dd>
</dl>
</div>
{% endfor %}
</div>
{% endif %}
{% if result_per_iteration.chart %}
<div id="chart-{{ result_per_iteration.uuid }}"></div>
<script type="application/javascript">
$(document).ready(function () {
c3.generate({
bindto: '#chart-{{ result_per_iteration.uuid }}',
data: {
x: 'time',
columns: {{ result_per_iteration.chart|json }},
types: { bandwidth: 'area' }
},
axis: {
x: { label: 'time' },
y: { label: 'Bandwidth, Mbits/s', min: 0 }
}
});
});
</script>
{% endif %}
{% if result_per_iteration.node_chart %}
<h5>Per-node stats</h5>
<div id="chart-{{ result_per_iteration.uuid }}-node"></div>
<script type="application/javascript">
$(document).ready(function () {
c3.generate({
bindto: '#chart-{{ result_per_iteration.uuid }}-node',
data: {
x: 'x',
columns: {{ result_per_iteration.node_chart|json }},
type: 'step',
order: null
},
axis: {
x: { type: 'category' }
}
});
});
</script>
{% endif %}
{% endif %}
{#### PER-AGENT DATA ####}
{% for result_per_agent in result_per_iteration.results_per_agent %}
<h4>Agent {{ result_per_agent.agent.id }}
({{ result_per_agent.agent.ip }}, {{ result_per_agent.agent.node }})</h4>
<h5>Status: {{ result_per_agent.status }}</h5>
{% if result_per_agent.samples %}
{% if result_per_agent.stats %}
<div class="row">
{% for stat_title, stat_values in result_per_agent.stats.items() %}
<div class="col-md-4">
<h5>Stats for {{ stat_title }}</h5>
<dl class="dl-horizontal">
<dt>Max</dt><dd>{{ stat_values.max|round(2) }} {{ stat_values.unit }}</dd>
<dt>Min</dt><dd>{{ stat_values.min|round(2) }} {{ stat_values.unit }}</dd>
<dt>Mean</dt><dd>{{ stat_values.mean|round(2) }} {{ stat_values.unit }}</dd>
</dl>
</div>
{% endfor %}
</div>
{% endif %}
{% if result_per_agent.chart %}
<div id="chart-{{ result_per_agent.uuid }}"></div>
<script type="application/javascript">
$(document).ready(function () {
c3.generate({
bindto: '#chart-{{ result_per_agent.uuid }}',
data: {
x: 'time',
columns: {{ result_per_agent.chart|json }},
types: { bandwidth: 'area' }
},
axis: {
x: { label: 'time' },
y: { label: 'Bandwidth, Mbits/s', min: 0 }
}
});
});
</script>
{% endif %}
{% endif %}
{% if result_per_agent.command %}
<h5>Command:</h5>
<pre>{{ result_per_agent.command }}</pre>
{% endif %}
{% if result_per_agent.stdout %}
<h5>Stdout:</h5>
<pre>{{ result_per_agent.stdout }}</pre>
{% endif %}
{% if result_per_agent.stderr %}
<h5>Stderr:</h5>
<pre>{{ result_per_agent.stderr }}</pre>
{% endif %}
{% endfor %}
</div>
{% endfor %}
{% endfor %}
</div>
</div>
</div>
</div>
</body>
</html>

View File

@@ -36,7 +36,7 @@ class TestTrafficAggregator(testtools.TestCase):
[5, 1.9, None]],
}
processed = copy.deepcopy(original)
aggregator.agent_summary(processed)
aggregator.record_summary(processed)
self.assertFalse('stdout' in processed)
@ -61,58 +61,57 @@ class TestTrafficAggregator(testtools.TestCase):
['TCP download', None, None, 60.0, 65.0, 61.0, None]]
self.assertEqual(expected_chart, processed['chart'])
def test_iteration_summary(self):
def test_concurrency_summary(self):
aggregator = traffic.TrafficAggregator(None)
original = {
'results_per_agent': [
{
'agent': {'node': 'alpha'},
'stats': {
'Ping ICMP': {
'max': 2.6,
'min': 1.9,
'mean': 2.2,
'unit': 'ms',
},
'TCP download': {
'max': 65.0,
'min': 60.0,
'mean': 62.0,
'unit': 'Mbps',
}
original = [
{
'agent_id': 'alpha_agent',
'node': 'alpha',
'stats': {
'Ping ICMP': {
'max': 2.6,
'min': 1.9,
'mean': 2.2,
'unit': 'ms',
},
'chart': [['time', 0, 1, 2, 3, 4, 5],
['Ping ICMP', 1.9, 2.4, 2.6, 2.2, 2.2, 1.9],
['TCP download', None, None, 60.0, 65.0, 61.0,
None]]
'TCP download': {
'max': 65.0,
'min': 60.0,
'mean': 62.0,
'unit': 'Mbps',
}
},
{
'agent': {'node': 'beta'},
'stats': {
'Ping ICMP': {
'max': 3.6,
'min': 2.9,
'mean': 3.2,
'unit': 'ms',
},
'TCP download': {
'max': 75.0,
'min': 70.0,
'mean': 72.0,
'unit': 'Mbps',
}
'chart': [['time', 0, 1, 2, 3, 4, 5],
['Ping ICMP', 1.9, 2.4, 2.6, 2.2, 2.2, 1.9],
['TCP download', None, None, 60.0, 65.0, 61.0,
None]]
},
{
'agent_id': 'beta_agent',
'node': 'beta',
'stats': {
'Ping ICMP': {
'max': 3.6,
'min': 2.9,
'mean': 3.2,
'unit': 'ms',
},
'chart': [['time', 0, 1, 2, 3, 4, 5],
['Ping ICMP', 2.9, 3.4, 3.6, 3.2, 3.2, 2.9],
['TCP download', None, None, 70.0, 75.0, 71.0,
None]]
'TCP download': {
'max': 75.0,
'min': 70.0,
'mean': 72.0,
'unit': 'Mbps',
}
},
]
}
processed = copy.deepcopy(original)
aggregator.iteration_summary(processed)
'chart': [['time', 0, 1, 2, 3, 4, 5],
['Ping ICMP', 2.9, 3.4, 3.6, 3.2, 3.2, 2.9],
['TCP download', None, None, 70.0, 75.0, 71.0,
None]]
},
]
aggregate = aggregator.concurrency_summary(original)
expected_stats = {
'Ping ICMP': {
'max': 3.6,
@ -128,4 +127,4 @@ class TestTrafficAggregator(testtools.TestCase):
}
}
self.assertEqual(expected_stats, processed['stats'])
self.assertEqual(expected_stats, aggregate['stats'])