Feature commit #5 to support running fio
Change-Id: Ib362fe7de3a84c37e4c11483c631fb6e4d6d0891
This commit is contained in:
parent
7f638bfd59
commit
5597be46a4
@ -57,8 +57,8 @@ sed -i "s/^exit\s0/python kb_vm_agent.py \&\n\0/g" /etc/rc.local
|
||||
# ======
|
||||
# Client
|
||||
# ======
|
||||
# python redis client
|
||||
pip install redis
|
||||
# python redis client, HdrHistogram_py
|
||||
pip install redis hdrhistogram
|
||||
|
||||
# Install HdrHistogram_c
|
||||
cd /tmp
|
||||
|
@ -13,11 +13,13 @@
|
||||
# under the License.
|
||||
#
|
||||
|
||||
import json
|
||||
import subprocess
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
|
||||
from hdrh.histogram import HdrHistogram
|
||||
import redis
|
||||
|
||||
# Define the version of the KloudBuster agent and VM image
|
||||
@ -29,7 +31,7 @@ import redis
|
||||
#
|
||||
# This version must be incremented if the interface changes or if new features
|
||||
# are added to the agent VM
|
||||
__version__ = '5'
|
||||
__version__ = '6'
|
||||
|
||||
# TODO(Logging on Agent)
|
||||
|
||||
@ -177,6 +179,13 @@ class KBA_Client(object):
|
||||
self.report('READY', None, __version__)
|
||||
time.sleep(2)
|
||||
|
||||
def post_processing(self, p_output):
    """Post-process the raw output of the benchmarking tool.

    For the storage client (FIO), the per-bucket latency histogram in the
    JSON output is compressed via HdrHistogram before being shipped back
    to the kb-master node.  For every other client type the output is
    passed through untouched.

    :param p_output: accumulated stdout of the tool (a JSON string)
    :return: the (possibly compressed) output string

    Bug fix: the original implementation had no return statement on the
    non-storage path, so it implicitly returned None.  The caller assigns
    the result back (p_output = self.post_processing(p_output)), which
    silently destroyed the output of non-storage clients.
    """
    # If the result is coming from the storage testing tool (FIO),
    # compress the buckets from the output using HdrHistogram.
    if self.__class__.__name__ == 'KBA_Storage_Client':
        return self.encode_bins(p_output)
    # Default path: hand the output back unchanged.
    return p_output
|
||||
|
||||
def exec_command(self, cmd):
|
||||
# Execute the command, and returns the outputs
|
||||
cmds = ['bash', '-c']
|
||||
@ -208,8 +217,8 @@ class KBA_Client(object):
|
||||
else:
|
||||
p_output += line
|
||||
if line.rstrip() == "}":
|
||||
p_output = self.post_processing(p_output)
|
||||
cmd_res_dict = dict(zip(("status", "stdout", "stderr"), (0, p_output, '')))
|
||||
continue
|
||||
|
||||
stderr = p.communicate()[1]
|
||||
return (p.returncode, p_output, stderr)
|
||||
@ -284,6 +293,34 @@ class KBA_HTTP_Client(KBA_Client):
|
||||
|
||||
class KBA_Storage_Client(KBA_Client):
|
||||
|
||||
def encode_bins(self, p_output):
    """Compress FIO's per-bucket completion-latency bins into HdrHistogram blobs.

    Parses the FIO JSON output, and for each of the read/write/trim
    sections replaces the raw ``clat.bins`` bucket counts (and the
    redundant ``clat.percentile`` table) with a single base64-encoded
    HdrHistogram under ``clat.hist``, which is far smaller to ship back
    to the master node.

    :param p_output: FIO JSON output as a string
    :return: the transformed document, re-serialized as a JSON string

    Correctness fix: the bucket->value math used Python-2 integer ``/``;
    under Python 3 that becomes true division and silently corrupts the
    decoded latencies.  Floor division ``//`` pins the intended
    semantics on both interpreters (``xrange`` is likewise replaced by
    the behavior-identical ``range``).
    """
    p_output = json.loads(p_output)
    test_list = ['read', 'write', 'trim']

    for test in test_list:
        # Histogram covering 1 usec .. 5 hours at 3 significant digits.
        histogram = HdrHistogram(1, 5 * 3600 * 1000, 3)
        clat = p_output['jobs'][0][test]['clat']['bins']
        # FIO bucket-layout metadata (assumes fio's FIO_IO_U_PLAT_*
        # fields are present in the bins dict -- TODO confirm for the
        # deployed fio version).
        total_buckets = clat['FIO_IO_U_PLAT_NR']
        grp_msb_bits = clat['FIO_IO_U_PLAT_BITS']
        buckets_per_grp = clat['FIO_IO_U_PLAT_VAL']

        for bucket in range(total_buckets):
            count = clat[str(bucket)]
            if not count:
                continue
            # Invert fio's grouped-bucket encoding back to a latency
            # value: the first group is linear, later groups are
            # log-spaced with buckets_per_grp linear steps per group.
            grp = bucket // buckets_per_grp
            subbucket = bucket % buckets_per_grp
            if grp == 0:
                val = subbucket - 1
            else:
                base = 2 ** (grp_msb_bits + grp - 1)
                val = int(base + (base // buckets_per_grp) * (subbucket - 0.5))
            histogram.record_value(val, count)

        # Replace the bulky raw tables with the compact encoded blob.
        p_output['jobs'][0][test]['clat']['hist'] = histogram.encode()
        p_output['jobs'][0][test]['clat'].pop('bins')
        p_output['jobs'][0][test]['clat'].pop('percentile')

    return json.dumps(p_output)
|
||||
|
||||
def exec_init_volume(self, size):
    """Initialize the attached volume to the given size.

    Builds the initialization command via KB_Instance, remembers it in
    self.last_cmd, and returns the result of executing it.
    """
    cmd = KB_Instance.init_volume(size)
    self.last_cmd = cmd
    return self.exec_command(cmd)
|
||||
|
@ -17,7 +17,7 @@ import json
|
||||
|
||||
from perf_tool import PerfTool
|
||||
|
||||
# from hdrh.histogram import HdrHistogram
|
||||
from hdrh.histogram import HdrHistogram
|
||||
import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
@ -41,8 +41,8 @@ class FioTool(PerfTool):
|
||||
read_bw = result['jobs'][0]['read']['bw']
|
||||
write_iops = result['jobs'][0]['write']['iops']
|
||||
write_bw = result['jobs'][0]['write']['bw']
|
||||
read_clat = result['jobs'][0]['read']['clat']['bins']
|
||||
write_clat = result['jobs'][0]['write']['clat']['bins']
|
||||
read_hist = result['jobs'][0]['read']['clat']['hist']
|
||||
write_hist = result['jobs'][0]['write']['clat']['hist']
|
||||
except Exception:
|
||||
return self.parse_error('Could not parse: "%s"' % (stdout))
|
||||
|
||||
@ -55,10 +55,10 @@ class FioTool(PerfTool):
|
||||
parsed_output['write_iops'] = write_iops
|
||||
if write_bw:
|
||||
parsed_output['write_bw'] = write_bw
|
||||
if read_bw and read_clat:
|
||||
parsed_output['read_clat'] = read_clat
|
||||
if write_bw and write_clat:
|
||||
parsed_output['write_clat'] = write_clat
|
||||
if read_bw and read_hist:
|
||||
parsed_output['read_hist'] = read_hist
|
||||
if write_bw and write_hist:
|
||||
parsed_output['write_hist'] = write_hist
|
||||
|
||||
return parsed_output
|
||||
|
||||
@ -78,39 +78,21 @@ class FioTool(PerfTool):
|
||||
clat_list = []
|
||||
# perc_list = [1, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 99, 99.5, 99.9, 99.95, 99.99]
|
||||
perc_list = [50, 75, 90, 99, 99.9, 99.99, 99.999]
|
||||
if 'read_clat' in results[0]['results']:
|
||||
clat_list.append('read_clat')
|
||||
if 'write_clat' in results[0]['results']:
|
||||
clat_list.append('write_clat')
|
||||
if 'read_hist' in results[0]['results']:
|
||||
clat_list.append('read_hist')
|
||||
if 'write_hist' in results[0]['results']:
|
||||
clat_list.append('write_hist')
|
||||
|
||||
for clat in clat_list:
|
||||
total_buckets = results[0]['results'][clat]['FIO_IO_U_PLAT_NR']
|
||||
grp_msb_bits = results[0]['results'][clat]['FIO_IO_U_PLAT_BITS']
|
||||
buckets_per_grp = results[0]['results'][clat]['FIO_IO_U_PLAT_VAL']
|
||||
|
||||
d_bins = {}
|
||||
total_count = cur_count = cur_bucket = 0
|
||||
all_res[clat] = []
|
||||
histogram = HdrHistogram(1, 5 * 3600 * 1000, 3)
|
||||
for item in results:
|
||||
for bucket in xrange(total_buckets):
|
||||
d_bins[bucket] = d_bins.get(bucket, 0) + item['results'][clat][str(bucket)]
|
||||
total_count += item['results'][clat][str(bucket)]
|
||||
histogram.decode_and_add(item['results'][clat])
|
||||
|
||||
for perc in perc_list:
|
||||
count_at_perc = float(perc) * total_count / 100
|
||||
while cur_count < count_at_perc and cur_bucket < total_buckets:
|
||||
cur_count += d_bins[cur_bucket]
|
||||
cur_bucket += 1
|
||||
|
||||
grp = cur_bucket / buckets_per_grp
|
||||
subbucket = cur_bucket % buckets_per_grp
|
||||
if grp == 0:
|
||||
val = subbucket - 1
|
||||
else:
|
||||
base = 2 ** (grp_msb_bits + grp - 1)
|
||||
val = int(base + (base / buckets_per_grp) * (subbucket - 0.5))
|
||||
|
||||
all_res[clat].append([perc, val])
|
||||
latency_dict = histogram.get_percentile_to_value_dict(perc_list)
|
||||
for key, value in latency_dict.iteritems():
|
||||
all_res[clat].append([key, value])
|
||||
all_res[clat].sort()
|
||||
|
||||
return all_res
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user