Feature enhancements for storage testing
1. volume_size will be ignored if not running a storage test
2. Change the degrading percentile calculation logic for progression runs
3. Remove total_server_vms counts from result reporting
4. The result will now show the total IOPS/throughput requested for all VMs

Change-Id: I5718734f6aa4fe2a94cf96340f92bd735e1c55db
parent 70742b22b9
commit f14c427391
@@ -187,6 +187,8 @@ class KBA_Client(object):
         # to kb-master node.
         if self.__class__.__name__ == 'KBA_Storage_Client':
             return self.encode_bins(p_output)
+        else:
+            return p_output

     def exec_command(self, cmd):
         # Execute the command, and returns the outputs
@@ -137,11 +137,14 @@ client:
 # the capacity of storage is reached, the overall performance will start
 # to degrade.
 #
-# e.g. In the randread and randwrite mode, the last run with 10 VMs is
-# measured at 1000 IOPS, i.e. 100 IOPS/VM; the current run with 11 VMs
-# is measured at 880 IOPS, i.e. 80 IOPS/VM. So this set of data shows a
-# 20% degrading. KloudBuster will continue the progression run if the
-# degrading percentile is within (less or equal) the range defined below.
+# e.g. In the randread and randwrite modes, suppose the IOPS is limited
+# to 100 IOPS/VM. In the iteration with 10 VMs, the requested IOPS for
+# the system is 100 * 10 = 1000. However, the measured IOPS is degraded
+# to only 800 IOPS, so the degrading percentile for this set of data is
+# calculated as (1000 - 800) / 1000 = 20%.
+#
+# KloudBuster will continue the progression run if the degrading
+# percentile is within (less than or equal to) the range defined below.
 storage_stop_limit: 20

 # Assign floating IP for every client side test VM
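The rewritten comment shifts the baseline for the degradation check from the previous iteration's per-VM results to the total requested for all VMs in the current iteration. A minimal standalone sketch of that calculation (the helper name and the numbers are illustrative only, not KloudBuster code):

def degradation_pct(requested_total, measured_total):
    # Degradation relative to the requested total, in percent.
    if not requested_total:
        return 0
    return (requested_total - measured_total) * 100.0 / requested_total

# 10 VMs limited to 100 IOPS each -> 1000 IOPS requested in total,
# 800 IOPS measured -> 20% degradation, right at storage_stop_limit: 20
print(degradation_pct(10 * 100, 800))  # 20.0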
@@ -244,8 +247,9 @@ client:
 iodepth: 64
 rate: '60M'

-# Volumes size in GB for each VM, setting to 0 to disable volume creation
-volume_size: 0
+# Volume size in GB for each VM
+# Takes effect only in storage testing mode
+volume_size: 1

 # Prompt before running benchmarking tools
 prompt_before_run: False
@@ -102,6 +102,10 @@ class KBConfig(object):
                           'storage performance tests.')
                 raise KBConfigParseException()

+        if not self.storage_mode:
+            # Ignore volume_size if not performing storage testing
+            self.config_scale.client['volume_size'] = 0
+
         if self.alt_cfg:
             self.config_scale = self.config_scale + AttrDict(self.alt_cfg)
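The net effect of this guard is that a non-zero volume_size in the config is silently dropped for non-storage runs, so no Cinder volumes get created. A rough standalone illustration, using a plain dict in place of KloudBuster's AttrDict-based config_scale (the function name is hypothetical):

def apply_storage_mode(client_cfg, storage_mode):
    # volume_size is only honoured when running in storage mode.
    if not storage_mode:
        client_cfg['volume_size'] = 0
    return client_cfg

print(apply_storage_mode({'volume_size': 1}, storage_mode=False))  # {'volume_size': 0}
print(apply_storage_mode({'volume_size': 1}, storage_mode=True))   # {'volume_size': 1}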
@@ -95,11 +95,14 @@ class KBRunner_Storage(KBRunner):
                 tc_result['block_size'] = cur_config['block_size']
                 tc_result['iodepth'] = cur_config['iodepth']
                 if 'rate_iops' in cur_config:
-                    tc_result['rate_iops'] = cur_config['rate_iops']
+                    tc_result['rate_iops'] = vm_count * cur_config['rate_iops']
                 if 'rate' in cur_config:
-                    tc_result['rate'] = cur_config['rate']
+                    req_rate = cur_config['rate']
+                    ex_unit = 'KMG'.find(req_rate[-1].upper())
+                    req_rate = vm_count * int(req_rate[:-1]) * (1024 ** (ex_unit))\
+                        if ex_unit != -1 else vm_count * int(req_rate)
+                    tc_result['rate'] = req_rate
                 tc_result['total_client_vms'] = vm_count
-                tc_result['total_server_vms'] = tc_result['total_client_vms']
                 self.tool_result.append(tc_result)
         except KBInitVolumeException:
             raise KBException("Could not initilize the volume.")
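The 'rate' value is an fio-style string such as '60M', so reporting the total requested throughput means decoding the K/M/G suffix and multiplying by the number of client VMs. A self-contained sketch of the same conversion used above (the standalone function name is illustrative; the resulting unit is presumably KB/s, matching fio's bandwidth reporting):

def total_requested_rate(rate, vm_count):
    # 'KMG'.find() maps K->0, M->1, G->2 and returns -1 for a plain number.
    ex_unit = 'KMG'.find(rate[-1].upper())
    if ex_unit != -1:
        return vm_count * int(rate[:-1]) * (1024 ** ex_unit)
    return vm_count * int(rate)

# 11 VMs at '60M' each -> 11 * 60 * 1024 = 675840
print(total_requested_rate('60M', 11))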
@@ -111,7 +114,6 @@ class KBRunner_Storage(KBRunner):

         if self.config.progression.enabled:
             self.tool_result = {}
-            self.last_result = None
             start = self.config.progression.vm_start
             step = self.config.progression.vm_step
             limit = self.config.progression.storage_stop_limit
@@ -136,23 +138,21 @@ class KBRunner_Storage(KBRunner):
                 LOG.info('-- Stage %s: %s --' % (cur_stage, str(self.tool_result)))
                 cur_stage += 1

-                if self.tool_result and self.last_result:
+                if self.tool_result:
                     for idx, cur_tc in enumerate(self.config.storage_tool_configs):
+                        req_iops = self.tool_result[idx].get('rate_iops', 0)
+                        req_rate = self.tool_result[idx].get('rate', 0)
                         if cur_tc['mode'] in ['randread', 'read']:
-                            last_iops = self.last_result[idx]['read_iops'] / cur_vm_count
-                            last_bw = self.last_result[idx]['read_bw'] / cur_vm_count
-                            cur_iops = self.tool_result[idx]['read_iops'] / target_vm_count
-                            cur_bw = self.tool_result[idx]['read_bw'] / target_vm_count
+                            cur_iops = int(self.tool_result[idx]['read_iops'])
+                            cur_rate = int(self.tool_result[idx]['read_bw'])
                         else:
-                            last_iops = self.last_result[idx]['write_iops'] / cur_vm_count
-                            last_bw = self.last_result[idx]['write_bw'] / cur_vm_count
-                            cur_iops = self.tool_result[idx]['write_iops'] / target_vm_count
-                            cur_bw = self.tool_result[idx]['write_bw'] / target_vm_count
+                            cur_iops = int(self.tool_result[idx]['write_iops'])
+                            cur_rate = int(self.tool_result[idx]['write_bw'])

-                        degrade_iops = (last_iops - cur_iops) * 100 / last_iops
-                        degrade_bw = (last_bw - cur_bw) * 100 / last_bw
+                        degrade_iops = (req_iops - cur_iops) * 100 / req_iops if req_iops else 0
+                        degrade_rate = (req_rate - cur_rate) * 100 / req_rate if req_rate else 0
                         if ((cur_tc['mode'] in ['randread', 'randwrite'] and degrade_iops > limit)
-                                or (cur_tc['mode'] in ['read', 'write'] and degrade_bw > limit)):
+                                or (cur_tc['mode'] in ['read', 'write'] and degrade_rate > limit)):
                             LOG.warning('KloudBuster is stopping the iteration because the result '
                                         'reaches the stop limit.')
                             tc_flag = True
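With this change, both degradation figures are computed against the requested totals stored in tool_result, and the progression stops as soon as the figure relevant to the test mode exceeds the configured limit. A condensed, standalone sketch of that decision (dict stand-ins for a tool config and a tool_result entry; not the actual KBRunner_Storage API):

def should_stop(tool_cfg, result, limit):
    # tool_cfg needs 'mode'; result is one tool_result entry.
    req_iops = result.get('rate_iops', 0)
    req_rate = result.get('rate', 0)
    prefix = 'read' if tool_cfg['mode'] in ['randread', 'read'] else 'write'
    cur_iops = int(result[prefix + '_iops'])
    cur_rate = int(result[prefix + '_bw'])
    degrade_iops = (req_iops - cur_iops) * 100 / req_iops if req_iops else 0
    degrade_rate = (req_rate - cur_rate) * 100 / req_rate if req_rate else 0
    if tool_cfg['mode'] in ['randread', 'randwrite']:
        return degrade_iops > limit
    return degrade_rate > limit

# 1000 IOPS requested, 780 measured -> 22% degradation, above a 20% stop limit
print(should_stop({'mode': 'randread'},
                  {'rate_iops': 1000, 'read_iops': 780, 'read_bw': 0}, 20))  # True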
@@ -160,7 +160,6 @@ class KBRunner_Storage(KBRunner):
                 if tc_flag:
                     break

-                self.last_result = self.tool_result
                 yield self.tool_result
         else:
             self.single_run(test_only=test_only)