gnocchi: Use Dynamic Aggregates API

Switch to using the Dynamic Aggregates API as the Metric Aggregation
API is deprecated.

When using the Dynamic Aggregates API, any aggregation using rates
can use the underlying base measures for the aggregation rather than
the rate, for example:

    (aggregate rate:mean (metric cpu mean))

With the Dynamic Aggregates API, each record in the returned data is
wrapped in a structure that identifies the aggregation used, so the
sanitization function is adapted to unwrap this format (which matches
the formatting of the metric measures API as well).

Change-Id: I4f631d224404460138f4050b1b981d577b592544
Closes-Bug: 1946793
This commit is contained in:
James Page 2022-02-18 09:51:41 +00:00
parent 8ac7c65cff
commit 74eadfbd58
5 changed files with 228 additions and 121 deletions

View File

@ -200,10 +200,15 @@ class AggregationMetricByResourcesLookupRule(AlarmGnocchiThresholdRule):
'interface': conf.service_credentials.interface, 'interface': conf.service_credentials.interface,
'region_name': conf.service_credentials.region_name}) 'region_name': conf.service_credentials.region_name})
try: try:
gnocchi_client.metric.aggregation( gnocchi_client.aggregates.fetch(
metrics=rule.metric, operations=[
query=query, 'aggregate', rule.aggregation_method,
aggregation=rule.aggregation_method, [
'metric', rule.metric,
rule.aggregation_method.lstrip('rate:')
]
],
search=query,
needed_overlap=0, needed_overlap=0,
start="-1 day", start="-1 day",
stop="now", stop="now",

View File

@ -47,6 +47,12 @@ class GnocchiBase(threshold.ThresholdEvaluator):
# but not a stddev-of-stddevs). # but not a stddev-of-stddevs).
# TODO(sileht): support alarm['exclude_outliers'] # TODO(sileht): support alarm['exclude_outliers']
LOG.debug('sanitize stats %s', statistics) LOG.debug('sanitize stats %s', statistics)
# NOTE(jamespage)
# Dynamic Aggregates are returned in a dict struct so
# check for this first.
if isinstance(statistics, dict):
# Pop array of measures from aggregated subdict
statistics = statistics['measures']['aggregated']
statistics = [stats[VALUE] for stats in statistics statistics = [stats[VALUE] for stats in statistics
if stats[GRANULARITY] == rule['granularity']] if stats[GRANULARITY] == rule['granularity']]
if not statistics: if not statistics:
@ -93,6 +99,16 @@ class GnocchiResourceThresholdEvaluator(GnocchiBase):
class GnocchiAggregationMetricsThresholdEvaluator(GnocchiBase): class GnocchiAggregationMetricsThresholdEvaluator(GnocchiBase):
def _statistics(self, rule, start, end): def _statistics(self, rule, start, end):
try: try:
_operations = [
'aggregate', rule['aggregation_method']
]
for metric in rule['metrics']:
_operations.append(
[
'metric', metric,
rule['aggregation_method'].lstrip('rate:')
]
)
# FIXME(sileht): In case of a heat autoscaling stack decide to # FIXME(sileht): In case of a heat autoscaling stack decide to
# delete an instance, the gnocchi metrics associated to this # delete an instance, the gnocchi metrics associated to this
# instance will be no more updated and when the alarm will ask # instance will be no more updated and when the alarm will ask
@ -101,11 +117,10 @@ class GnocchiAggregationMetricsThresholdEvaluator(GnocchiBase):
# So temporary set 'needed_overlap' to 0 to disable the # So temporary set 'needed_overlap' to 0 to disable the
# gnocchi checks about missing points. For more detail see: # gnocchi checks about missing points. For more detail see:
# https://bugs.launchpad.net/gnocchi/+bug/1479429 # https://bugs.launchpad.net/gnocchi/+bug/1479429
return self._gnocchi_client.metric.aggregation( return self._gnocchi_client.aggregates.fetch(
metrics=rule['metrics'], operations=_operations,
granularity=rule['granularity'], granularity=rule['granularity'],
start=start, stop=end, start=start, stop=end,
aggregation=rule['aggregation_method'],
needed_overlap=0) needed_overlap=0)
except exceptions.MetricNotFound: except exceptions.MetricNotFound:
raise threshold.InsufficientDataError( raise threshold.InsufficientDataError(
@ -128,24 +143,28 @@ class GnocchiAggregationMetricsThresholdEvaluator(GnocchiBase):
class GnocchiAggregationResourcesThresholdEvaluator(GnocchiBase): class GnocchiAggregationResourcesThresholdEvaluator(GnocchiBase):
def _statistics(self, rule, start, end): def _statistics(self, rule, start, end):
# FIXME(sileht): In case of a heat autoscaling stack decide to
# delete an instance, the gnocchi metrics associated to this
# instance will be no more updated and when the alarm will ask
# for the aggregation, gnocchi will raise a 'No overlap'
# exception.
# So temporary set 'needed_overlap' to 0 to disable the
# gnocchi checks about missing points. For more detail see:
# https://bugs.launchpad.net/gnocchi/+bug/1479429
try: try:
return self._gnocchi_client.metric.aggregation( # FIXME(sileht): In case of a heat autoscaling stack decide to
metrics=rule['metric'], # delete an instance, the gnocchi metrics associated to this
# instance will be no more updated and when the alarm will ask
# for the aggregation, gnocchi will raise a 'No overlap'
# exception.
# So temporary set 'needed_overlap' to 0 to disable the
# gnocchi checks about missing points. For more detail see:
# https://bugs.launchpad.net/gnocchi/+bug/1479429
return self._gnocchi_client.aggregates.fetch(
operations=[
'aggregate', rule['aggregation_method'],
[
'metric', rule['metric'],
rule['aggregation_method'].lstrip('rate:')
]
],
granularity=rule['granularity'], granularity=rule['granularity'],
query=json.loads(rule['query']), search=json.loads(rule['query']),
resource_type=rule["resource_type"], resource_type=rule["resource_type"],
start=start, stop=end, start=start, stop=end,
aggregation=rule['aggregation_method'], needed_overlap=0)
needed_overlap=0,
)
except exceptions.MetricNotFound: except exceptions.MetricNotFound:
raise threshold.InsufficientDataError( raise threshold.InsufficientDataError(
'metric %s does not exists' % rule['metric'], []) 'metric %s does not exists' % rule['metric'], [])

View File

@ -2503,14 +2503,16 @@ class TestAlarmsRuleGnocchi(TestAlarmsBase):
self.post_json('/alarms', params=json, headers=self.auth_headers) self.post_json('/alarms', params=json, headers=self.auth_headers)
self.assertEqual([mock.call( self.assertEqual([mock.call(
aggregation='count', operations=[
metrics='ameter', 'aggregate', 'count',
['metric', 'ameter', 'count']
],
needed_overlap=0, needed_overlap=0,
start="-1 day", start="-1 day",
stop="now", stop="now",
query=expected_query, search=expected_query,
resource_type="instance")], resource_type="instance")],
c.metric.aggregation.mock_calls), c.aggregates.fetch.mock_calls),
alarms = list(self.alarm_conn.get_alarms(enabled=False)) alarms = list(self.alarm_conn.get_alarms(enabled=False))
self.assertEqual(1, len(alarms)) self.assertEqual(1, len(alarms))

View File

@ -35,8 +35,16 @@ class BaseCompositeEvaluate(base.TestEvaluatorBase):
super(BaseCompositeEvaluate, self).setUp() super(BaseCompositeEvaluate, self).setUp()
@staticmethod @staticmethod
def _get_gnocchi_stats(granularity, values): def _get_gnocchi_stats(granularity, values, aggregated=False):
now = timeutils.utcnow_ts() now = timeutils.utcnow_ts()
if aggregated:
return {
'measures': {
'aggregated':
[[str(now - len(values) * granularity),
granularity, value] for value in values]
}
}
return [[str(now - len(values) * granularity), return [[str(now - len(values) * granularity),
granularity, value] for value in values] granularity, value] for value in values]
@ -236,7 +244,7 @@ class CompositeTest(BaseCompositeEvaluate):
def test_simple_insufficient(self): def test_simple_insufficient(self):
self._set_all_alarms('ok') self._set_all_alarms('ok')
self.client.metric.aggregation.return_value = [] self.client.aggregates.fetch.return_value = []
self.client.metric.get_measures.return_value = [] self.client.metric.get_measures.return_value = []
self._evaluate_all_alarms() self._evaluate_all_alarms()
self._assert_all_alarms('insufficient data') self._assert_all_alarms('insufficient data')
@ -287,26 +295,36 @@ class CompositeTest(BaseCompositeEvaluate):
# self.sub_rule4: ok # self.sub_rule4: ok
# self.sub_rule5: ok # self.sub_rule5: ok
# self.sub_rule6: alarm # self.sub_rule6: alarm
maxs = self._get_gnocchi_stats(60, [self.sub_rule2['threshold'] + v maxs = self._get_gnocchi_stats(
for v in range(1, 5)]) 60, [self.sub_rule2['threshold'] + v
avgs1 = self._get_gnocchi_stats(60, [self.sub_rule3['threshold'] + v for v in range(1, 5)],
for v in range(1, 4)]) aggregated=True)
avgs2 = self._get_gnocchi_stats(60, [self.sub_rule1['threshold'] - v avgs1 = self._get_gnocchi_stats(
for v in range(1, 6)]) 60, [self.sub_rule3['threshold'] + v
for v in range(1, 4)])
gavgs1 = self._get_gnocchi_stats(60, [self.sub_rule4['threshold'] avgs2 = self._get_gnocchi_stats(
- v for v in range(1, 6)]) 60, [self.sub_rule1['threshold'] - v
gmaxs = self._get_gnocchi_stats(300, [self.sub_rule5['threshold'] + v for v in range(1, 6)],
for v in range(1, 5)]) aggregated=True)
gavgs2 = self._get_gnocchi_stats(50, [self.sub_rule6['threshold'] + v gavgs1 = self._get_gnocchi_stats(
for v in range(1, 7)]) 60, [self.sub_rule4['threshold']
- v for v in range(1, 6)],
aggregated=True)
gmaxs = self._get_gnocchi_stats(
300, [self.sub_rule5['threshold'] + v
for v in range(1, 5)],
aggregated=True)
gavgs2 = self._get_gnocchi_stats(
50, [self.sub_rule6['threshold'] + v
for v in range(1, 7)],
aggregated=True)
self.client.metric.get_measures.side_effect = [gavgs1] self.client.metric.get_measures.side_effect = [gavgs1]
self.client.metric.aggregation.side_effect = [maxs, avgs1, avgs2, self.client.aggregates.fetch.side_effect = [maxs, avgs1, avgs2,
gmaxs, gavgs2] gmaxs, gavgs2]
self.evaluator.evaluate(alarm) self.evaluator.evaluate(alarm)
self.assertEqual(1, self.client.metric.get_measures.call_count) self.assertEqual(1, self.client.metric.get_measures.call_count)
self.assertEqual(5, self.client.metric.aggregation.call_count) self.assertEqual(5, self.client.aggregates.fetch.call_count)
self.assertEqual('alarm', alarm.state) self.assertEqual('alarm', alarm.state)
expected = mock.call( expected = mock.call(
alarm, 'ok', alarm, 'ok',
@ -320,12 +338,14 @@ class CompositeTest(BaseCompositeEvaluate):
def test_alarm_with_short_circuit_logic(self): def test_alarm_with_short_circuit_logic(self):
alarm = self.alarms[1] alarm = self.alarms[1]
# self.sub_rule1: alarm # self.sub_rule1: alarm
avgs = self._get_gnocchi_stats(60, [self.sub_rule1['threshold'] + v avgs = self._get_gnocchi_stats(
for v in range(1, 6)]) 60, [self.sub_rule1['threshold'] + v
self.client.metric.aggregation.side_effect = [avgs] for v in range(1, 6)],
aggregated=True)
self.client.aggregates.fetch.side_effect = [avgs]
self.evaluator.evaluate(alarm) self.evaluator.evaluate(alarm)
self.assertEqual('alarm', alarm.state) self.assertEqual('alarm', alarm.state)
self.assertEqual(1, self.client.metric.aggregation.call_count) self.assertEqual(1, self.client.aggregates.fetch.call_count)
expected = mock.call(self.alarms[1], 'insufficient data', expected = mock.call(self.alarms[1], 'insufficient data',
*self._reason( *self._reason(
'alarm', 'alarm',
@ -336,12 +356,14 @@ class CompositeTest(BaseCompositeEvaluate):
def test_ok_with_short_circuit_logic(self): def test_ok_with_short_circuit_logic(self):
alarm = self.alarms[2] alarm = self.alarms[2]
# self.sub_rule1: ok # self.sub_rule1: ok
avgs = self._get_gnocchi_stats(60, [self.sub_rule1['threshold'] - v avgs = self._get_gnocchi_stats(
for v in range(1, 6)]) 60, [self.sub_rule1['threshold'] - v
self.client.metric.aggregation.side_effect = [avgs] for v in range(1, 6)],
aggregated=True)
self.client.aggregates.fetch.side_effect = [avgs]
self.evaluator.evaluate(alarm) self.evaluator.evaluate(alarm)
self.assertEqual('ok', alarm.state) self.assertEqual('ok', alarm.state)
self.assertEqual(1, self.client.metric.aggregation.call_count) self.assertEqual(1, self.client.aggregates.fetch.call_count)
expected = mock.call(self.alarms[2], 'insufficient data', expected = mock.call(self.alarms[2], 'insufficient data',
*self._reason( *self._reason(
'ok', 'ok',
@ -351,13 +373,19 @@ class CompositeTest(BaseCompositeEvaluate):
def test_unknown_state_with_sub_rules_trending_state(self): def test_unknown_state_with_sub_rules_trending_state(self):
alarm = self.alarms[0] alarm = self.alarms[0]
maxs = self._get_gnocchi_stats(60, [self.sub_rule2['threshold'] + v maxs = self._get_gnocchi_stats(
for v in range(-1, 4)]) 60, [self.sub_rule2['threshold'] + v
avgs = self._get_gnocchi_stats(60, [self.sub_rule3['threshold'] + v for v in range(-1, 4)],
for v in range(-1, 3)]) aggregated=True)
avgs2 = self._get_gnocchi_stats(60, [self.sub_rule1['threshold'] - v avgs = self._get_gnocchi_stats(
for v in range(1, 6)]) 60, [self.sub_rule3['threshold'] + v
self.client.metric.aggregation.side_effect = [avgs2, maxs, avgs] for v in range(-1, 3)],
aggregated=True)
avgs2 = self._get_gnocchi_stats(
60, [self.sub_rule1['threshold'] - v
for v in range(1, 6)],
aggregated=True)
self.client.aggregates.fetch.side_effect = [avgs2, maxs, avgs]
self.evaluator.evaluate(alarm) self.evaluator.evaluate(alarm)
self.assertEqual('alarm', alarm.state) self.assertEqual('alarm', alarm.state)
@ -374,13 +402,19 @@ class CompositeTest(BaseCompositeEvaluate):
alarm.repeat_actions = True alarm.repeat_actions = True
alarm.state = 'ok' alarm.state = 'ok'
maxs = self._get_gnocchi_stats(60, [self.sub_rule2['threshold'] + v maxs = self._get_gnocchi_stats(
for v in range(-1, 4)]) 60, [self.sub_rule2['threshold'] + v
avgs = self._get_gnocchi_stats(60, [self.sub_rule3['threshold'] + v for v in range(-1, 4)],
for v in range(-1, 3)]) aggregated=True)
avgs2 = self._get_gnocchi_stats(60, [self.sub_rule1['threshold'] - v avgs = self._get_gnocchi_stats(
for v in range(1, 6)]) 60, [self.sub_rule3['threshold'] + v
self.client.metric.aggregation.side_effect = [avgs2, maxs, avgs] for v in range(-1, 3)],
aggregated=True)
avgs2 = self._get_gnocchi_stats(
60, [self.sub_rule1['threshold'] - v
for v in range(1, 6)],
aggregated=True)
self.client.aggregates.fetch.side_effect = [avgs2, maxs, avgs]
self.evaluator.evaluate(alarm) self.evaluator.evaluate(alarm)
self.assertEqual('ok', alarm.state) self.assertEqual('ok', alarm.state)
@ -396,13 +430,19 @@ class CompositeTest(BaseCompositeEvaluate):
def test_known_state_with_sub_rules_trending_state_and_not_repeat(self): def test_known_state_with_sub_rules_trending_state_and_not_repeat(self):
alarm = self.alarms[2] alarm = self.alarms[2]
alarm.state = 'ok' alarm.state = 'ok'
maxs = self._get_gnocchi_stats(60, [self.sub_rule2['threshold'] + v maxs = self._get_gnocchi_stats(
for v in range(-1, 4)]) 60, [self.sub_rule2['threshold'] + v
avgs = self._get_gnocchi_stats(60, [self.sub_rule3['threshold'] + v for v in range(-1, 4)],
for v in range(-1, 3)]) aggregated=True)
avgs2 = self._get_gnocchi_stats(60, [self.sub_rule1['threshold'] - v avgs = self._get_gnocchi_stats(
for v in range(1, 6)]) 60, [self.sub_rule3['threshold'] + v
self.client.metric.aggregation.side_effect = [avgs2, maxs, avgs] for v in range(-1, 3)],
aggregated=True)
avgs2 = self._get_gnocchi_stats(
60, [self.sub_rule1['threshold'] - v
for v in range(1, 6)],
aggregated=True)
self.client.aggregates.fetch.side_effect = [avgs2, maxs, avgs]
self.evaluator.evaluate(alarm) self.evaluator.evaluate(alarm)
self.assertEqual('ok', alarm.state) self.assertEqual('ok', alarm.state)
self.assertEqual([], self.notifier.notify.mock_calls) self.assertEqual([], self.notifier.notify.mock_calls)

View File

@ -109,9 +109,9 @@ class TestGnocchiEvaluatorBase(base.TestEvaluatorBase):
comparison_operator='gt', comparison_operator='gt',
threshold=80.0, threshold=80.0,
evaluation_periods=6, evaluation_periods=6,
aggregation_method='mean', aggregation_method='rate:mean',
granularity=50, granularity=50,
metric='cpu_util', metric='cpu',
resource_type='instance', resource_type='instance',
query='{"=": {"server_group": ' query='{"=": {"server_group": '
'"my_autoscaling_group"}}') '"my_autoscaling_group"}}')
@ -121,8 +121,16 @@ class TestGnocchiEvaluatorBase(base.TestEvaluatorBase):
super(TestGnocchiEvaluatorBase, self).setUp() super(TestGnocchiEvaluatorBase, self).setUp()
@staticmethod @staticmethod
def _get_stats(granularity, values): def _get_stats(granularity, values, aggregated=False):
now = timeutils.utcnow_ts() now = timeutils.utcnow_ts()
if aggregated:
return {
'measures': {
'aggregated':
[[str(now - len(values) * granularity),
granularity, value] for value in values]
}
}
return [[str(now - len(values) * granularity), return [[str(now - len(values) * granularity),
granularity, value] for value in values] granularity, value] for value in values]
@ -431,13 +439,17 @@ class TestGnocchiAggregationMetricsThresholdEvaluate(TestGnocchiEvaluatorBase):
self.alarms = self.prepared_alarms[1:2] self.alarms = self.prepared_alarms[1:2]
def test_retry_transient_api_failure(self): def test_retry_transient_api_failure(self):
maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] + v maxs = self._get_stats(
for v in range(4)]) 300,
self.client.metric.aggregation.side_effect = [Exception('boom'), maxs] [self.alarms[0].rule['threshold'] + v
for v in range(4)],
aggregated=True
)
self.client.aggregates.fetch.side_effect = [Exception('boom'), maxs]
self._test_retry_transient() self._test_retry_transient()
def test_simple_insufficient(self): def test_simple_insufficient(self):
self.client.metric.aggregation.return_value = [] self.client.aggregates.fetch.return_value = []
self._test_simple_insufficient() self._test_simple_insufficient()
@mock.patch.object(timeutils, 'utcnow') @mock.patch.object(timeutils, 'utcnow')
@ -445,26 +457,33 @@ class TestGnocchiAggregationMetricsThresholdEvaluate(TestGnocchiEvaluatorBase):
utcnow.return_value = datetime.datetime(2015, 1, 26, 12, 57, 0, 0) utcnow.return_value = datetime.datetime(2015, 1, 26, 12, 57, 0, 0)
self._set_all_alarms('ok') self._set_all_alarms('ok')
maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] - v maxs = self._get_stats(
for v in range(4)]) 300,
self.client.metric.aggregation.side_effect = [maxs] [self.alarms[0].rule['threshold'] - v
for v in range(4)],
aggregated=True
)
self.client.aggregates.fetch.side_effect = [maxs]
self._evaluate_all_alarms() self._evaluate_all_alarms()
start_alarm = "2015-01-26T12:32:00" start_alarm = "2015-01-26T12:32:00"
end = "2015-01-26T12:57:00" end = "2015-01-26T12:57:00"
self.assertEqual( self.assertEqual(
[mock.call.aggregation(aggregation='max', [mock.call.fetch(
metrics=[ operations=[
'0bb1604d-1193-4c0a-b4b8-74b170e35e83', 'aggregate', 'max',
'9ddc209f-42f8-41e1-b8f1-8804f59c4053'], ['metric', '0bb1604d-1193-4c0a-b4b8-74b170e35e83', 'max'], # noqa
granularity=300, ['metric', '9ddc209f-42f8-41e1-b8f1-8804f59c4053', 'max'], # noqa
needed_overlap=0, ],
start=start_alarm, stop=end)], granularity=300,
self.client.metric.mock_calls) needed_overlap=0,
start=start_alarm, stop=end)],
self.client.aggregates.mock_calls)
self._assert_all_alarms('alarm') self._assert_all_alarms('alarm')
expected = [mock.call(alarm) for alarm in self.alarms] expected = [mock.call(alarm) for alarm in self.alarms]
update_calls = self.storage_conn.update_alarm.call_args_list update_calls = self.storage_conn.update_alarm.call_args_list
self.assertEqual(expected, update_calls) self.assertEqual(expected, update_calls)
maxs = maxs['measures']['aggregated']
reason = ('Transition to alarm due to 4 samples outside ' reason = ('Transition to alarm due to 4 samples outside '
'threshold, most recent: %s' % maxs[-1][2]) 'threshold, most recent: %s' % maxs[-1][2])
@ -475,13 +494,14 @@ class TestGnocchiAggregationMetricsThresholdEvaluate(TestGnocchiEvaluatorBase):
def test_simple_alarm_clear(self): def test_simple_alarm_clear(self):
self._set_all_alarms('alarm') self._set_all_alarms('alarm')
maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] + v maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] + v
for v in range(1, 5)]) for v in range(1, 5)], aggregated=True)
self.client.metric.aggregation.side_effect = [maxs] self.client.aggregates.fetch.side_effect = [maxs]
self._evaluate_all_alarms() self._evaluate_all_alarms()
self._assert_all_alarms('ok') self._assert_all_alarms('ok')
expected = [mock.call(alarm) for alarm in self.alarms] expected = [mock.call(alarm) for alarm in self.alarms]
update_calls = self.storage_conn.update_alarm.call_args_list update_calls = self.storage_conn.update_alarm.call_args_list
self.assertEqual(expected, update_calls) self.assertEqual(expected, update_calls)
maxs = maxs['measures']['aggregated']
reason = ('Transition to ok due to 4 samples inside ' reason = ('Transition to ok due to 4 samples inside '
'threshold, most recent: %s' % maxs[-1][2]) 'threshold, most recent: %s' % maxs[-1][2])
reason_data = self._reason_data('inside', 4, maxs[-1][2]) reason_data = self._reason_data('inside', 4, maxs[-1][2])
@ -491,9 +511,13 @@ class TestGnocchiAggregationMetricsThresholdEvaluate(TestGnocchiEvaluatorBase):
def test_equivocal_from_known_state_ok(self): def test_equivocal_from_known_state_ok(self):
self._set_all_alarms('ok') self._set_all_alarms('ok')
maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] - v maxs = self._get_stats(
for v in range(-1, 3)]) 300,
self.client.metric.aggregation.side_effect = [maxs] [self.alarms[0].rule['threshold'] - v
for v in range(-1, 3)],
aggregated=True
)
self.client.aggregates.fetch.side_effect = [maxs]
self._evaluate_all_alarms() self._evaluate_all_alarms()
self._assert_all_alarms('ok') self._assert_all_alarms('ok')
self.assertEqual( self.assertEqual(
@ -505,18 +529,26 @@ class TestGnocchiAggregationMetricsThresholdEvaluate(TestGnocchiEvaluatorBase):
self._set_all_alarms('ok') self._set_all_alarms('ok')
# NOTE(sileht): we add one useless point (81.0) that will break # NOTE(sileht): we add one useless point (81.0) that will break
# the test if the evaluator doesn't remove it. # the test if the evaluator doesn't remove it.
maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] - v maxs = self._get_stats(
for v in range(-1, 5)]) 300,
self.client.metric.aggregation.side_effect = [maxs] [self.alarms[0].rule['threshold'] - v
for v in range(-1, 5)],
aggregated=True
)
self.client.aggregates.fetch.side_effect = [maxs]
self._evaluate_all_alarms() self._evaluate_all_alarms()
self._assert_all_alarms('alarm') self._assert_all_alarms('alarm')
def test_equivocal_from_known_state_and_repeat_actions(self): def test_equivocal_from_known_state_and_repeat_actions(self):
self._set_all_alarms('ok') self._set_all_alarms('ok')
self.alarms[0].repeat_actions = True self.alarms[0].repeat_actions = True
maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] - v maxs = self._get_stats(
for v in range(-1, 3)]) 300,
self.client.metric.aggregation.side_effect = [maxs] [self.alarms[0].rule['threshold'] - v
for v in range(-1, 3)],
aggregated=True
)
self.client.aggregates.fetch.side_effect = [maxs]
self._evaluate_all_alarms() self._evaluate_all_alarms()
self._assert_all_alarms('ok') self._assert_all_alarms('ok')
self.assertEqual([], self.storage_conn.update_alarm.call_args_list) self.assertEqual([], self.storage_conn.update_alarm.call_args_list)
@ -530,9 +562,12 @@ class TestGnocchiAggregationMetricsThresholdEvaluate(TestGnocchiEvaluatorBase):
self._set_all_alarms('alarm') self._set_all_alarms('alarm')
self.alarms[0].repeat_actions = True self.alarms[0].repeat_actions = True
maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] - v maxs = self._get_stats(
for v in range(4)]) 300, [self.alarms[0].rule['threshold'] - v
self.client.metric.aggregation.side_effect = [maxs] for v in range(4)],
aggregated=True
)
self.client.aggregates.fetch.side_effect = [maxs]
self._evaluate_all_alarms() self._evaluate_all_alarms()
self._assert_all_alarms('alarm') self._assert_all_alarms('alarm')
self.assertEqual([], self.storage_conn.update_alarm.call_args_list) self.assertEqual([], self.storage_conn.update_alarm.call_args_list)
@ -553,13 +588,13 @@ class TestGnocchiAggregationResourcesThresholdEvaluate(
def test_retry_transient_api_failure(self): def test_retry_transient_api_failure(self):
avgs2 = self._get_stats(50, [self.alarms[0].rule['threshold'] - v avgs2 = self._get_stats(50, [self.alarms[0].rule['threshold'] - v
for v in range(6)]) for v in range(6)], aggregated=True)
self.client.metric.aggregation.side_effect = [ self.client.aggregates.fetch.side_effect = [
exceptions.ClientException(500, "error"), avgs2] exceptions.ClientException(500, "error"), avgs2]
self._test_retry_transient() self._test_retry_transient()
def test_simple_insufficient(self): def test_simple_insufficient(self):
self.client.metric.aggregation.return_value = [] self.client.aggregates.fetch.return_value = []
self._test_simple_insufficient() self._test_simple_insufficient()
@mock.patch.object(timeutils, 'utcnow') @mock.patch.object(timeutils, 'utcnow')
@ -567,25 +602,30 @@ class TestGnocchiAggregationResourcesThresholdEvaluate(
utcnow.return_value = datetime.datetime(2015, 1, 26, 12, 57, 0, 0) utcnow.return_value = datetime.datetime(2015, 1, 26, 12, 57, 0, 0)
self._set_all_alarms('ok') self._set_all_alarms('ok')
avgs = self._get_stats(50, [self.alarms[0].rule['threshold'] + v avgs = self._get_stats(50, [self.alarms[0].rule['threshold'] + v
for v in range(1, 7)]) for v in range(1, 7)], aggregated=True)
self.client.metric.aggregation.side_effect = [avgs] self.client.aggregates.fetch.side_effect = [avgs]
self._evaluate_all_alarms() self._evaluate_all_alarms()
start_alarm = "2015-01-26T12:51:10" start_alarm = "2015-01-26T12:51:10"
end = "2015-01-26T12:57:00" end = "2015-01-26T12:57:00"
self.assertEqual( self.assertEqual(
[mock.call.aggregation(aggregation='mean', metrics='cpu_util', [mock.call.fetch(
granularity=50, operations=[
needed_overlap=0, 'aggregate', 'rate:mean',
query={"=": {"server_group": ['metric', 'cpu', 'mean'],
"my_autoscaling_group"}}, ],
resource_type='instance', granularity=50,
start=start_alarm, stop=end)], search={"=": {"server_group":
self.client.metric.mock_calls) "my_autoscaling_group"}},
resource_type='instance',
start=start_alarm, stop=end,
needed_overlap=0)],
self.client.aggregates.mock_calls)
self._assert_all_alarms('alarm') self._assert_all_alarms('alarm')
expected = [mock.call(alarm) for alarm in self.alarms] expected = [mock.call(alarm) for alarm in self.alarms]
update_calls = self.storage_conn.update_alarm.call_args_list update_calls = self.storage_conn.update_alarm.call_args_list
self.assertEqual(expected, update_calls) self.assertEqual(expected, update_calls)
avgs = avgs['measures']['aggregated']
reason = ('Transition to alarm due to 6 samples outside ' reason = ('Transition to alarm due to 6 samples outside '
'threshold, most recent: %s' % avgs[-1][2]) 'threshold, most recent: %s' % avgs[-1][2])
reason_data = self._reason_data('outside', 6, avgs[-1][2]) reason_data = self._reason_data('outside', 6, avgs[-1][2])
@ -595,13 +635,14 @@ class TestGnocchiAggregationResourcesThresholdEvaluate(
def test_simple_alarm_clear(self): def test_simple_alarm_clear(self):
self._set_all_alarms('alarm') self._set_all_alarms('alarm')
avgs = self._get_stats(50, [self.alarms[0].rule['threshold'] - v avgs = self._get_stats(50, [self.alarms[0].rule['threshold'] - v
for v in range(6)]) for v in range(6)], aggregated=True)
self.client.metric.aggregation.side_effect = [avgs] self.client.aggregates.fetch.side_effect = [avgs]
self._evaluate_all_alarms() self._evaluate_all_alarms()
self._assert_all_alarms('ok') self._assert_all_alarms('ok')
expected = [mock.call(alarm) for alarm in self.alarms] expected = [mock.call(alarm) for alarm in self.alarms]
update_calls = self.storage_conn.update_alarm.call_args_list update_calls = self.storage_conn.update_alarm.call_args_list
self.assertEqual(expected, update_calls) self.assertEqual(expected, update_calls)
avgs = avgs['measures']['aggregated']
reason = ('Transition to ok due to 6 samples inside ' reason = ('Transition to ok due to 6 samples inside '
'threshold, most recent: %s' % avgs[-1][2]) 'threshold, most recent: %s' % avgs[-1][2])
reason_data = self._reason_data('inside', 6, avgs[-1][2]) reason_data = self._reason_data('inside', 6, avgs[-1][2])
@ -611,8 +652,8 @@ class TestGnocchiAggregationResourcesThresholdEvaluate(
def test_equivocal_from_known_state_ok(self): def test_equivocal_from_known_state_ok(self):
self._set_all_alarms('ok') self._set_all_alarms('ok')
avgs = self._get_stats(50, [self.alarms[0].rule['threshold'] + v avgs = self._get_stats(50, [self.alarms[0].rule['threshold'] + v
for v in range(6)]) for v in range(6)], aggregated=True)
self.client.metric.aggregation.side_effect = [avgs] self.client.aggregates.fetch.side_effect = [avgs]
self._evaluate_all_alarms() self._evaluate_all_alarms()
self._assert_all_alarms('ok') self._assert_all_alarms('ok')
self.assertEqual( self.assertEqual(