Mirror of https://github.com/ARM-software/workload-automation.git

Adding classifiers to metrics and updating csv and telemetry to take advantage of them

- Adding "classifiers" field to Metric objects. This is a dict mapping
  classifier names (arbitrary strings) to corresponding values for that
  specific metrics. This is to allow extensions to add
  extension-specific annotations to metric that could be handled in a
  generic way (e.g. by result processors).
- Updating telemetry workload to add classifiers for the url and internal
  iteration (or "time") for a particular result.
- Updating csv result processor with the option to use classifiers to
  add columns to results.csv (either using all classifiers found, or
  only for the specific ones listed).
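
A sketch of what the classifier columns might look like in results.csv,
assuming the telemetry workload's "url" and "time" classifiers are
selected as extra columns; the base column layout is the csv processor's
usual one, and the values are illustrative, not taken from the commit:

    id,workload,iteration,metric,url,time,value,units
    1,telemetry,1,load_time,http://bbc.co.uk,0,1243,ms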
Author: Sergei Trofimov
Date:   2015-05-14 15:10:42 +01:00
Parent: 782d4501cd
Commit: 512bacc1be

4 changed files with 65 additions and 34 deletions

@@ -192,6 +192,9 @@ class ExecutionContext(object):
         self.current_job = None
         self.output_directory = self.run_output_directory
 
+    def add_metric(self, *args, **kwargs):
+        self.result.add_metric(*args, **kwargs)
+
     def add_artifact(self, name, path, kind, *args, **kwargs):
         if self.current_job is None:
             self.add_run_artifact(name, path, kind, *args, **kwargs)
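
With this delegation in place, workloads can report classified metrics
directly through the execution context. A minimal sketch, assuming the
usual wlauto Workload API (the workload itself is hypothetical):

    from wlauto import Workload

    class DemoWorkload(Workload):

        name = 'demo'

        def update_result(self, context):
            # Forwarded to IterationResult.add_metric(), classifiers and all.
            context.add_metric('score', 42,
                               classifiers={'sub_test': 'warmup'})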

@@ -261,8 +261,8 @@ class IterationResult(object):
         self.metrics = []
         self.artifacts = []
 
-    def add_metric(self, name, value, units=None, lower_is_better=False):
-        self.metrics.append(Metric(name, value, units, lower_is_better))
+    def add_metric(self, name, value, units=None, lower_is_better=False, classifiers=None):
+        self.metrics.append(Metric(name, value, units, lower_is_better, classifiers))
 
     def has_metric(self, name):
         for metric in self.metrics:
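
The point of keeping classifiers generic is that result processors can
handle them without knowing what they mean. A sketch of that kind of
generic handling (the helper below is hypothetical, not part of this
commit, and `result` stands for any IterationResult instance):

    from collections import defaultdict

    def group_by_classifier(metrics, key):
        # Bucket metrics by the value of an arbitrary classifier;
        # metrics that lack the classifier fall under None.
        groups = defaultdict(list)
        for metric in metrics:
            groups[metric.classifiers.get(key)].append(metric)
        return groups

    by_url = group_by_classifier(result.metrics, 'url')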
@@ -300,14 +300,18 @@ class Metric(object):
                   has no units (e.g. it's a count or a standardised score).
     :param lower_is_better: Boolean flag indicating whether lower values are
                             better than higher ones. Defaults to False.
+    :param classifiers: A set of key-value pairs to further classify this metric
+                        beyond current iteration (e.g. this can be used to identify
+                        sub-tests).
 
     """
 
-    def __init__(self, name, value, units=None, lower_is_better=False):
+    def __init__(self, name, value, units=None, lower_is_better=False, classifiers=None):
         self.name = name
         self.value = numeric(value)
         self.units = units
         self.lower_is_better = lower_is_better
+        self.classifiers = classifiers or {}
 
     def to_dict(self):
         return self.__dict__
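
Because classifiers defaults to an empty dict, existing call sites are
unaffected. And since to_dict() just returns __dict__, classifiers ride
along in serialized results, which is what lets the csv processor emit
them as columns. An illustrative construction (values made up):

    m = Metric('load_time', 1243, units='ms',
               classifiers={'url': 'http://bbc.co.uk', 'time': 0})
    assert m.classifiers == {'url': 'http://bbc.co.uk', 'time': 0}
    print(m.to_dict())
    # {'name': 'load_time', 'value': 1243, 'units': 'ms',
    #  'lower_is_better': False, 'classifiers': {...}}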