Mirror of https://github.com/ARM-software/workload-automation.git (synced 2025-09-02 03:12:34 +01:00)
wa: Rename results_processors to output_processors
For clarity and to better reflect their purpose, rename `results_processors` to `output_processors`.
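The new files below share a common shape: an output processor subclasses OutputProcessor, declares a name (and, optionally, parameters), and implements hooks such as process_job_output() and process_run_output() that the framework invokes as results become available. As a minimal sketch of that interface (method signatures taken from the code in this commit; ExampleProcessor itself is hypothetical and not part of the change):

from wa import OutputProcessor


class ExampleProcessor(OutputProcessor):  # hypothetical illustration, not part of this commit

    name = 'example'

    def initialize(self):
        # One-off setup performed before any output is processed.
        self.job_ids = []

    def process_job_output(self, output, target_info, run_output):
        # Called once for each completed job.
        self.job_ids.append(output.id)

    def process_run_output(self, output, target_info):
        # Called once at the end of the run.
        self.logger.info('Processed {} jobs'.format(len(self.job_ids)))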
0   wa/output_processors/__init__.py   Normal file
97  wa/output_processors/csvproc.py    Normal file
@@ -0,0 +1,97 @@
import csv

from wa import OutputProcessor, Parameter
from wa.framework.exception import ConfigError
from wa.utils.types import list_of_strings


class CsvReportProcessor(OutputProcessor):

    name = 'csv'
    description = """
    Creates a ``results.csv`` in the output directory containing results for
    all iterations in CSV format, each line containing a single metric.

    """

    parameters = [
        Parameter('use_all_classifiers', kind=bool, default=False,
                  global_alias='use_all_classifiers',
                  description="""
                  If set to ``True``, this will add a column for every classifier
                  that features in at least one collected metric.

                  .. note:: This cannot be ``True`` if ``extra_columns`` is set.

                  """),
        Parameter('extra_columns', kind=list_of_strings,
                  description="""
                  List of classifiers to use as columns.

                  .. note:: This cannot be set if ``use_all_classifiers`` is
                            ``True``.

                  """),
    ]

    def validate(self):
        super(CsvReportProcessor, self).validate()
        if self.use_all_classifiers and self.extra_columns:
            msg = 'extra_columns cannot be specified when '\
                  'use_all_classifiers is True'
            raise ConfigError(msg)

    def initialize(self):
        self.outputs_so_far = []  # pylint: disable=attribute-defined-outside-init
        self.artifact_added = False

    def process_job_output(self, output, target_info, run_output):
        self.outputs_so_far.append(output)
        self._write_outputs(self.outputs_so_far, run_output)
        if not self.artifact_added:
            run_output.add_artifact('run_result_csv', 'results.csv', 'export')
            self.artifact_added = True

    def process_run_output(self, output, target_info):
        self.outputs_so_far.append(output)
        self._write_outputs(self.outputs_so_far, output)
        if not self.artifact_added:
            output.add_artifact('run_result_csv', 'results.csv', 'export')
            self.artifact_added = True

    def _write_outputs(self, outputs, output):
        if self.use_all_classifiers:
            classifiers = set([])
            for out in outputs:
                for metric in out.metrics:
                    classifiers.update(metric.classifiers.keys())
            extra_columns = list(classifiers)
        elif self.extra_columns:
            extra_columns = self.extra_columns
        else:
            extra_columns = []

        outfile = output.get_path('results.csv')
        with open(outfile, 'wb') as wfh:
            writer = csv.writer(wfh)
            writer.writerow(['id', 'workload', 'iteration', 'metric', ] +
                            extra_columns + ['value', 'units'])

            for o in outputs:
                if o.kind == 'job':
                    header = [o.id, o.label, o.iteration]
                elif o.kind == 'run':
                    # Should be a RunOutput. Run-level metrics aren't attached
                    # to any job, so we leave 'id' and 'iteration' blank and use
                    # the run name for the 'label' field.
                    header = [None, o.info.run_name, None]
                else:
                    raise RuntimeError(
                        'Output of kind "{}" unrecognised by csvproc'.format(o.kind))

                for metric in o.result.metrics:
                    row = (header + [metric.name] +
                           [str(metric.classifiers.get(c, ''))
                            for c in extra_columns] +
                           [str(metric.value), metric.units or ''])
                    writer.writerow(row)
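To make the column layout concrete, here is a small sketch of the rows _write_outputs() assembles when extra_columns=['cpu'] is configured (all values invented for illustration):

# Header: fixed columns, then one column per classifier, then value/units.
header_row = ['id', 'workload', 'iteration', 'metric', 'cpu', 'value', 'units']
# A job-level metric row ('cpu' is looked up in the metric's classifiers):
job_row = ['1-dhrystone-1', 'dhrystone', '1', 'score', 'big', '14500.0', '']
# A run-level metric row: 'id' and 'iteration' are left blank and the
# run name stands in for the workload label.
run_row = ['', 'Run 1', '', 'total_time', '', '42.1', 'seconds']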
58  wa/output_processors/status.py    Normal file
@@ -0,0 +1,58 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#


# pylint: disable=R0201
import time
from collections import Counter

from wa import OutputProcessor, Status
from wa.utils.misc import write_table


class StatusTxtReporter(OutputProcessor):
    name = 'status'
    description = """
    Outputs a txt file containing general status information about which runs
    failed and which were successful.

    """

    def process_run_output(self, output, target_info):
        counter = Counter()
        for jo in output.jobs:
            counter[jo.status] += 1

        outfile = output.get_path('status.txt')
        self.logger.info('Status available in {}'.format(outfile))
        with open(outfile, 'w') as wfh:
            wfh.write('Run name: {}\n'.format(output.info.run_name))
            wfh.write('Run status: {}\n'.format(output.status))
            wfh.write('Date: {}\n'.format(time.strftime("%c")))
            if output.events:
                wfh.write('Events:\n')
                for event in output.events:
                    wfh.write('\t{}\n'.format(event.summary))

            txt = '{}/{} iterations completed without error\n'
            wfh.write(txt.format(counter[Status.OK], len(output.jobs)))
            wfh.write('\n')
            status_lines = [map(str, [o.id, o.label, o.iteration, o.status,
                                      o.event_summary])
                            for o in output.jobs]
            write_table(status_lines, wfh, align='<<>><')

        output.add_artifact('run_status_summary', 'status.txt', 'export')
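The resulting status.txt has this general shape (contents invented for illustration; the per-job table is produced by write_table() with '<<>><' alignment, i.e. id, label, and event summary left-aligned, iteration and status right-aligned):

Run name: example_run
Run status: OK
Date: Mon Jan  1 12:00:00 2018
3/3 iterations completed without error

1-dhrystone-1  dhrystone  1  OK
2-memcpy-1     memcpy     1  OK
3-memcpy-2     memcpy     2  OK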
63  wa/output_processors/targz.py     Normal file
@@ -0,0 +1,63 @@
import shutil
import tarfile

from wa import OutputProcessor, Parameter
from wa.framework import signal


class TargzProcessor(OutputProcessor):

    name = 'targz'

    description = '''
    Create a tarball of the output directory.

    This will create a gzip-compressed tarball of the output directory. By
    default, it will be created at the same level and will have the same name
    as the output directory but with a .tar.gz extension.
    '''

    parameters = [
        Parameter('outfile',
                  description='''
                  The name of the output file to be used. If this is not an
                  absolute path, the file will be created relative to the
                  directory in which WA was invoked. If this contains
                  subdirectories, they must already exist.

                  The name may contain named format specifiers. Any of the
                  ``RunInfo`` fields can be named, resulting in the value of
                  that field (e.g. ``'start_time'``) being formatted into the
                  tarball name.

                  By default, the output file will be created at the same
                  level and share the name of the WA output directory (but
                  with a .tar.gz extension).
                  '''),
        Parameter('delete-output', kind=bool, default=False,
                  description='''
                  If set to ``True``, the WA output directory will be deleted
                  after the tarball is created.
                  '''),
    ]

    def initialize(self):
        if self.delete_output:
            self.logger.debug('Registering RUN_FINALIZED handler.')
            signal.connect(self.delete_output_directory, signal.RUN_FINALIZED, priority=-100)

    def export_run_output(self, run_output, target_info):
        if self.outfile:
            outfile_path = self.outfile.format(**run_output.info.to_pod())
        else:
            outfile_path = run_output.basepath.rstrip('/') + '.tar.gz'

        self.logger.debug('Creating {}'.format(outfile_path))
        with tarfile.open(outfile_path, 'w:gz') as tar:
            tar.add(run_output.basepath)

    def delete_output_directory(self, context):
        self.logger.debug('Deleting output directory')
        shutil.rmtree(context.run_output.basepath)
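Since export_run_output() expands outfile with str.format() over the RunInfo pod, any RunInfo field can be used as a named specifier in the configured name. A quick sketch of that expansion (the pod contents here are assumed for illustration; real pods come from RunInfo.to_pod()):

info_pod = {'run_name': 'nightly', 'start_time': '2018-01-01_12-00-00'}  # assumed fields
outfile_template = 'wa-{run_name}-{start_time}.tar.gz'
print(outfile_template.format(**info_pod))
# -> wa-nightly-2018-01-01_12-00-00.tar.gz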
47  wa/output_processors/uxperf.py    Normal file
@@ -0,0 +1,47 @@
from wa import OutputProcessor
from wa.utils.android import LogcatParser


class UxperfProcessor(OutputProcessor):

    name = 'uxperf'

    def process_job_output(self, output, target_info, job_output):
        logcat = output.get_artifact('logcat')
        if not logcat:
            return

        parser = LogcatParser()
        start_times = {}

        filepath = output.get_path(logcat.path)
        for entry in parser.parse(filepath):
            if entry.tag != 'UX_PERF':
                continue

            parts = entry.message.split()
            if len(parts) != 3:
                message = 'Unexpected UX_PERF message @ {}: {}'
                self.logger.warning(message.format(entry.timestamp, entry.message))
                continue

            action, state, when = parts
            when = int(when)
            if state == 'start':
                if action in start_times:
                    self.logger.warning('start before end @ {}'.format(entry.timestamp))
                start_times[action] = when
            elif state == 'end':
                start_time = start_times.pop(action, None)
                if start_time is None:
                    self.logger.warning('end without start @ {}'.format(entry.timestamp))
                    continue

                duration = (when - start_time) / 1000
                metric_name = '{}_duration'.format(action)
                output.add_metric(metric_name, duration, 'microseconds',
                                  lower_is_better=True)
            else:
                self.logger.warning('Unexpected state "{}" @ {}'.format(state, entry.timestamp))
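For reference, the parser above expects each UX_PERF logcat entry to carry a three-field message: an action name, a 'start' or 'end' state, and an integer timestamp, which is divided by 1000 and reported as microseconds. A sketch of the pairing logic with invented sample messages:

# Hypothetical UX_PERF message bodies as they might appear in logcat.
start_msg = 'fling_gallery start 1200000'
end_msg = 'fling_gallery end 1450000'

action, state, when = start_msg.split()
start_times = {action: int(when)}        # 'start' records the timestamp

action, state, when = end_msg.split()
duration = (int(when) - start_times.pop(action)) / 1000
print('{}_duration: {} microseconds'.format(action, duration))  # 250 microseconds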