mirror of https://github.com/ARM-software/workload-automation.git synced 2024-10-05 18:31:12 +01:00

wa: Rename update_results reference to update_output

To be consistent with the rename of output_processors, old references
to `update_results` have now been changed to `update_output`.
Marc Bonnici 2018-01-10 14:37:13 +00:00 committed by setrofim
parent 538cd8d332
commit 987f4ec4f1
9 changed files with 19 additions and 19 deletions

View File

@@ -38,8 +38,8 @@ Hence, the following methods are sufficient to implement to add new instrument:
where instrument measures start being registered/taken.
- stop: It is invoked just after the workload execution stops. The measures
should stop being taken/registered.
- update_result: It is invoked after the workload updated its result.
update_result is where the taken measures are added to the result so it
- update_output: It is invoked after the workload updated its result.
update_output is where the taken measures are added to the output so it
can be processed by Workload Automation.
- teardown is invoked after the workload is teared down. It is a good place
to clean any logs generated by the instrument.
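
For orientation, a minimal sketch of an instrument implementing these hooks might look like the following. This is illustrative only: it assumes the WA3 `Instrument` base class, a devlib-style `self.target`, and the `context` methods used elsewhere in this commit (`add_artifact`, `output_directory`); the `my_tracer` command and all names are invented for the example. ::

    import os

    from wa import Instrument


    class TraceFileInstrument(Instrument):

        name = 'trace-file'

        def start(self, context):
            # Start registering measurements just before the workload executes.
            # 'my_tracer' is a hypothetical on-target tracing command.
            self.target.execute('my_tracer start')

        def stop(self, context):
            # Stop taking measurements just after the workload finishes.
            self.target.execute('my_tracer stop')

        def update_output(self, context):
            # Add what was collected to the output so WA can process it:
            # pull the generated trace to the host and attach it as an artifact.
            on_target = self.target.path.join(self.target.working_directory, 'trace.txt')
            host_copy = os.path.join(context.output_directory, 'trace.txt')
            self.target.pull(on_target, host_copy)
            context.add_artifact('trace-file', host_copy, kind='data')

        def teardown(self, context):
            # Clean up any logs generated by the instrument on the target.
            self.target.remove(self.target.path.join(self.target.working_directory,
                                                     'trace.txt'))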
@@ -76,13 +76,13 @@ stop method::
def stop(self, context):
self.device.execute('{} stop'.format(self.trace_on_device))
The generated result can be updated inside update_result, or if it is trace, we
just pull the file to the host device. context has a result variable which
The generated output can be updated inside update_output, or if it is trace, we
just pull the file to the host device. context has an output variable which
has add_metric method. It can be used to add the instrumentation results metrics
to the final result for the workload. The method can be passed 4 params, which
are metric key, value, unit and lower_is_better, which is a boolean. ::
def update_result(self, context):
def update_output(self, context):
# pull the trace file to the device
result = os.path.join(self.device.working_directory, 'trace.txt')
self.device.pull(result, context.working_directory)
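
The snippet above only pulls the trace; an add_metric call with the four parameters described in the preceding paragraph might look like the following (a hedged example: the metric name and values are invented, and passing lower_is_better as a keyword is an assumption consistent with the parameter list given above). ::

    def update_output(self, context):
        # The four parameters, in order: metric key, value, unit, lower_is_better.
        context.add_metric('trace_duration', 3.2, 'seconds', lower_is_better=True)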
@@ -127,7 +127,7 @@ SIGNAL_MAP = OrderedDict([
('start', signal.BEFORE_WORKLOAD_EXECUTION),
('stop', signal.AFTER_WORKLOAD_EXECUTION),
('process_workload_output', signal.SUCCESSFUL_WORKLOAD_OUTPUT_UPDATE),
('update_result', signal.AFTER_WORKLOAD_OUTPUT_UPDATE),
('update_output', signal.AFTER_WORKLOAD_OUTPUT_UPDATE),
('teardown', signal.AFTER_WORKLOAD_TEARDOWN),
('finalize', signal.RUN_FINALIZED),
@@ -321,7 +321,7 @@ def install(instrument, context):
raise ValueError(message.format(attr_name, arg_num))
priority = get_priority(attr)
logger.debug('\tConnecting %s to %s with priority %s(%d)', attr.__name__,
logger.debug('\tConnecting %s to %s with priority %s(%d)', attr.__name__,
SIGNAL_MAP[attr_name], priority.name, priority.value)
mc = ManagedCallback(instrument, attr)

View File

@@ -319,7 +319,7 @@ class EnergyMeasurement(Instrument):
for instrument in self.instruments.itervalues():
instrument.stop()
def update_result(self, context):
def update_output(self, context):
for device, instrument in self.instruments.iteritems():
# Append the device key to the filename and artifact name, unless
# it's None (as it will be for backends with only 1

View File

@@ -122,7 +122,7 @@ class FpsInstrument(Instrument):
return
self.collector.stop()
def update_result(self, context):
def update_output(self, context):
if not self._is_enabled:
return
outpath = os.path.join(context.output_directory, 'frames.csv')

View File

@@ -54,7 +54,7 @@ class HwmonInstrument(Instrument):
def stop(self, context):
self.after = self.instrument.take_measurement()
def update_result(self, context):
def update_output(self, context):
measurements_before = {m.channel.label: m for m in self.before}
measurements_after = {m.channel.label: m for m in self.after}

View File

@@ -153,7 +153,7 @@ class SysfsExtractor(Instrument):
for dev_dir, _, after_dir, _ in self.device_and_host_paths:
self.target.pull(dev_dir, after_dir)
def update_result(self, context):
def update_output(self, context):
if self.use_tmpfs:
on_device_tarball = self.target.path.join(self.target.working_directory, self.tarname)
on_host_tarball = self.target.path.join(context.output_directory, self.tarname)
@@ -221,7 +221,7 @@ class ExecutionTimeInstrument(Instrument):
def stop(self, context):
self.end_time = time.time()
def update_result(self, context):
def update_output(self, context):
execution_time = self.end_time - self.start_time
context.add_metric('execution_time', execution_time, 'seconds')
@@ -244,7 +244,7 @@ class ApkVersion(Instrument):
else:
self.apk_info = None
def update_result(self, context):
def update_output(self, context):
if self.apk_info:
context.result.add_metric(self.name, self.apk_info.version_name)
@@ -277,7 +277,7 @@ class InterruptStatsInstrument(Instrument):
with open(_f(self.after_file), 'w') as wfh:
wfh.write(self.target.execute('cat /proc/interrupts'))
def update_result(self, context):
def update_output(self, context):
# If workload execution failed, the after_file may not have been created.
if os.path.isfile(self.after_file):
_diff_interrupt_files(self.before_file, self.after_file, _f(self.diff_file))

View File

@@ -87,7 +87,7 @@ class FilePoller(Instrument):
def stop(self, context):
self.target.killall('poller', signal='TERM', as_root=self.as_root)
def update_result(self, context):
def update_output(self, context):
host_output_file = os.path.join(context.output_directory, 'poller.csv')
self.target.pull(self.target_output_path, host_output_file)
context.add_artifact('poller_output', host_output_file, kind='data')

View File

@@ -200,7 +200,7 @@ class TraceCmdInstrument(Instrument):
def stop(self, context):
self.collector.stop()
def update_result(self, context): # NOQA pylint: disable=R0912
def update_output(self, context): # NOQA pylint: disable=R0912
outfile = os.path.join(context.output_directory, 'trace.dat')
self.collector.get_trace(outfile)
context.add_artifact('trace-cmd-bin', outfile, 'data')

View File

@@ -176,7 +176,7 @@ class RtApp(Workload):
timeout=self.timeout,
as_root=True)
def update_result(self, context):
def update_output(self, context):
self._pull_rt_app_logs(context)
context.result.classifiers.update(dict(
duration=self.duration,

View File

@@ -87,7 +87,7 @@ class Vellamo(ApkUiautoWorkload):
super(Vellamo, self).update_output(context)
# Get total scores from logcat
self.non_root_update_result(context)
self.non_root_update_output(context)
if not self.target.is_rooted:
return
@@ -143,7 +143,7 @@ class Vellamo(ApkUiautoWorkload):
score = result['score']
context.add_metric(name, score)
def non_root_update_result(self, context):
def non_root_update_output(self, context):
failed = []
logcat_file = context.get_artifact_path('logcat')
with open(logcat_file) as fh: