Mirror of https://github.com/ARM-software/workload-automation.git (synced 2025-01-31 02:01:16 +00:00)
wa: Rename update_result references to update_output

To be consistent with the rename of output_processors, old references to `update_result` have now been changed to `update_output`.
parent 538cd8d332
commit 987f4ec4f1
@@ -38,8 +38,8 @@ Hence, the following methods are sufficient to implement to add new instrument:
   where instrument measures start being registered/taken.
 - stop: It is invoked just after the workload execution stops. The measures
   should stop being taken/registered.
-- update_result: It is invoked after the workload updated its result.
-  update_result is where the taken measures are added to the result so it
+- update_output: It is invoked after the workload updated its result.
+  update_output is where the taken measures are added to the output so it
   can be processed by Workload Automation.
 - teardown is invoked after the workload is teared down. It is a good place
   to clean any logs generated by the instrument.
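The renamed hook slots into the same lifecycle the list above describes. As a rough sketch only (the `from wa import Instrument` import path, the instrument name and the file names are illustrative assumptions, not part of this diff), an instrument using the new method name could look like:

    import os

    from wa import Instrument  # assumed import path, not shown in this diff


    class DmesgSnapshot(Instrument):
        # Hypothetical example instrument, named for illustration only.
        name = 'dmesg_snapshot'

        def start(self, context):
            # Measures start being taken/registered just before execution.
            self.before = self.target.execute('dmesg')

        def stop(self, context):
            # Measures stop being taken just after the workload stops.
            self.after = self.target.execute('dmesg')

        def update_output(self, context):
            # Add the taken measures to the output so WA can process them.
            before_file = os.path.join(context.output_directory, 'dmesg.before')
            after_file = os.path.join(context.output_directory, 'dmesg.after')
            with open(before_file, 'w') as wfh:
                wfh.write(self.before)
            with open(after_file, 'w') as wfh:
                wfh.write(self.after)
            context.add_artifact('dmesg_before', before_file, kind='data')
            context.add_artifact('dmesg_after', after_file, kind='data')

        def teardown(self, context):
            # A good place to clean up any logs the instrument generated.
            pass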
@@ -76,13 +76,13 @@ stop method::
     def stop(self, context):
         self.device.execute('{} stop'.format(self.trace_on_device))

-The generated result can be updated inside update_result, or if it is trace, we
-just pull the file to the host device. context has a result variable which
+The generated output can be updated inside update_output, or if it is trace, we
+just pull the file to the host device. context has an output variable which
 has add_metric method. It can be used to add the instrumentation results metrics
 to the final result for the workload. The method can be passed 4 params, which
 are metric key, value, unit and lower_is_better, which is a boolean. ::

-    def update_result(self, context):
+    def update_output(self, context):
         # pull the trace file to the device
         result = os.path.join(self.device.working_directory, 'trace.txt')
         self.device.pull(result, context.working_directory)
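When an instrument produces numbers rather than a trace file, the same hook can report them directly using the four parameters listed above. A minimal sketch, with made-up metric names and values for illustration:

    def update_output(self, context):
        # key, value, unit, lower_is_better
        context.add_metric('total_energy', 42.7, 'joules', lower_is_better=True)
        context.add_metric('frame_count', 3600, 'frames', lower_is_better=False)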
@@ -127,7 +127,7 @@ SIGNAL_MAP = OrderedDict([
     ('start', signal.BEFORE_WORKLOAD_EXECUTION),
     ('stop', signal.AFTER_WORKLOAD_EXECUTION),
     ('process_workload_output', signal.SUCCESSFUL_WORKLOAD_OUTPUT_UPDATE),
-    ('update_result', signal.AFTER_WORKLOAD_OUTPUT_UPDATE),
+    ('update_output', signal.AFTER_WORKLOAD_OUTPUT_UPDATE),
     ('teardown', signal.AFTER_WORKLOAD_TEARDOWN),
     ('finalize', signal.RUN_FINALIZED),
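This mapping is what ties an instrument's method names to the points in the run where they fire, which is why the key has to be renamed along with the method. A simplified illustration of the idea only (not the framework's actual dispatch code; the connect callback and the string signal names are stand-ins):

    from collections import OrderedDict

    # Stand-in signal names; the real map uses the signal.* constants above.
    SIGNAL_MAP = OrderedDict([
        ('start', 'before-workload-execution'),
        ('stop', 'after-workload-execution'),
        ('update_output', 'after-workload-output-update'),
        ('teardown', 'after-workload-teardown'),
    ])


    def install(instrument, connect):
        # Hook up each method the instrument actually defines to the signal
        # it should fire on; methods the instrument omits are skipped.
        for method_name, signal_name in SIGNAL_MAP.items():
            handler = getattr(instrument, method_name, None)
            if callable(handler):
                connect(handler, signal_name)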
@@ -319,7 +319,7 @@ class EnergyMeasurement(Instrument):
         for instrument in self.instruments.itervalues():
             instrument.stop()

-    def update_result(self, context):
+    def update_output(self, context):
         for device, instrument in self.instruments.iteritems():
             # Append the device key to the filename and artifact name, unless
             # it's None (as it will be for backends with only 1
@@ -122,7 +122,7 @@ class FpsInstrument(Instrument):
             return
         self.collector.stop()

-    def update_result(self, context):
+    def update_output(self, context):
         if not self._is_enabled:
             return
         outpath = os.path.join(context.output_directory, 'frames.csv')
@@ -54,7 +54,7 @@ class HwmonInstrument(Instrument):
     def stop(self, context):
         self.after = self.instrument.take_measurement()

-    def update_result(self, context):
+    def update_output(self, context):
         measurements_before = {m.channel.label: m for m in self.before}
         measurements_after = {m.channel.label: m for m in self.after}
@@ -153,7 +153,7 @@ class SysfsExtractor(Instrument):
         for dev_dir, _, after_dir, _ in self.device_and_host_paths:
             self.target.pull(dev_dir, after_dir)

-    def update_result(self, context):
+    def update_output(self, context):
         if self.use_tmpfs:
             on_device_tarball = self.target.path.join(self.target.working_directory, self.tarname)
             on_host_tarball = self.target.path.join(context.output_directory, self.tarname)
@@ -221,7 +221,7 @@ class ExecutionTimeInstrument(Instrument):
     def stop(self, context):
         self.end_time = time.time()

-    def update_result(self, context):
+    def update_output(self, context):
         execution_time = self.end_time - self.start_time
         context.add_metric('execution_time', execution_time, 'seconds')
@@ -244,7 +244,7 @@ class ApkVersion(Instrument):
         else:
             self.apk_info = None

-    def update_result(self, context):
+    def update_output(self, context):
         if self.apk_info:
             context.result.add_metric(self.name, self.apk_info.version_name)
@@ -277,7 +277,7 @@ class InterruptStatsInstrument(Instrument):
         with open(_f(self.after_file), 'w') as wfh:
             wfh.write(self.target.execute('cat /proc/interrupts'))

-    def update_result(self, context):
+    def update_output(self, context):
         # If workload execution failed, the after_file may not have been created.
         if os.path.isfile(self.after_file):
             _diff_interrupt_files(self.before_file, self.after_file, _f(self.diff_file))
@@ -87,7 +87,7 @@ class FilePoller(Instrument):
     def stop(self, context):
         self.target.killall('poller', signal='TERM', as_root=self.as_root)

-    def update_result(self, context):
+    def update_output(self, context):
         host_output_file = os.path.join(context.output_directory, 'poller.csv')
         self.target.pull(self.target_output_path, host_output_file)
         context.add_artifact('poller_output', host_output_file, kind='data')
@@ -200,7 +200,7 @@ class TraceCmdInstrument(Instrument):
     def stop(self, context):
         self.collector.stop()

-    def update_result(self, context): # NOQA pylint: disable=R0912
+    def update_output(self, context): # NOQA pylint: disable=R0912
         outfile = os.path.join(context.output_directory, 'trace.dat')
         self.collector.get_trace(outfile)
         context.add_artifact('trace-cmd-bin', outfile, 'data')
@@ -176,7 +176,7 @@ class RtApp(Workload):
                                           timeout=self.timeout,
                                           as_root=True)

-    def update_result(self, context):
+    def update_output(self, context):
         self._pull_rt_app_logs(context)
         context.result.classifiers.update(dict(
             duration=self.duration,
@@ -87,7 +87,7 @@ class Vellamo(ApkUiautoWorkload):
         super(Vellamo, self).update_output(context)

         # Get total scores from logcat
-        self.non_root_update_result(context)
+        self.non_root_update_output(context)

         if not self.target.is_rooted:
             return
@@ -143,7 +143,7 @@ class Vellamo(ApkUiautoWorkload):
                 score = result['score']
                 context.add_metric(name, score)

-    def non_root_update_result(self, context):
+    def non_root_update_output(self, context):
         failed = []
         logcat_file = context.get_artifact_path('logcat')
         with open(logcat_file) as fh: