mirror of https://github.com/ARM-software/workload-automation.git synced 2025-10-30 06:34:13 +00:00

wa: Rename Instrumentation to Instruments

To maintain a consistent naming scheme, rename all instances of
`Instrumentation` to `Instruments`.
Marc Bonnici
2018-01-10 14:54:43 +00:00
committed by setrofim
parent 987f4ec4f1
commit 446a1cfbb0
23 changed files with 34 additions and 34 deletions

wa/instruments/dmesg.py Normal file

@@ -0,0 +1,68 @@
# Copyright 2014-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from wa import Instrument, Parameter
from wa.framework.exception import InstrumentError
from wa.framework.instruments import slow
from wa.utils.misc import ensure_file_directory_exists as _f
class DmesgInstrument(Instrument):
# pylint: disable=no-member,attribute-defined-outside-init
"""
Collects dmesg output before and during the run.
"""
name = 'dmesg'
parameters = [
Parameter('loglevel', kind=int, allowed_values=range(8),
description='Set loglevel for console output.')
]
loglevel_file = '/proc/sys/kernel/printk'
def initialize(self, context):
self.need_root = self.target.os == 'android'
if self.need_root and not self.target.is_rooted:
raise InstrumentError('Need root to collect dmesg on Android')
def setup(self, context):
if self.loglevel:
self.old_loglevel = self.target.read_int(self.loglevel_file)
self.target.write_value(self.loglevel_file, self.loglevel, verify=False)
self.before_file = _f(os.path.join(context.output_directory, 'dmesg', 'before'))
self.after_file = _f(os.path.join(context.output_directory, 'dmesg', 'after'))
@slow
def start(self, context):
with open(self.before_file, 'w') as wfh:
wfh.write(self.target.execute('dmesg', as_root=self.need_root))
context.add_artifact('dmesg_before', self.before_file, kind='data')
if self.target.is_rooted:
self.target.execute('dmesg -c', as_root=True)
@slow
def stop(self, context):
with open(self.after_file, 'w') as wfh:
wfh.write(self.target.execute('dmesg', as_root=self.need_root))
context.add_artifact('dmesg_after', self.after_file, kind='data')
def teardown(self, context): # pylint: disable=unused-argument
if self.loglevel:
self.target.write_value(self.loglevel_file, self.old_loglevel, verify=False)
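As a rough standalone illustration of what the hooks above capture, here is a sketch using plain subprocess calls in place of a devlib target (the output paths are hypothetical; clearing the buffer with `dmesg -c` requires root, mirroring the instrument's need_root check):

import subprocess

# start(): snapshot the kernel log before the workload
with open('dmesg_before', 'w') as wfh:
    wfh.write(subprocess.check_output(['dmesg']))
subprocess.call(['dmesg', '-c'])  # clear the buffer, as done on rooted targets

# ... workload runs here ...

# stop(): capture the messages emitted since the clear
with open('dmesg_after', 'w') as wfh:
    wfh.write(subprocess.check_output(['dmesg']))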


@@ -0,0 +1,373 @@
# Copyright 2013-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=W0613,E1101
from __future__ import division
from collections import defaultdict
import os
from devlib import DerivedEnergyMeasurements
from devlib.instrument import CONTINUOUS
from devlib.instrument.energy_probe import EnergyProbeInstrument
from devlib.instrument.daq import DaqInstrument
from devlib.instrument.acmecape import AcmeCapeInstrument
from devlib.instrument.monsoon import MonsoonInstrument
from devlib.platform.arm import JunoEnergyInstrument
from devlib.utils.misc import which
from wa import Instrument, Parameter
from wa.framework import pluginloader
from wa.framework.plugin import Plugin
from wa.framework.exception import ConfigError, InstrumentError
from wa.utils.types import list_of_strings, list_of_ints, list_or_string, obj_dict, identifier
class EnergyInstrumentBackend(Plugin):
name = None
kind = 'energy_instrument_backend'
parameters = []
instrument = None
def get_parameters(self):
return {p.name : p for p in self.parameters}
def validate_parameters(self, params):
pass
def get_instruments(self, target, **kwargs):
"""
Get a dict mapping device keys to Instrument instances.
Typically there is just a single device/instrument, in which case the
device key is arbitrary.
"""
return {None: self.instrument(target, **kwargs)}
class DAQBackend(EnergyInstrumentBackend):
name = 'daq'
parameters = [
Parameter('resistor_values', kind=list_of_ints,
description="""
The values of the resistors (in Ohms) across which the
voltages are measured.
"""),
Parameter('labels', kind=list_of_strings,
description="""
List of port labels. If specified, the length of the list
must match the length of ``resistor_values``.
"""),
Parameter('host', kind=str, default='localhost',
description="""
The address of the host machine running the DAQ server with
which the instrument communicates.
"""),
Parameter('port', kind=int, default=45677,
description="""
The port number on which the DAQ server listens for
connections from the instrument.
"""),
Parameter('device_id', kind=str, default='Dev1',
description="""
The ID under which the DAQ is registered with the driver.
"""),
Parameter('v_range', kind=str, default=2.5,
description="""
Specifies the voltage range for the SOC voltage channel on the
DAQ (please refer to :ref:`daq_setup` for details).
"""),
Parameter('dv_range', kind=str, default=0.2,
description="""
Specifies the voltage range for the resistor voltage channel
on the DAQ (please refer to :ref:`daq_setup` for details).
"""),
Parameter('sample_rate_hz', kind=str, default=10000,
description="""
Specify the sample rate in Hz.
"""),
Parameter('channel_map', kind=list_of_ints,
default=(0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23),
description="""
Represents mapping from logical AI channel number to physical
connector on the DAQ (varies between DAQ models). The default
assumes DAQ 6363 and similar with AI channels on connectors
0-7 and 16-23.
""")
]
instrument = DaqInstrument
def validate_parameters(self, params):
if not params.get('resistor_values'):
raise ConfigError('Mandatory parameter "resistor_values" is not set.')
if params.get('labels'):
if len(params.get('labels')) != len(params.get('resistor_values')):
msg = 'Number of DAQ port labels does not match the number of resistor values.'
raise ConfigError(msg)
class EnergyProbeBackend(EnergyInstrumentBackend):
name = 'energy_probe'
parameters = [
Parameter('resistor_values', kind=list_of_ints,
description="""
The values of the resistors (in Ohms) across which the
voltages are measured.
"""),
Parameter('labels', kind=list_of_strings,
description="""
List of port labels. If specified, the length of the list
must match the length of ``resistor_values``.
"""),
Parameter('device_entry', kind=str, default='/dev/ttyACM0',
description="""
Path to /dev entry for the energy probe (it should be /dev/ttyACMx)
"""),
]
instrument = EnergyProbeInstrument
def validate_parameters(self, params):
if not params.get('resistor_values'):
raise ConfigError('Mandatory parameter "resistor_values" is not set.')
if params.get('labels'):
if len(params.get('labels')) != len(params.get('resistor_values')):
msg = 'Number of Energy Probe port labels does not match the number of resistor values.'
raise ConfigError(msg)
class AcmeCapeBackend(EnergyInstrumentBackend):
name = 'acme_cape'
parameters = [
Parameter('iio-capture', default=which('iio-capture'),
description="""
Path to the iio-capture binary. If not specified, it will be
looked up in ``$PATH``.
"""),
Parameter('host', default='baylibre-acme.local',
description="""
Host name (or IP address) of the ACME cape board.
"""),
Parameter('iio-devices', default='iio:device0',
kind=list_or_string,
description="""
"""),
Parameter('buffer-size', kind=int, default=256,
description="""
Size of the capture buffer (in KB).
"""),
]
def get_instruments(self, target,
iio_capture, host, iio_devices, buffer_size):
#
# Devlib's ACME instrument uses iio-capture under the hood, which can
# only capture data from one IIO device at a time. Devlib's instrument
# API expects to produce a single CSV file for the Instrument, with a
# single axis of sample timestamps. These two things cannot be correctly
# reconciled without changing the devlib Instrument API - get_data would
# need to be able to return two distinct sets of data.
#
# Instead, where required WA will instantiate the ACME instrument
# multiple times (once for each IIO device), producing a separate CSV
# file for each. Aggregated energy info _can_ be meaningfully combined from
# multiple IIO devices, so we will later sum the derived stats across
# each of the channels reported by the instruments.
#
ret = {}
for iio_device in iio_devices:
ret[iio_device] = AcmeCapeInstrument(
target, iio_capture=iio_capture, host=host,
iio_device=iio_device, buffer_size=buffer_size)
return ret
class MonsoonBackend(EnergyInstrumentBackend):
name = 'monsoon'
parameters = [
Parameter('monsoon_bin', default=which('monsoon.py'),
description="""
Path to monsoon.py executable. If not provided,
``$PATH`` is searched.
"""),
Parameter('tty_device', default='/dev/ttyACM0',
description="""
TTY device to use to communicate with the Power
Monitor. If not provided, /dev/ttyACM0 is used.
""")
]
instrument = MonsoonInstrument
class JunoEnergyBackend(EnergyInstrumentBackend):
name = 'juno_readenergy'
instrument = JunoEnergyInstrument
class EnergyMeasurement(Instrument):
name = 'energy_measurement'
description = """
This instrument is designed to be used as an interface to the various
energy measurement instruments located in devlib.
"""
parameters = [
Parameter('instrument', kind=str, mandatory=True,
allowed_values=['daq', 'energy_probe', 'acme_cape', 'monsoon', 'juno_readenergy'],
description="""
Specify the energy instrument backend to be enabled.
"""),
Parameter('instrument_parameters', kind=dict, default={},
description="""
Specify the parameters used to initialize the desired
instruments.
"""),
Parameter('sites', kind=list_or_string,
description="""
Specify the sites from which measurements should be
collected. If not specified, measurements will be
collected from all available sites.
"""),
Parameter('kinds', kind=list_or_string,
description="""
Specify the kinds of measurements that should be
collected. If not specified, measurements will be
collected for all available kinds.
"""),
Parameter('channels', kind=list_or_string,
description="""
Specify the channels from which measurements should be
collected. If not specified, measurements will be
collected from all available channels.
"""),
]
def __init__(self, target, loader=pluginloader, **kwargs):
super(EnergyMeasurement, self).__init__(target, **kwargs)
self.instruments = None
self.measurement_csvs = {}
self.loader = loader
self.backend = self.loader.get_plugin(self.instrument)
self.params = obj_dict()
instrument_parameters = {identifier(k): v
for k, v in self.instrument_parameters.iteritems()}
supported_params = self.backend.get_parameters()
for name, param in supported_params.iteritems():
value = instrument_parameters.pop(name, None)
param.set_value(self.params, value)
if instrument_parameters:
msg = 'Unexpected parameters for backend "{}": {}'
raise ConfigError(msg.format(self.instrument, instrument_parameters))
self.backend.validate_parameters(self.params)
def initialize(self, context):
self.instruments = self.backend.get_instruments(self.target, **self.params)
for instrument in self.instruments.itervalues():
if not (instrument.mode & CONTINUOUS):
msg = '{} instrument does not support continuous measurement collection'
raise ConfigError(msg.format(self.instrument))
instrument.setup()
for channel in self.channels or []:
# Check that the expected channels exist.
# If there are multiple Instruments, they were all constructed with
# the same channels param, so check them all.
for instrument in self.instruments.itervalues():
if not instrument.get_channels(channel):
raise ConfigError('No channels found for "{}"'.format(channel))
def setup(self, context):
for instrument in self.instruments.itervalues():
instrument.reset(sites=self.sites,
kinds=self.kinds,
channels=self.channels)
def start(self, context):
for instrument in self.instruments.itervalues():
instrument.start()
def stop(self, context):
for instrument in self.instruments.itervalues():
instrument.stop()
def update_output(self, context):
for device, instrument in self.instruments.iteritems():
# Append the device key to the filename and artifact name, unless
# it's None (as it will be for backends with only one
# device/instrument)
if len(self.instruments) > 1:
name = 'energy_instrument_output_{}'.format(device)
else:
name = 'energy_instrument_output'
outfile = os.path.join(context.output_directory, '{}.csv'.format(name))
measurements = instrument.get_data(outfile)
if not measurements:
raise InstrumentError("Failed to collect energy data from {}"
.format(self.backend.name))
self.measurement_csvs[device] = measurements
context.add_artifact(name, measurements.path, 'data',
classifiers={'device': device})
self.extract_metrics(context)
def extract_metrics(self, context):
metrics_by_name = defaultdict(list)
for device in self.instruments:
csv = self.measurement_csvs[device]
derived_measurements = DerivedEnergyMeasurements.process(csv)
for meas in derived_measurements:
# Append the device key to the metric name, unless it's None (as
# it will be for backends with only one device/instrument)
if len(self.instruments) > 1:
metric_name = '{}_{}'.format(meas.name, device)
else:
metric_name = meas.name
context.add_metric(metric_name, meas.value, meas.units,
classifiers={'device': device})
metrics_by_name[meas.name].append(meas)
# Where we have multiple instruments, add up all the metrics with the
# same name. For instance with ACME we may have multiple IIO devices
# each reporting 'device_energy' and 'device_power', so sum them up to
# produce aggregated energy and power metrics.
# (Note that metrics_by_name uses the metric name originally reported by
# the devlib instrument, before we potentially appended a device key to
# it)
if len(self.instruments) > 1:
for name, metrics in metrics_by_name.iteritems():
units = metrics[0].units
value = sum(m.value for m in metrics)
context.add_metric(name, value, units)
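To make the per-device naming and cross-device summing above concrete, here is a minimal standalone sketch (``Measurement`` is a stand-in namedtuple with hypothetical values, not the devlib class):

from collections import defaultdict, namedtuple

Measurement = namedtuple('Measurement', 'name value units')  # stand-in

# derived stats from two hypothetical ACME IIO devices
derived = {
    'iio:device0': [Measurement('device_energy', 2.1, 'joules')],
    'iio:device1': [Measurement('device_energy', 3.4, 'joules')],
}

metrics_by_name = defaultdict(list)
for device, measurements in derived.items():
    for meas in measurements:
        # with more than one instrument, the device key is appended
        print('{}_{}: {} {}'.format(meas.name, device, meas.value, meas.units))
        metrics_by_name[meas.name].append(meas)

# metrics sharing the original name are summed into an aggregate metric
for name, metrics in metrics_by_name.items():
    print('{}: {} {}'.format(name, sum(m.value for m in metrics), metrics[0].units))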

wa/instruments/fps.py Normal file

@@ -0,0 +1,169 @@
import os
import shutil
from devlib import SurfaceFlingerFramesInstrument, GfxInfoFramesInstrument
from devlib import DerivedSurfaceFlingerStats, DerivedGfxInfoStats
from wa import Instrument, Parameter, WorkloadError
from wa.utils.types import numeric
class FpsInstrument(Instrument):
name = 'fps'
description = """
Measures Frames Per Second (FPS) and associated metrics for a workload.
.. note:: This instrument depends on the pandas Python library (which is not part of the
standard WA dependencies), so you will need to install it first before you can use
this instrument.
Android L and below use SurfaceFlinger to calculate the FPS data.
Android M and above use gfxinfo to calculate the FPS data.
SurfaceFlinger:
The view is specified by the workload as ``view`` attribute. This defaults
to ``'SurfaceView'`` for game workloads, and ``None`` for non-game
workloads (for which FPS measurement usually doesn't make sense).
Individual workloads may override this.
gfxinfo:
The view is specified by the workload as ``package`` attribute.
This is because gfxinfo already reports data for all views in a package.
"""
parameters = [
Parameter('drop_threshold', kind=numeric, default=5,
description="""
Data points below this FPS will be dropped as they do not
constitute "real" gameplay. The assumption being that while
actually running, the FPS in the game will not drop below X
frames per second, except on loading screens, menus, etc,
which should not contribute to FPS calculation.
"""),
Parameter('keep_raw', kind=bool, default=False,
description="""
If set to ``True``, this will keep the raw dumpsys output in
the results directory (this is mainly useful for debugging).
Note: frames.csv with the collected frame data will always be
generated regardless of this setting.
"""),
Parameter('crash_threshold', kind=float, default=0.7,
description="""
Specifies the threshold used to decide whether a
measured/expected frames ratio indicates a content crash.
E.g. a value of ``0.75`` means that if the number of actual
frames counted is a quarter lower than expected, it will be
treated as a content crash.
If set to zero, no crash check will be performed.
"""),
Parameter('period', kind=float, default=2, constraint=lambda x: x > 0,
description="""
Specifies the time period between polling frame data in
seconds when collecting frame data. Using a lower value
improves the granularity of timings when recording actions
that take a short time to complete. Note, this will produce
duplicate frame data in the raw dumpsys output, however, this
is filtered out in frames.csv. It may also affect the
overall load on the system.
The default value of 2 seconds corresponds to the
NUM_FRAME_RECORDS in
android/services/surfaceflinger/FrameTracker.h (currently
128, as of the time of writing) and a frame rate of 60 fps,
which is applicable to most devices.
"""),
Parameter('force_surfaceflinger', kind=bool, default=False,
description="""
By default, the method used to capture FPS data is chosen
based on the Android version. If this is set to ``True``, the
instrument is forced to use the SurfaceFlinger method
regardless of the Android version.
"""),
]
def __init__(self, target, **kwargs):
super(FpsInstrument, self).__init__(target, **kwargs)
self.collector = None
self.processor = None
self._is_enabled = None
def setup(self, context):
use_gfxinfo = self.target.get_sdk_version() >= 23 and not self.force_surfaceflinger
if use_gfxinfo:
collector_target_attr = 'package'
else:
collector_target_attr = 'view'
collector_target = getattr(context.workload, collector_target_attr, None)
if not collector_target:
self._is_enabled = False
msg = 'Workload {} does not define a {}; disabling frame collection and FPS evaluation.'
self.logger.info(msg.format(context.workload.name, collector_target_attr))
return
self._is_enabled = True
if use_gfxinfo:
self.collector = GfxInfoFramesInstrument(self.target, collector_target, self.period)
self.processor = DerivedGfxInfoStats(self.drop_threshold, filename='fps.csv')
else:
self.collector = SurfaceFlingerFramesInstrument(self.target, collector_target, self.period)
self.processor = DerivedSurfaceFlingerStats(self.drop_threshold, filename='fps.csv')
self.collector.reset()
def start(self, context):
if not self._is_enabled:
return
self.collector.start()
def stop(self, context):
if not self._is_enabled:
return
self.collector.stop()
def update_output(self, context):
if not self._is_enabled:
return
outpath = os.path.join(context.output_directory, 'frames.csv')
frames_csv = self.collector.get_data(outpath)
raw_output = self.collector.get_raw()
processed = self.processor.process(frames_csv)
processed.extend(self.processor.process_raw(*raw_output))
fps, frame_count, fps_csv = processed[:3]
rest = processed[3:]
context.add_metric(fps.name, fps.value, fps.units)
context.add_metric(frame_count.name, frame_count.value, frame_count.units)
context.add_artifact('frames', frames_csv.path, kind='raw')
context.add_artifact('fps', fps_csv.path, kind='data')
for metric in rest:
context.add_metric(metric.name, metric.value, metric.units, lower_is_better=True)
if not self.keep_raw:
for entry in raw_output:
if os.path.isdir(entry):
shutil.rmtree(entry)
elif os.path.isfile(entry):
os.remove(entry)
if not frame_count.value:
context.add_event('Could not find frames data in gfxinfo output')
context.set_status('PARTIAL')
self.check_for_crash(context, fps.value, frame_count.value,
context.current_job.run_time.total_seconds())
def check_for_crash(self, context, fps, frames, exec_time):
if not self.crash_threshold:
return
self.logger.debug('Checking for crashed content.')
if all([exec_time, fps, frames]):
expected_frames = fps * exec_time
ratio = frames / expected_frames
self.logger.debug('actual/expected frames: {:.2}'.format(ratio))
if ratio < self.crash_threshold:
msg = 'Content for {} appears to have crashed.\n'.format(context.current_job.spec.label)
msg += 'Content crash detected (actual/expected frames: {:.2}).'.format(ratio)
raise WorkloadError(msg)
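A worked example of the heuristic in check_for_crash(), using the default ``crash_threshold`` of 0.7 and hypothetical numbers:

fps, exec_time, frames = 60.0, 30.0, 1100   # measured FPS, run time (s), frame count
expected_frames = fps * exec_time            # 1800.0
ratio = frames / expected_frames             # ~0.61
print(ratio < 0.7)                           # True -> treated as a content crash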

wa/instruments/hwmon.py Normal file

@@ -0,0 +1,88 @@
# Copyright 2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from devlib import HwmonInstrument as _Instrument
from wa import Instrument
from wa.framework.instruments import fast
MOMENTARY_QUANTITIES = ['temperature', 'power', 'voltage', 'current', 'fps']
CUMULATIVE_QUANTITIES = ['energy', 'tx', 'tx/rx', 'frames']
class HwmonInstrument(Instrument):
name = 'hwmon'
description = """
Hardware Monitor (hwmon) is a generic Linux kernel subsystem,
providing access to hardware monitoring components like temperature or
voltage/current sensors.
Data from hwmon that are a snapshot of a fluctuating value, such as
temperature and voltage, are reported once at the beginning and once at the
end of the workload run. Data that are a cumulative total of a quantity,
such as energy (which is the cumulative total of power consumption), are
reported as the difference between the values at the beginning and at the
end of the workload run.
There is currently no functionality to filter sensors: all of the available
hwmon data will be reported.
"""
def initialize(self, context):
self.instrument = _Instrument(self.target)
def setup(self, context):
self.instrument.reset()
@fast
def start(self, context):
self.before = self.instrument.take_measurement()
@fast
def stop(self, context):
self.after = self.instrument.take_measurement()
def update_output(self, context):
measurements_before = {m.channel.label: m for m in self.before}
measurements_after = {m.channel.label: m for m in self.after}
if set(measurements_before.keys()) != set(measurements_after.keys()):
self.logger.warning(
'hwmon before/after measurements returned different entries!')
for label, measurement_after in measurements_after.iteritems():
if label not in measurements_before:
continue # We've already warned about this
measurement_before = measurements_before[label]
if measurement_after.channel.kind in MOMENTARY_QUANTITIES:
context.add_metric('{}_before'.format(label),
measurement_before.value,
measurement_before.channel.units)
context.add_metric('{}_after'.format(label),
measurement_after.value,
measurement_after.channel.units)
elif measurement_after.channel.kind in CUMULATIVE_QUANTITIES:
diff = measurement_after.value - measurement_before.value
context.add_metric(label, diff, measurement_after.channel.units)
else:
self.logger.warning(
"Don't know what to do with hwmon channel '{}'"
.format(measurement_after.channel))
def teardown(self, context):
self.instrument.teardown()
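A small sketch of the two reporting modes in update_output() above, with hypothetical channel readings:

# momentary kind (e.g. temperature): both snapshots are reported
before, after = 42.0, 55.0
print('soc_temp_before: {}'.format(before))
print('soc_temp_after: {}'.format(after))

# cumulative kind (e.g. energy): only the delta over the run is reported
energy_before, energy_after = 10.0, 16.5
print('device_energy: {}'.format(energy_after - energy_before))  # 6.5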

wa/instruments/misc.py Normal file

@@ -0,0 +1,389 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=W0613,no-member,attribute-defined-outside-init
"""
Some "standard" instruments to collect additional info about workload execution.
.. note:: The run() method of a Workload may perform some "boilerplate" as well as
the actual execution of the workload (e.g. it may contain UI automation
needed to start the workload). This "boilerplate" execution will also
be measured by these instruments. As such, they are not suitable for collecting
precise data about specific operations.
"""
import os
import re
import logging
import time
import tarfile
from itertools import izip, izip_longest
from subprocess import CalledProcessError
from devlib.exception import TargetError
from devlib.utils.android import ApkInfo
from wa import Instrument, Parameter, very_fast
from wa.framework.exception import ConfigError
from wa.framework.instruments import slow
from wa.utils.misc import as_relative, diff_tokens, write_table
from wa.utils.misc import ensure_file_directory_exists as _f
from wa.utils.misc import ensure_directory_exists as _d
from wa.utils.types import list_of_strings
logger = logging.getLogger(__name__)
class SysfsExtractor(Instrument):
name = 'sysfs_extractor'
description = """
Collects the content of a set of directories before and after workload execution
and diffs the results.
"""
mount_command = 'mount -t tmpfs -o size={} tmpfs {}'
extract_timeout = 30
tarname = 'sysfs.tar.gz'
DEVICE_PATH = 0
BEFORE_PATH = 1
AFTER_PATH = 2
DIFF_PATH = 3
parameters = [
Parameter('paths', kind=list_of_strings, mandatory=True,
description="""A list of paths to be pulled from the device. These could be directories
as well as files.""",
global_alias='sysfs_extract_dirs'),
Parameter('use_tmpfs', kind=bool, default=None,
description="""
Specifies whether tmpfs should be used to cache sysfile trees and then pull them down
as a tarball. This is significantly faster than just copying the directory trees from
the device directly, but requires root and may not work on all devices. Defaults to
``True`` if the device is rooted and ``False`` if it is not.
"""),
Parameter('tmpfs_mount_point', default=None,
description="""Mount point for tmpfs partition used to store snapshots of paths."""),
Parameter('tmpfs_size', default='32m',
description="""Size of the tempfs partition."""),
]
def initialize(self, context):
if not self.target.is_rooted and self.use_tmpfs: # pylint: disable=access-member-before-definition
raise ConfigError('use_tmpfs must be False for an unrooted device.')
elif self.use_tmpfs is None: # pylint: disable=access-member-before-definition
self.use_tmpfs = self.target.is_rooted
if self.use_tmpfs:
self.on_device_before = self.target.path.join(self.tmpfs_mount_point, 'before')
self.on_device_after = self.target.path.join(self.tmpfs_mount_point, 'after')
if not self.target.file_exists(self.tmpfs_mount_point):
self.target.execute('mkdir -p {}'.format(self.tmpfs_mount_point), as_root=True)
self.target.execute(self.mount_command.format(self.tmpfs_size, self.tmpfs_mount_point),
as_root=True)
def setup(self, context):
before_dirs = [
_d(os.path.join(context.output_directory, 'before', self._local_dir(d)))
for d in self.paths
]
after_dirs = [
_d(os.path.join(context.output_directory, 'after', self._local_dir(d)))
for d in self.paths
]
diff_dirs = [
_d(os.path.join(context.output_directory, 'diff', self._local_dir(d)))
for d in self.paths
]
self.device_and_host_paths = zip(self.paths, before_dirs, after_dirs, diff_dirs)
if self.use_tmpfs:
for d in self.paths:
before_dir = self.target.path.join(self.on_device_before,
self.target.path.dirname(as_relative(d)))
after_dir = self.target.path.join(self.on_device_after,
self.target.path.dirname(as_relative(d)))
if self.target.file_exists(before_dir):
self.target.execute('rm -rf {}'.format(before_dir), as_root=True)
self.target.execute('mkdir -p {}'.format(before_dir), as_root=True)
if self.target.file_exists(after_dir):
self.target.execute('rm -rf {}'.format(after_dir), as_root=True)
self.target.execute('mkdir -p {}'.format(after_dir), as_root=True)
@slow
def start(self, context):
if self.use_tmpfs:
for d in self.paths:
dest_dir = self.target.path.join(self.on_device_before, as_relative(d))
if '*' in dest_dir:
dest_dir = self.target.path.dirname(dest_dir)
self.target.execute('{} cp -Hr {} {}'.format(self.target.busybox, d, dest_dir),
as_root=True, check_exit_code=False)
else: # not rooted
for dev_dir, before_dir, _, _ in self.device_and_host_paths:
self.target.pull(dev_dir, before_dir)
@slow
def stop(self, context):
if self.use_tmpfs:
for d in self.paths:
dest_dir = self.target.path.join(self.on_device_after, as_relative(d))
if '*' in dest_dir:
dest_dir = self.target.path.dirname(dest_dir)
self.target.execute('{} cp -Hr {} {}'.format(self.target.busybox, d, dest_dir),
as_root=True, check_exit_code=False)
else: # not using tmpfs
for dev_dir, _, after_dir, _ in self.device_and_host_paths:
self.target.pull(dev_dir, after_dir)
def update_output(self, context):
if self.use_tmpfs:
on_device_tarball = self.target.path.join(self.target.working_directory, self.tarname)
on_host_tarball = self.target.path.join(context.output_directory, self.tarname)
self.target.execute('{} tar czf {} -C {} .'.format(self.target.busybox,
on_device_tarball,
self.tmpfs_mount_point),
as_root=True)
self.target.execute('chmod 0777 {}'.format(on_device_tarball), as_root=True)
self.target.pull(on_device_tarball, on_host_tarball)
with tarfile.open(on_host_tarball, 'r:gz') as tf:
tf.extractall(context.output_directory)
self.target.remove(on_device_tarball)
os.remove(on_host_tarball)
for paths in self.device_and_host_paths:
after_dir = paths[self.AFTER_PATH]
dev_dir = paths[self.DEVICE_PATH].strip('*') # remove potential trailing '*'
if (not os.listdir(after_dir) and
self.target.file_exists(dev_dir) and
self.target.list_directory(dev_dir)):
self.logger.error('sysfs files were not pulled from the device.')
self.device_and_host_paths.remove(paths) # Path is removed to skip diffing it
for _, before_dir, after_dir, diff_dir in self.device_and_host_paths:
_diff_sysfs_dirs(before_dir, after_dir, diff_dir)
def teardown(self, context):
self._one_time_setup_done = []
def finalize(self, context):
if self.use_tmpfs:
try:
self.target.execute('umount {}'.format(self.tmpfs_mount_point), as_root=True)
except (TargetError, CalledProcessError):
# assume a directory but not mount point
pass
self.target.execute('rm -rf {}'.format(self.tmpfs_mount_point),
as_root=True, check_exit_code=False)
def validate(self):
if not self.tmpfs_mount_point: # pylint: disable=access-member-before-definition
self.tmpfs_mount_point = self.target.get_workpath('temp-fs')
def _local_dir(self, directory):
return os.path.dirname(as_relative(directory).replace(self.target.path.sep, os.sep))
class ExecutionTimeInstrument(Instrument):
name = 'execution_time'
description = """
Measure how long it took to execute the run() methods of a Workload.
"""
def __init__(self, target, **kwargs):
super(ExecutionTimeInstrument, self).__init__(target, **kwargs)
self.start_time = None
self.end_time = None
@very_fast
def start(self, context):
self.start_time = time.time()
@very_fast
def stop(self, context):
self.end_time = time.time()
def update_output(self, context):
execution_time = self.end_time - self.start_time
context.add_metric('execution_time', execution_time, 'seconds')
class ApkVersion(Instrument):
name = 'apk_version'
description = """
Extracts APK versions for workloads that have them.
"""
def __init__(self, device, **kwargs):
super(ApkVersion, self).__init__(device, **kwargs)
self.apk_info = None
def setup(self, context):
if hasattr(context.workload, 'apk_file'):
self.apk_info = ApkInfo(context.workload.apk_file)
else:
self.apk_info = None
def update_output(self, context):
if self.apk_info:
context.result.add_metric(self.name, self.apk_info.version_name)
class InterruptStatsInstrument(Instrument):
name = 'interrupts'
description = """
Pulls the ``/proc/interrupts`` file before and after workload execution and diffs them
to show what interrupts occurred during that time.
"""
def __init__(self, target, **kwargs):
super(InterruptStatsInstrument, self).__init__(target, **kwargs)
self.before_file = None
self.after_file = None
self.diff_file = None
def setup(self, context):
self.before_file = os.path.join(context.output_directory, 'before', 'proc', 'interrupts')
self.after_file = os.path.join(context.output_directory, 'after', 'proc', 'interrupts')
self.diff_file = os.path.join(context.output_directory, 'diff', 'proc', 'interrupts')
def start(self, context):
with open(_f(self.before_file), 'w') as wfh:
wfh.write(self.target.execute('cat /proc/interrupts'))
def stop(self, context):
with open(_f(self.after_file), 'w') as wfh:
wfh.write(self.target.execute('cat /proc/interrupts'))
def update_output(self, context):
# If workload execution failed, the after_file may not have been created.
if os.path.isfile(self.after_file):
_diff_interrupt_files(self.before_file, self.after_file, _f(self.diff_file))
class DynamicFrequencyInstrument(SysfsExtractor):
name = 'cpufreq'
description = """
Collects dynamic frequency (DVFS) settings before and after workload execution.
"""
tarname = 'cpufreq.tar.gz'
parameters = [
Parameter('paths', mandatory=False, override=True),
]
def setup(self, context):
self.paths = ['/sys/devices/system/cpu']
if self.use_tmpfs:
self.paths.append('/sys/class/devfreq/*') # the '*' would cause problems for adb pull.
super(DynamicFrequencyInstrument, self).setup(context)
def validate(self):
super(DynamicFrequencyInstrument, self).validate()
if not self.tmpfs_mount_point.endswith('-cpufreq'): # pylint: disable=access-member-before-definition
self.tmpfs_mount_point += '-cpufreq'
def _diff_interrupt_files(before, after, result): # pylint: disable=R0914
output_lines = []
with open(before) as bfh:
with open(after) as ofh:
for bline, aline in izip(bfh, ofh):
bchunks = bline.strip().split()
while True:
achunks = aline.strip().split()
if achunks[0] == bchunks[0]:
diffchunks = ['']
diffchunks.append(achunks[0])
diffchunks.extend([diff_tokens(b, a) for b, a
in zip(bchunks[1:], achunks[1:])])
output_lines.append(diffchunks)
break
else: # new category appeared in the after file
diffchunks = ['>'] + achunks
output_lines.append(diffchunks)
try:
aline = ofh.next()
except StopIteration:
break
# Offset heading columns by one to allow for row labels on subsequent
# lines.
output_lines[0].insert(0, '')
# Any "columns" that do not have headings in the first row are not actually
# columns -- they are a single column where space-separated words got
# split. Merge them back together to prevent them from being
# column-aligned by write_table.
table_rows = [output_lines[0]]
num_cols = len(output_lines[0])
for row in output_lines[1:]:
table_row = row[:num_cols]
table_row.append(' '.join(row[num_cols:]))
table_rows.append(table_row)
with open(result, 'w') as wfh:
write_table(table_rows, wfh)
def _diff_sysfs_dirs(before, after, result): # pylint: disable=R0914
before_files = []
os.path.walk(before,
lambda arg, dirname, names: arg.extend([os.path.join(dirname, f) for f in names]),
before_files
)
before_files = filter(os.path.isfile, before_files)
files = [os.path.relpath(f, before) for f in before_files]
after_files = [os.path.join(after, f) for f in files]
diff_files = [os.path.join(result, f) for f in files]
for bfile, afile, dfile in zip(before_files, after_files, diff_files):
if not os.path.isfile(afile):
logger.debug('sysfs_diff: {} does not exist or is not a file'.format(afile))
continue
with open(bfile) as bfh, open(afile) as afh: # pylint: disable=C0321
with open(_f(dfile), 'w') as dfh:
for i, (bline, aline) in enumerate(izip_longest(bfh, afh), 1):
if aline is None:
logger.debug('Lines missing from {}'.format(afile))
break
bchunks = re.split(r'(\W+)', bline)
achunks = re.split(r'(\W+)', aline)
if len(bchunks) != len(achunks):
logger.debug('Token length mismatch in {} on line {}'.format(bfile, i))
dfh.write('xxx ' + bline)
continue
if ((len([c for c in bchunks if c.strip()]) == len([c for c in achunks if c.strip()]) == 2) and
(bchunks[0] == achunks[0])):
# if there are only two columns and the first column is the
# same, assume it's a "header" column and do not diff it.
dchunks = [bchunks[0]] + [diff_tokens(b, a) for b, a in zip(bchunks[1:], achunks[1:])]
else:
dchunks = [diff_tokens(b, a) for b, a in zip(bchunks, achunks)]
dfh.write(''.join(dchunks))
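_diff_sysfs_dirs() enumerates files with the Python-2-only os.path.walk(); for reference, an equivalent sketch of the same listing using os.walk() (the directory name is hypothetical):

import os

before = 'output/before'
before_files = []
for dirname, _, names in os.walk(before):
    before_files.extend(os.path.join(dirname, f) for f in names)
before_files = [f for f in before_files if os.path.isfile(f)]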


@@ -0,0 +1,17 @@
# CROSS_COMPILE=aarch64-linux-gnu- make
#
CC=gcc
ifdef DEBUG
CFLAGS=-static -lc -g
else
CFLAGS=-static -lc -O2
endif
poller: poller.c
$(CROSS_COMPILE)$(CC) $(CFLAGS) poller.c -o poller
clean:
rm -rf poller
.PHONY: clean


@@ -0,0 +1,122 @@
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=access-member-before-definition,attribute-defined-outside-init,unused-argument
import os
from wa import Instrument, Parameter, Executable
from wa.framework.exception import ConfigError, InstrumentError
from wa.utils.types import list_or_string
class FilePoller(Instrument):
name = 'file_poller'
description = """
Polls the given files at a set sample interval. The values are output in CSV format.
This instrument places a file called poller.csv in each iteration's results directory.
This file will contain a timestamp column (a float number of seconds, as produced by
the poller binary); the remaining columns will hold the contents of the polled files
at that time.
This instrument will strip any commas or newlines from the files' values
before writing them.
"""
parameters = [
Parameter('sample_interval', kind=int, default=1000,
description="""The interval between samples in mS."""),
Parameter('files', kind=list_or_string, mandatory=True,
description="""A list of paths to the files to be polled"""),
Parameter('labels', kind=list_or_string,
description="""A list of lables to be used in the CSV output for
the corresponding files. This cannot be used if
a `*` wildcard is used in a path."""),
Parameter('as_root', kind=bool, default=False,
description="""
Whether or not the poller will be run as root. This should be
used when the file you need to poll can only be accessed by root.
"""),
]
def validate(self):
if not self.files:
raise ConfigError('You must specify at least one file to poll')
if self.labels and any(['*' in f for f in self.files]):
raise ConfigError('You cannot use manual labels with `*` wildcards')
def initialize(self, context):
if not self.target.is_rooted and self.as_root:
raise ConfigError('The target is not rooted, cannot run poller as root.')
host_poller = context.resolver.get(Executable(self, self.target.abi,
"poller"))
target_poller = self.target.install(host_poller)
expanded_paths = []
for path in self.files:
if "*" in path:
for p in self.target.list_directory(path):
expanded_paths.append(p)
else:
expanded_paths.append(path)
self.files = expanded_paths
if not self.labels:
self.labels = self._generate_labels()
self.target_output_path = self.target.path.join(self.target.working_directory, 'poller.csv')
self.target_log_path = self.target.path.join(self.target.working_directory, 'poller.log')
self.command = '{} -t {} -l {} {} > {} 2>{}'.format(target_poller,
self.sample_interval * 1000,
','.join(self.labels),
' '.join(self.files),
self.target_output_path,
self.target_log_path)
def start(self, context):
self.target.kick_off(self.command, as_root=self.as_root)
def stop(self, context):
self.target.killall('poller', signal='TERM', as_root=self.as_root)
def update_output(self, context):
host_output_file = os.path.join(context.output_directory, 'poller.csv')
self.target.pull(self.target_output_path, host_output_file)
context.add_artifact('poller_output', host_output_file, kind='data')
host_log_file = os.path.join(context.output_directory, 'poller.log')
self.target.pull(self.target_log_path, host_log_file)
context.add_artifact('poller_log', host_log_file, kind='log')
with open(host_log_file) as fh:
for line in fh:
if 'ERROR' in line:
raise InstrumentError(line.strip())
if 'WARNING' in line:
self.logger.warning(line.strip())
def teardown(self, context):
self.target.remove(self.target_output_path)
self.target.remove(self.target_log_path)
def _generate_labels(self):
# Split paths into their parts
path_parts = [f.split(self.target.path.sep) for f in self.files]
# Identify which parts differ between at least two of the paths
differ_map = [len(set(x)) > 1 for x in zip(*path_parts)]
# compose labels from path parts that differ
labels = []
for pp in path_parts:
label_parts = [p for i, p in enumerate(pp[:-1])
if i >= len(differ_map) or differ_map[i]]
label_parts.append(pp[-1]) # always use file name even if same for all
labels.append('-'.join(label_parts))
return labels
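A worked example of _generate_labels() on hypothetical sysfs paths, showing that only the path components that differ (plus the file name) end up in the labels:

files = ['/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq',
         '/sys/devices/system/cpu/cpu1/cpufreq/scaling_cur_freq']
path_parts = [f.split('/') for f in files]
differ_map = [len(set(x)) > 1 for x in zip(*path_parts)]  # True only at 'cpu0'/'cpu1'
labels = []
for pp in path_parts:
    label_parts = [p for i, p in enumerate(pp[:-1])
                   if i >= len(differ_map) or differ_map[i]]
    label_parts.append(pp[-1])
    labels.append('-'.join(label_parts))
print(labels)  # ['cpu0-scaling_cur_freq', 'cpu1-scaling_cur_freq']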

Binary file not shown.

Binary file not shown.


@@ -0,0 +1,163 @@
#include <fcntl.h>
#include <stdio.h>
#include <sys/poll.h>
#include <sys/time.h>
#include <unistd.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <stdlib.h>
volatile sig_atomic_t done = 0;
void term(int signum)
{
done = 1;
}
void strip(char *s) {
char *stripped_s = s;
while(*s != '\0') {
if(*s != ',' && *s != '\n') {
*stripped_s++ = *s++;
} else {
++s;
}
}
*stripped_s = '\0';
}
typedef struct {
int fd;
char *path;
} poll_source_t;
int main(int argc, char ** argv) {
extern char *optarg;
extern int optind;
int c = 0;
int show_help = 0;
useconds_t interval = 1000000;
char buf[1024];
memset(buf, 0, sizeof(buf));
struct timeval current_time;
double time_float;
char *labels;
int labelCount = 0;
static char usage[] = "usage: %s [-h] [-t INTERVAL] [-l LABELS] FILE [FILE ...]\n"
"polls FILE(s) every INTERVAL microseconds and outputs\n"
"the results in CSV format including a timestamp to STDOUT\n"
"\n"
" -h Display this message\n"
" -t The polling sample interval in microseconds\n"
" Defaults to 1000000 (1 second)\n"
" -l Comma separated list of labels to use in the CSV\n"
" output. This should match the number of files\n";
//Handling command line arguments
while ((c = getopt(argc, argv, "ht:l:")) != -1)
{
switch(c) {
case 'h':
case '?':
default:
show_help = 1;
break;
case 't':
interval = (useconds_t)atoi(optarg);
break;
case 'l':
labels = optarg;
labelCount = 1;
int i;
for (i=0; labels[i]; i++)
labelCount += (labels[i] == ',');
}
}
if (show_help) {
fprintf(stderr, usage, argv[0]);
exit(1);
}
if (optind >= argc) {
fprintf(stderr, "ERROR: %s: missing file path(s)\n", argv[0]);
fprintf(stderr, usage, argv[0]);
exit(1);
}
int num_files = argc - optind;
poll_source_t files_to_poll[num_files];
if (labelCount && labelCount != num_files)
{
fprintf(stderr, "ERROR: %s: %d labels specified but %d files specified\n",
argv[0], labelCount, num_files);
fprintf(stderr, usage, argv[0]);
exit(1);
}
//Print headers and open files to poll
printf("time");
if(labelCount)
{
printf(",%s", labels);
}
int i;
for (i = 0; i < num_files; i++)
{
files_to_poll[i].path = argv[optind + i];
files_to_poll[i].fd = open(files_to_poll[i].path, O_RDONLY);
if (files_to_poll[i].fd == -1) {
fprintf(stderr, "ERROR: Could not open \"%s\", got: %s\n",
files_to_poll[i].path, strerror(errno));
exit(2);
}
if(!labelCount) {
printf(",%s", argv[optind + i]);
}
}
printf("\n");
//Setup SIGTERM handler
struct sigaction action;
memset(&action, 0, sizeof(struct sigaction));
action.sa_handler = term;
sigaction(SIGTERM, &action, NULL);
//Poll files
int bytes_read = 0;
while (!done) {
gettimeofday(&current_time, NULL);
time_float = (double)current_time.tv_sec;
time_float += ((double)current_time.tv_usec)/1000/1000;
printf("%f", time_float);
for (i = 0; i < num_files; i++) {
lseek(files_to_poll[i].fd, 0, SEEK_SET);
bytes_read = read(files_to_poll[i].fd, buf, sizeof(buf) - 1);
if (bytes_read < 0) {
fprintf(stderr, "WARNING: Read nothing from \"%s\"\n",
files_to_poll[i].path);
printf(",");
continue;
}
buf[bytes_read] = '\0'; // read() does not null-terminate the buffer
strip(buf);
printf(",%s", buf);
}
printf("\n");
usleep(interval);
}
//Close files
for (i = 0; i < num_files; i++)
{
close(files_to_poll[i].fd);
}
exit(0);
}
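A minimal sketch of consuming the CSV this binary emits (the ``time`` column holds the float seconds timestamp printed from time_float above; the remaining column names come from the -l labels or the polled file paths):

import csv

with open('poller.csv') as fh:
    for row in csv.DictReader(fh):
        print(float(row['time']), row)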

wa/instruments/trace-cmd.py Normal file

@@ -0,0 +1,228 @@
# Copyright 2013-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=W0613,E1101
from __future__ import division
import os
from devlib import FtraceCollector
from wa import Instrument, Parameter
from wa.framework import signal
from wa.framework.instruments import very_slow
from wa.framework.exception import InstrumentError
from wa.utils.types import list_of_strings
from wa.utils.misc import which
OUTPUT_TRACE_FILE = 'trace.dat'
OUTPUT_TEXT_FILE = '{}.txt'.format(os.path.splitext(OUTPUT_TRACE_FILE)[0])
TIMEOUT = 180
class TraceCmdInstrument(Instrument):
name = 'trace-cmd'
description = """
trace-cmd is an instrument which interacts with the ftrace Linux kernel internal
tracer.
From trace-cmd man page:
trace-cmd command interacts with the ftrace tracer that is built inside the
Linux kernel. It interfaces with the ftrace specific files found in the
debugfs file system under the tracing directory.
trace-cmd reads a list of events it will trace, which can be specified in
the config file as follows ::
trace_events = ['irq*', 'power*']
If no event is specified, a default set of events that are generally considered useful
for debugging/profiling purposes will be enabled.
The list of available events can be obtained by rooting and running the
following command line on the device ::
trace-cmd list
You may also specify the ``trace_buffer_size`` setting, which must be an integer
and will be used to set the ftrace buffer size. It will be interpreted as
KB::
trace_cmd_buffer_size = 8000
The maximum buffer size varies from device to device; trying to set a
buffer size beyond it will fail. If you plan
on collecting a lot of trace over long periods of time, the buffer size
will not be enough and you will only get trace for the last portion of your
run. To deal with this you can set the ``trace_mode`` setting to
``'record'`` (the default is ``'start'``)::
trace_cmd_mode = 'record'
This will cause trace-cmd to trace into file(s) on disk, rather than the
buffer, and so the limit for the max size of the trace is set by the
storage available on device. Bear in mind that ``'record'`` mode *is* more
intrusive than the default, so if you do not plan on generating a lot of
trace, it is best to use the default ``'start'`` mode.
.. note:: Mode names correspond to the underlying trace-cmd executable's
command used to implement them. You can find out more about what
is happening in each case from trace-cmd documentation:
https://lwn.net/Articles/341902/.
This instrument comes with a trace-cmd binary that will be copied to and used
on the device; however, post-processing is, by default, done on the host, and you must
have trace-cmd installed and in your path. On Ubuntu systems, this may be
done with::
sudo apt-get install trace-cmd
Alternatively, you may set the ``report_on_target`` parameter to ``True`` to enable on-target
processing (this is useful when running on non-Linux hosts, but is likely to take longer
and may fail on particularly resource-constrained targets).
"""
parameters = [
Parameter('events', kind=list_of_strings,
default=['sched*', 'irq*', 'power*', 'thermal*'],
global_alias='trace_events',
description="""
Specifies the list of events to be traced. Each event in the
list will be passed to trace-cmd with the -e parameter and must
be in the format accepted by trace-cmd.
"""),
Parameter('functions', kind=list_of_strings,
global_alias='trace_functions',
description="""
Specifies the list of functions to be traced.
"""),
Parameter('buffer_size', kind=int, default=None,
global_alias='trace_buffer_size',
description="""
Attempt to set ftrace buffer size to the specified value (in
KB). Default buffer size may need to be increased for
long-running workloads, or if a large number of events have
been enabled. Note: there is a maximum size to which the buffer
can be set, and it varies from device to device. Attempting
to set buffer size higher than this will fail. In that case,
this instrument will set the size to the highest possible
value by going down from the specified size in
``buffer_size_step`` intervals.
"""),
Parameter('buffer_size_step', kind=int, default=1000,
global_alias='trace_buffer_size_step',
description="""
Defines the decremental step used if the specified
``buffer_size`` could not be set. This will be subtracted
from the buffer size until setting it succeeds or the size is reduced to
1MB.
"""),
Parameter('report', kind=bool, default=True,
description="""
Specifies whether reporting should be performed once the
binary trace has been generated.
"""),
Parameter('no_install', kind=bool, default=False,
description="""
Do not install the bundled trace-cmd and use the one on the
device instead. If there is not already a trace-cmd on the
device, an error is raised.
"""),
Parameter('report_on_target', kind=bool, default=False,
description="""
When enabled, the report will be generated on the target
device rather than on the host. This is useful when trace-cmd
is not available on the host, but on-target processing is
likely to take longer and may fail on particularly
resource-constrained targets.
.. note:: Host-side report generation requires the latest
version of trace-cmd to be installed on the host
(the one in your distribution's repos may be too old).
"""),
]
def __init__(self, target, **kwargs):
super(TraceCmdInstrument, self).__init__(target, **kwargs)
self.collector = None
def initialize(self, context):
if not self.target.is_rooted:
raise InstrumentError('trace-cmd instrument cannot be used on an unrooted device.')
collector_params = dict(
events=self.events,
functions=self.functions,
buffer_size=self.buffer_size,
buffer_size_step=self.buffer_size_step,
automark=False,
autoreport=True,
autoview=False,
no_install=self.no_install,
strict=False,
report_on_target=False,
)
if self.report and self.report_on_target:
collector_params['autoreport'] = True
collector_params['report_on_target'] = True
else:
collector_params['autoreport'] = False
collector_params['report_on_target'] = False
self.collector = FtraceCollector(self.target, **collector_params)
# Register ourselves as absolute last event before and
# first after so we can mark the trace at the right time
signal.connect(self.mark_start, signal.BEFORE_WORKLOAD_EXECUTION, priority=11)
signal.connect(self.mark_stop, signal.AFTER_WORKLOAD_EXECUTION, priority=11)
def setup(self, context):
self.collector.reset()
@very_slow
def start(self, context):
self.collector.start()
@very_slow
def stop(self, context):
self.collector.stop()
def update_output(self, context): # NOQA pylint: disable=R0912
outfile = os.path.join(context.output_directory, OUTPUT_TRACE_FILE)
self.collector.get_trace(outfile)
context.add_artifact('trace-cmd-bin', outfile, 'data')
if self.report:
if not self.report_on_target:
textfile = os.path.join(context.output_directory, OUTPUT_TEXT_FILE)
self.collector.report(outfile, textfile)
context.add_artifact('trace-cmd-txt', textfile, 'export')
def teardown(self, context):
path = self.target.path.join(self.target.working_directory, OUTPUT_TRACE_FILE)
self.target.remove(path)
if self.report_on_target:
path = self.target.path.join(self.target.working_directory, OUTPUT_TEXT_FILE)
self.target.remove(path)
def validate(self):
if self.report and not self.report_on_target and not which('trace-cmd'):
raise InstrumentError('trace-cmd is not in PATH; is it installed?')
def mark_start(self, context):
self.collector.mark_start()
def mark_stop(self, context):
self.collector.mark_stop()
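Putting the docstring's settings together, a hedged config sketch using only the global aliases defined by the parameters above (WA2-style Python config, matching the snippets in the docstring; the traced function name is purely illustrative):

trace_events = ['sched*', 'irq*', 'power*', 'thermal*']   # default event set
trace_functions = ['update_cpu_load']                     # hypothetical function to trace
trace_buffer_size = 8000                                  # in KB
trace_buffer_size_step = 1000                             # decrement used if the size cannot be set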