mirror of https://github.com/ARM-software/devlib.git synced 2025-09-02 18:11:53 +01:00

devlib/trace: Refactor trace to be collector

We now have multiple `trace` instruments that do not match that
description, so they are moved into a more suitably named
hierarchy.
This commit is contained in:
Marc Bonnici
2019-11-07 11:18:18 +00:00
parent baa7ad1650
commit 19887de71e
9 changed files with 23 additions and 23 deletions

devlib/collector/__init__.py Normal file

@@ -0,0 +1,43 @@
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
class CollectorBase(object):

    def __init__(self, target):
        self.target = target
        self.logger = logging.getLogger(self.__class__.__name__)

    def reset(self):
        pass

    def start(self):
        pass

    def stop(self):
        pass

    def __enter__(self):
        self.reset()
        self.start()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.stop()

    def get_trace(self, outfile):
        pass
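
The lifecycle defined here (reset/start/stop/get_trace plus context-manager support) is what every collector in this commit implements. A minimal sketch of a hypothetical subclass, purely for illustration (UptimeCollector and its behaviour are not part of this commit; `target` is assumed to be an already-connected devlib Target):

from devlib.collector import CollectorBase

class UptimeCollector(CollectorBase):
    # Hypothetical example: snapshots /proc/uptime around a workload.
    def reset(self):
        self.before = self.after = None
    def start(self):
        self.before = self.target.read_value('/proc/uptime')
    def stop(self):
        self.after = self.target.read_value('/proc/uptime')
    def get_trace(self, outfile):
        with open(outfile, 'w') as f:
            f.write('{}\n{}\n'.format(self.before, self.after))

# __enter__() calls reset() and start(); __exit__() calls stop()
with UptimeCollector(target) as collector:
    pass  # run the workload of interest here
collector.get_trace('uptime.txt')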

devlib/collector/dmesg.py Normal file

@@ -0,0 +1,200 @@
# Copyright 2019 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import division
import re
from itertools import takewhile
from datetime import timedelta
from devlib.collector import CollectorBase
class KernelLogEntry(object):
"""
Entry of the kernel ring buffer.
:param facility: facility the entry comes from
:type facility: str
:param level: log level
:type level: str
:param timestamp: Timestamp of the entry
:type timestamp: datetime.timedelta
:param msg: Content of the entry
:type msg: str
"""
_TIMESTAMP_MSG_REGEX = re.compile(r'\[(.*?)\] (.*)')
_RAW_LEVEL_REGEX = re.compile(r'<([0-9]+)>(.*)')
_PRETTY_LEVEL_REGEX = re.compile(r'\s*([a-z]+)\s*:([a-z]+)\s*:\s*(.*)')
def __init__(self, facility, level, timestamp, msg):
self.facility = facility
self.level = level
self.timestamp = timestamp
self.msg = msg
@classmethod
def from_str(cls, line):
"""
Parses a "dmesg --decode" output line, formatted as follows:
kern :err : [3618282.310743] nouveau 0000:01:00.0: systemd-logind[988]: nv50cal_space: -16
Or the more basic output given by "dmesg -r":
<3>[3618282.310743] nouveau 0000:01:00.0: systemd-logind[988]: nv50cal_space: -16
"""
def parse_raw_level(line):
match = cls._RAW_LEVEL_REGEX.match(line)
if not match:
raise ValueError('dmesg entry format not recognized: {}'.format(line))
level, remainder = match.groups()
levels = DmesgCollector.LOG_LEVELS
# BusyBox dmesg can output priority numbers outside the list range, so wrap them around
level = levels[int(level) % len(levels)]
return level, remainder
def parse_pretty_level(line):
match = cls._PRETTY_LEVEL_REGEX.match(line)
facility, level, remainder = match.groups()
return facility, level, remainder
def parse_timestamp_msg(line):
match = cls._TIMESTAMP_MSG_REGEX.match(line)
timestamp, msg = match.groups()
timestamp = timedelta(seconds=float(timestamp.strip()))
return timestamp, msg
line = line.strip()
# If we can parse the raw prio directly, that is a basic line
try:
level, remainder = parse_raw_level(line)
facility = None
except ValueError:
facility, level, remainder = parse_pretty_level(line)
timestamp, msg = parse_timestamp_msg(remainder)
return cls(
facility=facility,
level=level,
timestamp=timestamp,
msg=msg.strip(),
)
@classmethod
def from_dmesg_output(cls, dmesg_out):
"""
Return a generator of :class:`KernelLogEntry` for each line of the
output of dmesg command.
.. note:: The same restrictions on the dmesg output format as for
:meth:`from_str` apply.
"""
for line in dmesg_out.splitlines():
if line.strip():
yield cls.from_str(line)
def __str__(self):
facility = self.facility + ': ' if self.facility else ''
return '{facility}{level}: [{timestamp}] {msg}'.format(
facility=facility,
level=self.level,
timestamp=self.timestamp.total_seconds(),
msg=self.msg,
)
class DmesgCollector(CollectorBase):
"""
Dmesg output collector.
:param level: Minimum log level to enable. All levels that are more
critical will be collected as well.
:type level: str
:param facility: Facility to record, see dmesg --help for the list.
:type facility: str
.. warning:: If BusyBox dmesg is used, facility and level will be ignored,
and the parsed entries will also lack that information.
"""
# taken from "dmesg --help"
# This list needs to be ordered by priority
LOG_LEVELS = [
"emerg", # system is unusable
"alert", # action must be taken immediately
"crit", # critical conditions
"err", # error conditions
"warn", # warning conditions
"notice", # normal but significant condition
"info", # informational
"debug", # debug-level messages
]
def __init__(self, target, level=LOG_LEVELS[-1], facility='kern'):
super(DmesgCollector, self).__init__(target)
if level not in self.LOG_LEVELS:
raise ValueError('level needs to be one of: {}'.format(
', '.join(self.LOG_LEVELS)
))
self.level = level
# Check if dmesg is the BusyBox one, or the one from util-linux in a
# recent version.
# Note: BusyBox dmesg does not support -h, but will still print the
# help with an exit code of 1
self.basic_dmesg = '--force-prefix' not in \
self.target.execute('dmesg -h', check_exit_code=False)
self.facility = facility
self.reset()
@property
def entries(self):
return KernelLogEntry.from_dmesg_output(self.dmesg_out)
def reset(self):
self.dmesg_out = None
def start(self):
self.reset()
# Empty the dmesg ring buffer
self.target.execute('dmesg -c', as_root=True)
def stop(self):
levels_list = list(takewhile(
lambda level: level != self.level,
self.LOG_LEVELS
))
levels_list.append(self.level)
if self.basic_dmesg:
cmd = 'dmesg -r'
else:
cmd = 'dmesg --facility={facility} --force-prefix --decode --level={levels}'.format(
levels=','.join(levels_list),
facility=self.facility,
)
self.dmesg_out = self.target.execute(cmd)
def get_trace(self, outfile):
with open(outfile, 'wt') as f:
f.write(self.dmesg_out + '\n')
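
A hedged usage sketch of this collector (assumes a local, root-capable Linux target; output paths are arbitrary):

from devlib import LocalLinuxTarget
from devlib.collector.dmesg import DmesgCollector

target = LocalLinuxTarget()                 # root access is needed to clear the ring buffer
dmesg = DmesgCollector(target, level='warn')

with dmesg:                                 # start() empties the buffer, stop() reads it back
    pass                                    # run the workload of interest here

dmesg.get_trace('dmesg.log')                # raw dmesg output
for entry in dmesg.entries:                 # parsed KernelLogEntry objects
    print(entry.level, entry.msg)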

devlib/collector/ftrace.py Normal file

@@ -0,0 +1,453 @@
# Copyright 2015-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import division
import os
import json
import time
import re
import subprocess
import sys
import contextlib
from pipes import quote
from devlib.collector import CollectorBase
from devlib.host import PACKAGE_BIN_DIRECTORY
from devlib.exception import TargetStableError, HostError
from devlib.utils.misc import check_output, which, memoized
TRACE_MARKER_START = 'TRACE_MARKER_START'
TRACE_MARKER_STOP = 'TRACE_MARKER_STOP'
OUTPUT_TRACE_FILE = 'trace.dat'
OUTPUT_PROFILE_FILE = 'trace_stat.dat'
DEFAULT_EVENTS = [
'cpu_frequency',
'cpu_idle',
'sched_migrate_task',
'sched_process_exec',
'sched_process_fork',
'sched_stat_iowait',
'sched_switch',
'sched_wakeup',
'sched_wakeup_new',
]
TIMEOUT = 180
# Regexps for parsing of function profiling data
CPU_RE = re.compile(r' Function \(CPU([0-9]+)\)')
STATS_RE = re.compile(r'([^ ]*) +([0-9]+) +([0-9.]+) us +([0-9.]+) us +([0-9.]+) us')
class FtraceCollector(CollectorBase):
# pylint: disable=too-many-locals,too-many-branches,too-many-statements
def __init__(self, target,
events=None,
functions=None,
tracer=None,
trace_children_functions=False,
buffer_size=None,
buffer_size_step=1000,
tracing_path='/sys/kernel/debug/tracing',
automark=True,
autoreport=True,
autoview=False,
no_install=False,
strict=False,
report_on_target=False,
trace_clock='local',
saved_cmdlines_nr=4096,
):
super(FtraceCollector, self).__init__(target)
self.events = events if events is not None else DEFAULT_EVENTS
self.functions = functions
self.tracer = tracer
self.trace_children_functions = trace_children_functions
self.buffer_size = buffer_size
self.buffer_size_step = buffer_size_step
self.tracing_path = tracing_path
self.automark = automark
self.autoreport = autoreport
self.autoview = autoview
self.strict = strict
self.report_on_target = report_on_target
self.target_output_file = target.path.join(self.target.working_directory, OUTPUT_TRACE_FILE)
text_file_name = target.path.splitext(OUTPUT_TRACE_FILE)[0] + '.txt'
self.target_text_file = target.path.join(self.target.working_directory, text_file_name)
self.target_binary = None
self.host_binary = None
self.start_time = None
self.stop_time = None
self.event_string = None
self.function_string = None
self.trace_clock = trace_clock
self.saved_cmdlines_nr = saved_cmdlines_nr
self._reset_needed = True
# pylint: disable=bad-whitespace
# Setup tracing paths
self.available_events_file = self.target.path.join(self.tracing_path, 'available_events')
self.available_functions_file = self.target.path.join(self.tracing_path, 'available_filter_functions')
self.buffer_size_file = self.target.path.join(self.tracing_path, 'buffer_size_kb')
self.current_tracer_file = self.target.path.join(self.tracing_path, 'current_tracer')
self.function_profile_file = self.target.path.join(self.tracing_path, 'function_profile_enabled')
self.marker_file = self.target.path.join(self.tracing_path, 'trace_marker')
self.ftrace_filter_file = self.target.path.join(self.tracing_path, 'set_ftrace_filter')
self.trace_clock_file = self.target.path.join(self.tracing_path, 'trace_clock')
self.save_cmdlines_size_file = self.target.path.join(self.tracing_path, 'saved_cmdlines_size')
self.available_tracers_file = self.target.path.join(self.tracing_path, 'available_tracers')
self.host_binary = which('trace-cmd')
self.kernelshark = which('kernelshark')
if not self.target.is_rooted:
raise TargetStableError('trace-cmd instrument cannot be used on an unrooted device.')
if self.autoreport and not self.report_on_target and self.host_binary is None:
raise HostError('trace-cmd binary must be installed on the host if autoreport=True.')
if self.autoview and self.kernelshark is None:
raise HostError('kernelshark binary must be installed on the host if autoview=True.')
if not no_install:
host_file = os.path.join(PACKAGE_BIN_DIRECTORY, self.target.abi, 'trace-cmd')
self.target_binary = self.target.install(host_file)
else:
if not self.target.is_installed('trace-cmd'):
raise TargetStableError('No trace-cmd found on device and no_install=True is specified.')
self.target_binary = 'trace-cmd'
# Validate required events to be traced
def event_to_regex(event):
if not event.startswith('*'):
event = '*' + event
return re.compile(event.replace('*', '.*'))
def event_is_in_list(event, events):
return any(
event_to_regex(event).match(_event)
for _event in events
)
unavailable_events = [
event
for event in self.events
if not event_is_in_list(event, self.available_events)
]
if unavailable_events:
message = 'Events not available for tracing: {}'.format(
', '.join(unavailable_events)
)
if self.strict:
raise TargetStableError(message)
else:
self.target.logger.warning(message)
selected_events = sorted(set(self.events) - set(unavailable_events))
if self.tracer and self.tracer not in self.available_tracers:
raise TargetStableError('Unsupported tracer "{}". Available tracers: {}'.format(
self.tracer, ', '.join(self.available_tracers)))
# Check for function tracing support
if self.functions:
# Validate required functions to be traced
selected_functions = []
for function in self.functions:
if function not in self.available_functions:
message = 'Function [{}] not available for tracing/profiling'.format(function)
if self.strict:
raise TargetStableError(message)
self.target.logger.warning(message)
else:
selected_functions.append(function)
# Function profiling
if self.tracer is None:
if not self.target.file_exists(self.function_profile_file):
raise TargetStableError('Function profiling not supported. '\
'A kernel build with CONFIG_FUNCTION_PROFILER enabled is required')
self.function_string = _build_trace_functions(selected_functions)
# If function profiling is enabled we always need at least one event.
# Thus, if no other events have been specified, add at least
# a tracepoint which is always available and likely to be
# triggered only a few times.
if not selected_events:
selected_events = ['sched_wakeup_new']
# Function tracing
elif self.tracer == 'function':
self.function_string = _build_graph_functions(selected_functions, False)
# Function graphing
elif self.tracer == 'function_graph':
self.function_string = _build_graph_functions(selected_functions, trace_children_functions)
self.event_string = _build_trace_events(selected_events)
@property
@memoized
def available_tracers(self):
"""
List of ftrace tracers supported by the target's kernel.
"""
return self.target.read_value(self.available_tracers_file).split(' ')
@property
@memoized
def available_events(self):
"""
List of ftrace events supported by the target's kernel.
"""
return self.target.read_value(self.available_events_file).splitlines()
@property
@memoized
def available_functions(self):
"""
List of functions whose tracing/profiling is supported by the target's kernel.
"""
return self.target.read_value(self.available_functions_file).splitlines()
def reset(self):
if self.buffer_size:
self._set_buffer_size()
self.target.execute('{} reset'.format(self.target_binary),
as_root=True, timeout=TIMEOUT)
self._reset_needed = False
def start(self):
self.start_time = time.time()
if self._reset_needed:
self.reset()
if self.tracer is not None and 'function' in self.tracer:
tracecmd_functions = self.function_string
else:
tracecmd_functions = ''
tracer_string = '-p {}'.format(self.tracer) if self.tracer else ''
# Ensure kallsyms contains addresses if possible, so that the
# collected trace contains enough data for pretty-printing function names
with contextlib.suppress(TargetStableError):
self.target.write_value('/proc/sys/kernel/kptr_restrict', 0)
self.target.write_value(self.trace_clock_file, self.trace_clock, verify=False)
try:
self.target.write_value(self.save_cmdlines_size_file, self.saved_cmdlines_nr)
except TargetStableError as e:
message = 'Could not set "save_cmdlines_size"'
if self.strict:
self.logger.error(message)
raise e
else:
self.logger.warning(message)
self.logger.debug(e)
self.target.execute(
'{} start {events} {tracer} {functions}'.format(
self.target_binary,
events=self.event_string,
tracer=tracer_string,
functions=tracecmd_functions,
),
as_root=True,
)
if self.automark:
self.mark_start()
if 'cpufreq' in self.target.modules:
self.logger.debug('Trace CPUFreq frequencies')
self.target.cpufreq.trace_frequencies()
if 'cpuidle' in self.target.modules:
self.logger.debug('Trace CPUIdle states')
self.target.cpuidle.perturb_cpus()
# Enable kernel function profiling
if self.functions and self.tracer is None:
self.target.execute('echo nop > {}'.format(self.current_tracer_file),
as_root=True)
self.target.execute('echo 0 > {}'.format(self.function_profile_file),
as_root=True)
self.target.execute('echo {} > {}'.format(self.function_string, self.ftrace_filter_file),
as_root=True)
self.target.execute('echo 1 > {}'.format(self.function_profile_file),
as_root=True)
def stop(self):
# Disable kernel function profiling
if self.functions and self.tracer is None:
self.target.execute('echo 0 > {}'.format(self.function_profile_file),
as_root=True)
if 'cpufreq' in self.target.modules:
self.logger.debug('Trace CPUFreq frequencies')
self.target.cpufreq.trace_frequencies()
self.stop_time = time.time()
if self.automark:
self.mark_stop()
self.target.execute('{} stop'.format(self.target_binary),
timeout=TIMEOUT, as_root=True)
self._reset_needed = True
def get_trace(self, outfile):
if os.path.isdir(outfile):
outfile = os.path.join(outfile, os.path.basename(self.target_output_file))
self.target.execute('{0} extract -o {1}; chmod 666 {1}'.format(self.target_binary,
self.target_output_file),
timeout=TIMEOUT, as_root=True)
# The size of trace.dat will depend on how long trace-cmd was running.
# Therefore the timeout for the pull command must also be adjusted
# accordingly.
pull_timeout = 10 * (self.stop_time - self.start_time)
self.target.pull(self.target_output_file, outfile, timeout=pull_timeout)
if not os.path.isfile(outfile):
self.logger.warning('Binary trace not pulled from device.')
else:
if self.autoreport:
textfile = os.path.splitext(outfile)[0] + '.txt'
if self.report_on_target:
self.generate_report_on_target()
self.target.pull(self.target_text_file,
textfile, timeout=pull_timeout)
else:
self.report(outfile, textfile)
if self.autoview:
self.view(outfile)
def get_stats(self, outfile):
if not (self.functions and self.tracer is None):
return
if os.path.isdir(outfile):
outfile = os.path.join(outfile, OUTPUT_PROFILE_FILE)
# pylint: disable=protected-access
output = self.target._execute_util('ftrace_get_function_stats',
as_root=True)
function_stats = {}
for line in output.splitlines():
# Match a new CPU dataset
match = CPU_RE.search(line)
if match:
cpu_id = int(match.group(1))
function_stats[cpu_id] = {}
self.logger.debug("Processing stats for CPU%d...", cpu_id)
continue
# Match a new function dataset
match = STATS_RE.search(line)
if match:
fname = match.group(1)
function_stats[cpu_id][fname] = {
'hits' : int(match.group(2)),
'time' : float(match.group(3)),
'avg' : float(match.group(4)),
's_2' : float(match.group(5)),
}
self.logger.debug(" %s: %s",
fname, function_stats[cpu_id][fname])
self.logger.debug("FTrace stats output [%s]...", outfile)
with open(outfile, 'w') as fh:
json.dump(function_stats, fh, indent=4)
self.logger.debug("FTrace function stats save in [%s]", outfile)
return function_stats
def report(self, binfile, destfile):
# To get the output of trace.dat, trace-cmd must be installed
# This is done host-side because the generated file is very large
try:
command = '{} report {} > {}'.format(self.host_binary, binfile, destfile)
self.logger.debug(command)
process = subprocess.Popen(command, stderr=subprocess.PIPE, shell=True)
_, error = process.communicate()
if sys.version_info[0] == 3:
error = error.decode(sys.stdout.encoding or 'utf-8', 'replace')
if process.returncode:
raise TargetStableError('trace-cmd returned non-zero exit code {}'.format(process.returncode))
if error:
# logged at debug level, as trace-cmd always outputs some
# errors that seem benign.
self.logger.debug(error)
if os.path.isfile(destfile):
self.logger.debug('Verifying traces.')
with open(destfile) as fh:
for line in fh:
if 'EVENTS DROPPED' in line:
self.logger.warning('Dropped events detected.')
break
else:
self.logger.debug('Trace verified.')
else:
self.logger.warning('Could not generate trace.txt.')
except OSError:
raise HostError('Could not find trace-cmd. Please make sure it is installed and is in PATH.')
def generate_report_on_target(self):
command = '{} report {} > {}'.format(self.target_binary,
self.target_output_file,
self.target_text_file)
self.target.execute(command, timeout=TIMEOUT)
def view(self, binfile):
check_output('{} {}'.format(self.kernelshark, binfile), shell=True)
def teardown(self):
self.target.remove(self.target.path.join(self.target.working_directory, OUTPUT_TRACE_FILE))
def mark_start(self):
self.target.write_value(self.marker_file, TRACE_MARKER_START, verify=False)
def mark_stop(self):
self.target.write_value(self.marker_file, TRACE_MARKER_STOP, verify=False)
def _set_buffer_size(self):
target_buffer_size = self.buffer_size
attempt_buffer_size = target_buffer_size
buffer_size = 0
floor = 1000 if target_buffer_size > 1000 else target_buffer_size
while attempt_buffer_size >= floor:
self.target.write_value(self.buffer_size_file, attempt_buffer_size, verify=False)
buffer_size = self.target.read_int(self.buffer_size_file)
if buffer_size == attempt_buffer_size:
break
else:
attempt_buffer_size -= self.buffer_size_step
if buffer_size == target_buffer_size:
return
while attempt_buffer_size < target_buffer_size:
attempt_buffer_size += self.buffer_size_step
self.target.write_value(self.buffer_size_file, attempt_buffer_size, verify=False)
buffer_size = self.target.read_int(self.buffer_size_file)
if attempt_buffer_size != buffer_size:
message = 'Failed to set trace buffer size to {}, value set was {}'
self.logger.warning(message.format(target_buffer_size, buffer_size))
break
def _build_trace_events(events):
event_string = ' '.join(['-e {}'.format(e) for e in events])
return event_string
def _build_trace_functions(functions):
function_string = " ".join(functions)
return function_string
def _build_graph_functions(functions, trace_children_functions):
opt = 'g' if trace_children_functions else 'l'
return ' '.join(
'-{} {}'.format(opt, quote(f))
for f in functions
)
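
A hedged usage sketch of FtraceCollector (assumes a rooted Android target reachable over adb, the listed events being available on its kernel, and trace-cmd on the host for the automatic text report; the device serial is hypothetical):

from devlib import AndroidTarget
from devlib.collector.ftrace import FtraceCollector

target = AndroidTarget(connection_settings={'device': 'emulator-5554'})
ftrace = FtraceCollector(target,
                         events=['sched_switch', 'cpu_frequency'],
                         buffer_size=10240)

ftrace.reset()
ftrace.start()
# ... run the workload of interest ...
ftrace.stop()
ftrace.get_trace('trace.dat')   # with autoreport=True a trace.txt is generated alongside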

devlib/collector/logcat.py Normal file

@@ -0,0 +1,73 @@
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
from devlib.collector import CollectorBase
from devlib.utils.android import LogcatMonitor
class LogcatCollector(CollectorBase):
def __init__(self, target, regexps=None):
super(LogcatCollector, self).__init__(target)
self.regexps = regexps
self._collecting = False
self._prev_log = None
self._monitor = None
def reset(self):
"""
Clear Collector data but do not interrupt collection
"""
if not self._monitor:
return
if self._collecting:
self._monitor.clear_log()
elif self._prev_log:
os.remove(self._prev_log)
self._prev_log = None
def start(self):
"""
Start collecting logcat lines
"""
self._monitor = LogcatMonitor(self.target, self.regexps)
if self._prev_log:
# Append new data collection to previous collection
self._monitor.start(self._prev_log)
else:
self._monitor.start()
self._collecting = True
def stop(self):
"""
Stop collecting logcat lines
"""
if not self._collecting:
raise RuntimeError('Logcat monitor not running, nothing to stop')
self._monitor.stop()
self._collecting = False
self._prev_log = self._monitor.logfile
def get_trace(self, outfile):
"""
Output collected logcat lines to designated file
"""
# copy self._monitor.logfile to outfile
shutil.copy(self._monitor.logfile, outfile)
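
A hedged usage sketch of LogcatCollector (assumes an Android target reachable over adb; the device serial and regexp are hypothetical):

from devlib import AndroidTarget
from devlib.collector.logcat import LogcatCollector

target = AndroidTarget(connection_settings={'device': 'emulator-5554'})
logcat = LogcatCollector(target, regexps=['ActivityManager'])  # only keep matching lines

logcat.start()
# ... run the workload of interest ...
logcat.stop()
logcat.get_trace('logcat.log')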

devlib/collector/perf.py Normal file

@@ -0,0 +1,240 @@
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
import time
from past.builtins import basestring, zip
from devlib.host import PACKAGE_BIN_DIRECTORY
from devlib.collector import CollectorBase
from devlib.utils.misc import ensure_file_directory_exists as _f
PERF_COMMAND_TEMPLATE = '{binary} {command} {options} {events} sleep 1000 > {outfile} 2>&1 '
PERF_REPORT_COMMAND_TEMPLATE= '{binary} report {options} -i {datafile} > {outfile} 2>&1 '
PERF_RECORD_COMMAND_TEMPLATE= '{binary} record {options} {events} -o {outfile}'
PERF_DEFAULT_EVENTS = [
'cpu-migrations',
'context-switches',
]
SIMPLEPERF_DEFAULT_EVENTS = [
'raw-cpu-cycles',
'raw-l1-dcache',
'raw-l1-dcache-refill',
'raw-br-mis-pred',
'raw-instruction-retired',
]
DEFAULT_EVENTS = {'perf':PERF_DEFAULT_EVENTS, 'simpleperf':SIMPLEPERF_DEFAULT_EVENTS}
class PerfCollector(CollectorBase):
"""
Perf is a Linux profiling tool based on performance counters.
Simpleperf is an Android profiling tool based on performance counters.
It is highly recommended to use perf_type = simpleperf when using this
collector on Android devices, since it recognises Android symbols in record
mode and is much more stable when reporting record .data files. For more
information see the simpleperf documentation at:
https://android.googlesource.com/platform/system/extras/+/master/simpleperf/doc/README.md
Performance counters are CPU hardware registers that count hardware events
such as instructions executed, cache misses suffered, or branches
mispredicted. They form a basis for profiling applications to trace dynamic
control flow and identify hotspots.
perf accepts options and events. If no option is given the default '-a' is
used. The default events are cpu-migrations and context-switches for perf,
and raw-cpu-cycles, raw-l1-dcache, raw-l1-dcache-refill, raw-br-mis-pred and
raw-instruction-retired for simpleperf. Both options and events can be
specified in the config file.
Events must be provided as a list, for example ::
perf_events = ['migrations', 'cs']
The available events can be listed by running the following on the
device ::
perf list
simpleperf list
Options are provided as a single string, for example ::
perf_options = '-a -i'
The available options are documented in ::
man perf-stat
"""
def __init__(self,
target,
perf_type='perf',
command='stat',
events=None,
optionstring=None,
report_options=None,
labels=None,
force_install=False):
super(PerfCollector, self).__init__(target)
self.force_install = force_install
self.labels = labels
self.report_options = report_options
# Validate parameters
if isinstance(optionstring, list):
self.optionstrings = optionstring
else:
self.optionstrings = [optionstring]
if perf_type in ['perf', 'simpleperf']:
self.perf_type = perf_type
else:
raise ValueError('Invalid perf type: {}, must be perf or simpleperf'.format(perf_type))
if not events:
self.events = DEFAULT_EVENTS[self.perf_type]
else:
self.events = events
if isinstance(self.events, basestring):
self.events = [self.events]
if not self.labels:
self.labels = ['perf_{}'.format(i) for i in range(len(self.optionstrings))]
if len(self.labels) != len(self.optionstrings):
raise ValueError('The number of labels must match the number of optstrings provided for perf.')
if command in ['stat', 'record']:
self.command = command
else:
raise ValueError('Unsupported perf command, must be stat or record')
self.binary = self.target.get_installed(self.perf_type)
if self.force_install or not self.binary:
self.binary = self._deploy_perf()
self._validate_events(self.events)
self.commands = self._build_commands()
def reset(self):
self.target.killall(self.perf_type, as_root=self.target.is_rooted)
self.target.remove(self.target.get_workpath('TemporaryFile*'))
for label in self.labels:
filepath = self._get_target_file(label, 'data')
self.target.remove(filepath)
filepath = self._get_target_file(label, 'rpt')
self.target.remove(filepath)
def start(self):
for command in self.commands:
self.target.kick_off(command)
def stop(self):
self.target.killall(self.perf_type, signal='SIGINT',
as_root=self.target.is_rooted)
# perf doesn't transmit the signal to its sleep call so handled here:
self.target.killall('sleep', as_root=self.target.is_rooted)
# NB: we hope that no other "important" sleep is on-going
# pylint: disable=arguments-differ
def get_trace(self, outdir):
for label in self.labels:
if self.command == 'record':
self._wait_for_data_file_write(label, outdir)
self._pull_target_file_to_host(label, 'rpt', outdir)
else:
self._pull_target_file_to_host(label, 'out', outdir)
def _deploy_perf(self):
host_executable = os.path.join(PACKAGE_BIN_DIRECTORY,
self.target.abi, self.perf_type)
return self.target.install(host_executable)
def _get_target_file(self, label, extension):
return self.target.get_workpath('{}.{}'.format(label, extension))
def _build_commands(self):
commands = []
for opts, label in zip(self.optionstrings, self.labels):
if self.command == 'stat':
commands.append(self._build_perf_stat_command(opts, self.events, label))
else:
commands.append(self._build_perf_record_command(opts, label))
return commands
def _build_perf_stat_command(self, options, events, label):
event_string = ' '.join(['-e {}'.format(e) for e in events])
command = PERF_COMMAND_TEMPLATE.format(binary = self.binary,
command = self.command,
options = options or '',
events = event_string,
outfile = self._get_target_file(label, 'out'))
return command
def _build_perf_report_command(self, report_options, label):
command = PERF_REPORT_COMMAND_TEMPLATE.format(binary=self.binary,
options=report_options or '',
datafile=self._get_target_file(label, 'data'),
outfile=self._get_target_file(label, 'rpt'))
return command
def _build_perf_record_command(self, options, label):
event_string = ' '.join(['-e {}'.format(e) for e in self.events])
command = PERF_RECORD_COMMAND_TEMPLATE.format(binary=self.binary,
options=options or '',
events=event_string,
outfile=self._get_target_file(label, 'data'))
return command
def _pull_target_file_to_host(self, label, extension, outdir):
target_file = self._get_target_file(label, extension)
host_relpath = os.path.basename(target_file)
host_file = _f(os.path.join(outdir, host_relpath))
self.target.pull(target_file, host_file)
def _wait_for_data_file_write(self, label, outdir):
data_file_finished_writing = False
max_tries = 80
current_tries = 0
while not data_file_finished_writing:
files = self.target.execute('cd {} && ls'.format(self.target.get_workpath('')))
# Perf stores data in temporary files whilst writing to the data output file. Check if they have been removed.
if 'TemporaryFile' in files and current_tries <= max_tries:
time.sleep(0.25)
current_tries += 1
else:
if current_tries >= max_tries:
self.logger.warning('''writing {}.data file took longer than expected,
file may not have written correctly'''.format(label))
data_file_finished_writing = True
report_command = self._build_perf_report_command(self.report_options, label)
self.target.execute(report_command)
def _validate_events(self, events):
available_events_string = self.target.execute('{} list'.format(self.perf_type))
available_events = available_events_string.splitlines()
for available_event in available_events:
if available_event == '':
continue
if 'OR' in available_event:
available_events.append(available_event.split('OR')[1])
available_events[available_events.index(available_event)] = available_event.split()[0].strip()
# Raw hex event codes can also be passed in that do not appear on perf/simpleperf list, prefixed with 'r'
raw_event_code_regex = re.compile(r"^r(0x|0X)?[A-Fa-f0-9]+$")
for event in events:
if event in available_events or re.match(raw_event_code_regex, event):
continue
else:
raise ValueError('Event: {} is not in available event list for {}'.format(event, self.perf_type))
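
A hedged usage sketch of PerfCollector in stat mode (assumes an Android target with simpleperf deployable and the listed raw events available; the device serial is hypothetical):

from devlib import AndroidTarget
from devlib.collector.perf import PerfCollector

target = AndroidTarget(connection_settings={'device': 'emulator-5554'})
perf = PerfCollector(target,
                     perf_type='simpleperf',
                     command='stat',
                     events=['raw-cpu-cycles', 'raw-instruction-retired'],
                     optionstring='-a')

perf.reset()
perf.start()
# ... run the workload of interest ...
perf.stop()
perf.get_trace('results/')      # pulls one <label>.out file per optionstring into results/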

devlib/collector/screencapture.py Normal file

@@ -0,0 +1,98 @@
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import sys
import threading
import time
from devlib.collector import CollectorBase
from devlib.exception import WorkerThreadError
class ScreenCapturePoller(threading.Thread):
def __init__(self, target, period, output_path=None, timeout=30):
super(ScreenCapturePoller, self).__init__()
self.target = target
self.logger = logging.getLogger('screencapture')
self.period = period
self.timeout = timeout
self.stop_signal = threading.Event()
self.lock = threading.Lock()
self.last_poll = 0
self.daemon = True
self.exc = None
self.output_path = output_path
def run(self):
self.logger.debug('Starting screen capture polling')
try:
while True:
if self.stop_signal.is_set():
break
with self.lock:
current_time = time.time()
if (current_time - self.last_poll) >= self.period:
self.poll()
time.sleep(0.5)
except Exception: # pylint: disable=W0703
self.exc = WorkerThreadError(self.name, sys.exc_info())
def stop(self):
self.logger.debug('Stopping screen capture polling')
self.stop_signal.set()
self.join(self.timeout)
if self.is_alive():
self.logger.error('Could not join screen capture poller thread.')
if self.exc:
raise self.exc # pylint: disable=E0702
def poll(self):
self.last_poll = time.time()
self.target.capture_screen(os.path.join(self.output_path, "screencap_{ts}.png"))
class ScreenCaptureCollector(CollectorBase):
def __init__(self, target, output_path=None, period=None):
super(ScreenCaptureCollector, self).__init__(target)
self._collecting = False
self.output_path = output_path
self.period = period
self.target = target
self._poller = ScreenCapturePoller(self.target, self.period,
self.output_path)
def reset(self):
pass
def start(self):
"""
Start collecting the screenshots
"""
self._poller.start()
self._collecting = True
def stop(self):
"""
Stop collecting the screenshots
"""
if not self._collecting:
raise RuntimeError('Screen capture collector is not running, nothing to stop')
self._poller.stop()
self._collecting = False
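
A hedged usage sketch of ScreenCaptureCollector (assumes an Android target and a 'screenshots' directory that already exists on the host; the device serial is hypothetical):

from devlib import AndroidTarget
from devlib.collector.screencapture import ScreenCaptureCollector

target = AndroidTarget(connection_settings={'device': 'emulator-5554'})
screencap = ScreenCaptureCollector(target, output_path='screenshots', period=5)  # one capture every 5 seconds

screencap.start()
# ... run the workload of interest ...
screencap.stop()                # screencap_<ts>.png files are left under output_path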

devlib/collector/serial_trace.py Normal file

@@ -0,0 +1,94 @@
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import shutil
from tempfile import NamedTemporaryFile
from pexpect.exceptions import TIMEOUT
from devlib.collector import CollectorBase
from devlib.utils.serial_port import get_connection
class SerialTraceCollector(CollectorBase):
@property
def collecting(self):
return self._collecting
def __init__(self, target, serial_port, baudrate, timeout=20):
super(SerialTraceCollector, self).__init__(target)
self.serial_port = serial_port
self.baudrate = baudrate
self.timeout = timeout
self._serial_target = None
self._conn = None
self._tmpfile = None
self._collecting = False
def reset(self):
if self._collecting:
raise RuntimeError("reset was called whilst collecting")
if self._tmpfile:
self._tmpfile.close()
self._tmpfile = None
def start(self):
if self._collecting:
raise RuntimeError("start was called whilst collecting")
self._tmpfile = NamedTemporaryFile()
start_marker = "-------- Starting serial logging --------\n"
self._tmpfile.write(start_marker.encode('utf-8'))
self._serial_target, self._conn = get_connection(port=self.serial_port,
baudrate=self.baudrate,
timeout=self.timeout,
logfile=self._tmpfile,
init_dtr=0)
self._collecting = True
def stop(self):
if not self._collecting:
raise RuntimeError("stop was called whilst not collecting")
# We expect the below to fail, but we need to get pexpect to
# do something so that it interacts with the serial device,
# and hence updates the logfile.
try:
self._serial_target.expect(".", timeout=1)
except TIMEOUT:
pass
self._serial_target.close()
del self._conn
stop_marker = "-------- Stopping serial logging --------\n"
self._tmpfile.write(stop_marker.encode('utf-8'))
self._collecting = False
def get_trace(self, outfile):
if self._collecting:
raise RuntimeError("get_trace was called whilst collecting")
self._tmpfile.flush()
shutil.copy(self._tmpfile.name, outfile)
self._tmpfile.close()
self._tmpfile = None
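
A hedged usage sketch of SerialTraceCollector (the SSH settings, serial port and baud rate are hypothetical and board-specific):

from devlib import LinuxTarget
from devlib.collector.serial_trace import SerialTraceCollector

target = LinuxTarget(connection_settings={'host': '192.168.0.10',
                                          'username': 'root',
                                          'password': 'root'})
serial_log = SerialTraceCollector(target, serial_port='/dev/ttyUSB0', baudrate=115200)

with serial_log:                # reset()/start() on entry, stop() on exit
    pass                        # exercise the board while its console output is logged
serial_log.get_trace('console.log')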

devlib/collector/systrace.py Normal file

@@ -0,0 +1,161 @@
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import subprocess
from shutil import copyfile
from tempfile import NamedTemporaryFile
from devlib.exception import TargetStableError, HostError
from devlib.collector import CollectorBase
import devlib.utils.android
from devlib.utils.misc import memoized
DEFAULT_CATEGORIES = [
'gfx',
'view',
'sched',
'freq',
'idle'
]
class SystraceCollector(CollectorBase):
"""
A trace collector based on Systrace
For more details, see https://developer.android.com/studio/command-line/systrace
:param target: Devlib target
:type target: AndroidTarget
:param outdir: Working directory to use on the host
:type outdir: str
:param categories: Systrace categories to trace. See `available_categories`
:type categories: list(str)
:param buffer_size: Buffer size in kb
:type buffer_size: int
:param strict: Raise an exception if any of the requested categories
are not available
:type strict: bool
"""
@property
@memoized
def available_categories(self):
lines = subprocess.check_output(
[self.systrace_binary, '-l'], universal_newlines=True
).splitlines()
return [line.split()[0] for line in lines if line]
def __init__(self, target,
categories=None,
buffer_size=None,
strict=False):
super(SystraceCollector, self).__init__(target)
self.categories = categories or DEFAULT_CATEGORIES
self.buffer_size = buffer_size
self._systrace_process = None
self._tmpfile = None
# Try to find a systrace binary
self.systrace_binary = None
platform_tools = devlib.utils.android.platform_tools
systrace_binary_path = os.path.join(platform_tools, 'systrace', 'systrace.py')
if not os.path.isfile(systrace_binary_path):
raise HostError('Could not find any systrace binary under {}'.format(platform_tools))
self.systrace_binary = systrace_binary_path
# Filter the requested categories
for category in self.categories:
if category not in self.available_categories:
message = 'Category [{}] not available for tracing'.format(category)
if strict:
raise TargetStableError(message)
self.logger.warning(message)
self.categories = list(set(self.categories) & set(self.available_categories))
if not self.categories:
raise TargetStableError('None of the requested categories are available')
def __del__(self):
self.reset()
def _build_cmd(self):
self._tmpfile = NamedTemporaryFile()
# pylint: disable=attribute-defined-outside-init
self.systrace_cmd = 'python2 -u {} -o {} -e {}'.format(
self.systrace_binary,
self._tmpfile.name,
self.target.adb_name
)
if self.buffer_size:
self.systrace_cmd += ' -b {}'.format(self.buffer_size)
self.systrace_cmd += ' {}'.format(' '.join(self.categories))
def reset(self):
if self._systrace_process:
self.stop()
if self._tmpfile:
self._tmpfile.close()
self._tmpfile = None
def start(self):
if self._systrace_process:
raise RuntimeError("Tracing is already underway, call stop() first")
self.reset()
self._build_cmd()
self._systrace_process = subprocess.Popen(
self.systrace_cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
shell=True,
universal_newlines=True
)
self._systrace_process.stdout.read(1)
def stop(self):
if not self._systrace_process:
raise RuntimeError("No tracing to stop, call start() first")
# Systrace expects <enter> to stop
self._systrace_process.communicate('\n')
self._systrace_process = None
def get_trace(self, outfile):
if self._systrace_process:
raise RuntimeError("Tracing is underway, call stop() first")
if not self._tmpfile:
raise RuntimeError("No tracing data available")
copyfile(self._tmpfile.name, outfile)
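
A hedged usage sketch of SystraceCollector (assumes systrace.py is present under the host's Android platform-tools and the requested categories are available; the device serial is hypothetical):

from devlib import AndroidTarget
from devlib.collector.systrace import SystraceCollector

target = AndroidTarget(connection_settings={'device': 'emulator-5554'})
systrace = SystraceCollector(target, categories=['sched', 'freq'], buffer_size=4096)

systrace.start()
# ... run the workload of interest ...
systrace.stop()
systrace.get_trace('trace.html')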