mirror of
https://github.com/ARM-software/workload-automation.git
synced 2025-02-20 20:09:11 +00:00
commit
63ff8987ea
@ -19,7 +19,7 @@ from collections import OrderedDict
|
||||
|
||||
from wlauto import ResultProcessor, Parameter
|
||||
from wlauto.core import signal
|
||||
from wlauto.exceptions import ConfigError
|
||||
from wlauto.exceptions import ConfigError, DeviceError
|
||||
from wlauto.instrumentation import instrument_is_installed
|
||||
from wlauto.utils.power import report_power_stats
|
||||
from wlauto.utils.misc import unique
|
||||
@ -109,6 +109,19 @@ class CpuStatesProcessor(ResultProcessor):
|
||||
The values generated are floating point numbers, normalised based on the maximum
|
||||
frequency of the cluster.
|
||||
"""),
|
||||
Parameter('start_marker_handling', kind=str, default="try",
|
||||
allowed_values=['ignore', 'try', 'error'],
|
||||
description="""
|
||||
|
||||
The trace-cmd instrument inserts a marker into the trace to indicate the beginning
|
||||
of workload execution. In some cases, this marker may be missing in the final
|
||||
output (e.g. due to trace buffer overrun). This parameter specifies how a missing
|
||||
start marker will be handled:
|
||||
|
||||
:`ignore`: The start marker will be ignored. All events in the trace will be used.
|
||||
:`error`: An error will be raised if the start marker is not found in the trace.
|
||||
:`try`: If the start marker is not found, all events in the trace will be used.
|
||||
""")
|
||||
]
|
||||
|
||||
def validate(self):
|
||||
@ -169,7 +182,10 @@ class CpuStatesProcessor(ResultProcessor):
|
||||
self.logger.debug('Nudging all cores awake...')
|
||||
for i in xrange(len(device.core_names)):
|
||||
command = device.busybox + ' taskset 0x{:x} {}'
|
||||
device.execute(command.format(1 << i, 'ls'))
|
||||
try:
|
||||
device.execute(command.format(1 << i, 'ls'))
|
||||
except DeviceError:
|
||||
self.logger.warning("Failed to nudge CPU %s, has it been hot plugged out?", i)
|
||||
|
||||
def process_iteration_result(self, result, context):
|
||||
trace = context.get_artifact('txttrace')
|
||||
@ -198,10 +214,10 @@ class CpuStatesProcessor(ResultProcessor):
|
||||
timeline_csv_file=timeline_csv_file,
|
||||
cpu_utilisation=cpu_utilisation,
|
||||
max_freq_list=self.max_freq_list,
|
||||
start_marker_handling=self.start_marker_handling,
|
||||
)
|
||||
parallel_report = reports.pop(0)
|
||||
powerstate_report = reports.pop(0)
|
||||
|
||||
if parallel_report is None:
|
||||
self.logger.warning('No power state reports generated; are power '
|
||||
'events enabled in the trace?')
|
||||
|
@ -823,3 +823,21 @@ def sha256(path, chunk=2048):
|
||||
|
||||
def urljoin(*parts):
    """Join URL fragments with single ``'/'`` separators.

    Trailing slashes are stripped from every fragment (including the
    last one), so ``urljoin('http://a/', 'b/')`` yields ``'http://a/b'``.
    Leading slashes are preserved.
    """
    trimmed = [fragment.rstrip('/') for fragment in parts]
    return '/'.join(trimmed)
|
||||
|
||||
|
||||
__memo_cache = {}
|
||||
|
||||
|
||||
def memoized(func):
    """A decorator for memoizing functions and methods.

    Results are cached per decorated function, keyed on the ``repr`` of
    the function plus the ``id()`` of each positional argument and the
    string form of each keyword argument.  Keying on ``id()`` permits
    unhashable arguments, but NOTE(review): an id may be re-used once an
    argument object has been garbage collected, which could produce a
    stale cache hit -- callers should pass long-lived arguments.
    """
    # Cache local to this decoration: repr(func) embeds the object
    # address, so a shared module-global dict gained nothing and only
    # grew without bound.
    cache = {}
    func_id = repr(func)

    def memoize_wrapper(*args, **kwargs):
        # ';' separates the positional and keyword portions of the key
        # so calls such as f(1) and f(x=1) cannot collide.
        id_string = func_id + ','.join(str(id(a)) for a in args)
        id_string += ';' + ','.join('{}={}'.format(k, v)
                                    for k, v in kwargs.items())
        if id_string not in cache:
            cache[id_string] = func(*args, **kwargs)
        return cache[id_string]

    return memoize_wrapper
|
||||
|
@ -24,6 +24,7 @@ from collections import defaultdict
|
||||
import argparse
|
||||
|
||||
from wlauto.utils.trace_cmd import TraceCmdTrace, TRACE_MARKER_START, TRACE_MARKER_STOP
|
||||
from wlauto.exceptions import DeviceError
|
||||
|
||||
|
||||
logger = logging.getLogger('power')
|
||||
@ -157,6 +158,8 @@ class PowerStateProcessor(object):
|
||||
self.requested_states = defaultdict(lambda: -1) # cpu_id -> requeseted state
|
||||
self.wait_for_start_marker = wait_for_start_marker
|
||||
self._saw_start_marker = False
|
||||
self._saw_stop_marker = False
|
||||
self.exceptions = []
|
||||
|
||||
idle_state_domains = build_idle_domains(core_clusters,
|
||||
num_states=num_idle_states,
|
||||
@ -173,9 +176,17 @@ class PowerStateProcessor(object):
|
||||
|
||||
def process(self, event_stream):
    """Consume *event_stream*, yielding the power state after each event.

    States are only yielded once the START marker has been seen (unless
    ``wait_for_start_marker`` is unset), and processing stops at the
    STOP marker.  Errors raised while handling individual events are
    collected in ``self.exceptions`` rather than aborting the stream,
    so one malformed event does not discard the rest of the trace.
    """
    for event in event_stream:
        try:
            next_state = self.update_power_state(event)
            if self._saw_start_marker or not self.wait_for_start_marker:
                yield next_state
            if self._saw_stop_marker:
                break
        except Exception as e:  # pylint: disable=broad-except
            self.exceptions.append(e)
    else:
        # for/else: the loop was exhausted without hitting the STOP
        # marker (no break), which may mean truncated statistics.
        if self.wait_for_start_marker:
            logger.warning("Did not see a STOP marker in the trace")
|
||||
|
||||
def update_power_state(self, event):
|
||||
"""
|
||||
@ -191,7 +202,7 @@ class PowerStateProcessor(object):
|
||||
if event.name == 'START':
|
||||
self._saw_start_marker = True
|
||||
elif event.name == 'STOP':
|
||||
self._saw_start_marker = False
|
||||
self._saw_stop_marker = True
|
||||
else:
|
||||
raise ValueError('Unexpected event type: {}'.format(event.kind))
|
||||
return self.power_state.copy()
|
||||
@ -610,15 +621,29 @@ def build_idle_domains(core_clusters, # NOQA
|
||||
def report_power_stats(trace_file, idle_state_names, core_names, core_clusters,
|
||||
num_idle_states, first_cluster_state=sys.maxint,
|
||||
first_system_state=sys.maxint, use_ratios=False,
|
||||
timeline_csv_file=None, filter_trace=False,
|
||||
cpu_utilisation=None, max_freq_list=None):
|
||||
# pylint: disable=too-many-locals
|
||||
trace = TraceCmdTrace(filter_markers=filter_trace)
|
||||
timeline_csv_file=None, cpu_utilisation=None,
|
||||
max_freq_list=None, start_marker_handling='error'):
|
||||
# pylint: disable=too-many-locals,too-many-branches
|
||||
trace = TraceCmdTrace(trace_file,
|
||||
filter_markers=False,
|
||||
names=['cpu_idle', 'cpu_frequency', 'print'])
|
||||
|
||||
wait_for_start_marker = True
|
||||
if start_marker_handling == "error" and not trace.has_start_marker:
|
||||
raise DeviceError("Start marker was not found in the trace")
|
||||
elif start_marker_handling == "try":
|
||||
wait_for_start_marker = trace.has_start_marker
|
||||
if not wait_for_start_marker:
|
||||
logger.warning("Did not see a START marker in the trace, "
|
||||
"state residency and parallelism statistics may be inaccurate.")
|
||||
elif start_marker_handling == "ignore":
|
||||
wait_for_start_marker = False
|
||||
|
||||
ps_processor = PowerStateProcessor(core_clusters,
|
||||
num_idle_states=num_idle_states,
|
||||
first_cluster_state=first_cluster_state,
|
||||
first_system_state=first_system_state,
|
||||
wait_for_start_marker=not filter_trace)
|
||||
wait_for_start_marker=wait_for_start_marker)
|
||||
reporters = [
|
||||
ParallelStats(core_clusters, use_ratios),
|
||||
PowerStateStats(core_names, idle_state_names, use_ratios)
|
||||
@ -632,7 +657,7 @@ def report_power_stats(trace_file, idle_state_names, core_names, core_clusters,
|
||||
else:
|
||||
logger.warning('Maximum frequencies not found. Cannot normalise. Skipping CPU Utilisation Timeline')
|
||||
|
||||
event_stream = trace.parse(trace_file, names=['cpu_idle', 'cpu_frequency', 'print'])
|
||||
event_stream = trace.parse()
|
||||
transition_stream = stream_cpu_power_transitions(event_stream)
|
||||
power_state_stream = ps_processor.process(transition_stream)
|
||||
core_state_stream = gather_core_states(power_state_stream)
|
||||
@ -641,6 +666,11 @@ def report_power_stats(trace_file, idle_state_names, core_names, core_clusters,
|
||||
for reporter in reporters:
|
||||
reporter.update(timestamp, states)
|
||||
|
||||
if ps_processor.exceptions:
|
||||
logger.warning('There were errors while processing trace:')
|
||||
for e in ps_processor.exceptions:
|
||||
logger.warning(str(e))
|
||||
|
||||
reports = []
|
||||
for reporter in reporters:
|
||||
report = reporter.report()
|
||||
@ -650,6 +680,7 @@ def report_power_stats(trace_file, idle_state_names, core_names, core_clusters,
|
||||
|
||||
def main():
|
||||
# pylint: disable=unbalanced-tuple-unpacking
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
args = parse_arguments()
|
||||
parallel_report, powerstate_report = report_power_stats(
|
||||
trace_file=args.infile,
|
||||
@ -661,9 +692,9 @@ def main():
|
||||
first_system_state=args.first_system_state,
|
||||
use_ratios=args.ratios,
|
||||
timeline_csv_file=args.timeline_file,
|
||||
filter_trace=(not args.no_trace_filter),
|
||||
cpu_utilisation=args.cpu_utilisation,
|
||||
max_freq_list=args.max_freq_list,
|
||||
start_marker_handling=args.start_marker_handling,
|
||||
)
|
||||
parallel_report.write(os.path.join(args.output_directory, 'parallel.csv'))
|
||||
powerstate_report.write(os.path.join(args.output_directory, 'cpustate.csv'))
|
||||
@ -693,11 +724,6 @@ def parse_arguments(): # NOQA
|
||||
help='''
|
||||
Output directory where reports will be placed.
|
||||
''')
|
||||
parser.add_argument('-F', '--no-trace-filter', action='store_true', default=False,
|
||||
help='''
|
||||
Normally, only the trace between begin and end marker is used. This disables
|
||||
the filtering so the entire trace file is considered.
|
||||
''')
|
||||
parser.add_argument('-c', '--core-names', action=SplitListAction,
|
||||
help='''
|
||||
Comma-separated list of core names for the device on which the trace
|
||||
@ -750,6 +776,18 @@ def parse_arguments(): # NOQA
|
||||
Only required if --cpu-utilisation is set.
|
||||
This is used to normalise the frequencies to obtain percentage utilisation.
|
||||
''')
|
||||
parser.add_argument('-M', '--start-marker-handling', metavar='HANDLING', default="try",
|
||||
choices=["error", "try", "ignore"],
|
||||
help='''
|
||||
The trace-cmd instrument inserts a marker into the trace to indicate the beginning
|
||||
of workload execution. In some cases, this marker may be missing in the final
|
||||
output (e.g. due to trace buffer overrun). This parameter specifies how a missing
|
||||
start marker will be handled:
|
||||
|
||||
ignore: The start marker will be ignored. All events in the trace will be used.
|
||||
error: An error will be raised if the start marker is not found in the trace.
|
||||
try: If the start marker is not found, all events in the trace will be used.
|
||||
''')
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
|
@ -17,7 +17,7 @@ import re
|
||||
import logging
|
||||
from itertools import chain
|
||||
|
||||
from wlauto.utils.misc import isiterable
|
||||
from wlauto.utils.misc import isiterable, memoized
|
||||
from wlauto.utils.types import numeric
|
||||
|
||||
|
||||
@ -215,37 +215,38 @@ EMPTY_CPU_REGEX = re.compile(r'CPU \d+ is empty')
|
||||
|
||||
class TraceCmdTrace(object):
|
||||
|
||||
def __init__(self, filter_markers=True):
|
||||
self.filter_markers = filter_markers
|
||||
@property
@memoized
def has_start_marker(self):
    """Whether the trace file contains the START marker line.

    The file is scanned at most once per instance (result is memoized).
    """
    with open(self.file_path) as fh:
        return any(TRACE_MARKER_START in line for line in fh)
|
||||
|
||||
def parse(self, filepath, names=None, check_for_markers=True): # pylint: disable=too-many-branches,too-many-locals
|
||||
def __init__(self, file_path, names=None, filter_markers=True):
    """Create a parser for the trace-cmd text dump at *file_path*.

    :param file_path: path to the textual trace-cmd output file.
    :param names: event names to parse; an empty/absent list means all.
    :param filter_markers: when set, only events between the START and
        STOP markers are reported by ``parse()``.
    """
    self.file_path = file_path
    self.names = names if names else []
    self.filter_markers = filter_markers
||||
|
||||
def parse(self): # pylint: disable=too-many-branches,too-many-locals
|
||||
"""
|
||||
This is a generator for the trace event stream.
|
||||
|
||||
"""
|
||||
inside_maked_region = False
|
||||
filters = [re.compile('^{}$'.format(n)) for n in names or []]
|
||||
if check_for_markers:
|
||||
with open(filepath) as fh:
|
||||
for line in fh:
|
||||
if TRACE_MARKER_START in line:
|
||||
break
|
||||
else:
|
||||
# maker not found force filtering by marker to False
|
||||
self.filter_markers = False
|
||||
|
||||
with open(filepath) as fh:
|
||||
inside_marked_region = False
|
||||
filters = [re.compile('^{}$'.format(n)) for n in self.names or []]
|
||||
with open(self.file_path) as fh:
|
||||
for line in fh:
|
||||
# if processing trace markers, skip marker lines as well as all
|
||||
# lines outside marked region
|
||||
if self.filter_markers:
|
||||
if not inside_maked_region:
|
||||
if not inside_marked_region:
|
||||
if TRACE_MARKER_START in line:
|
||||
inside_maked_region = True
|
||||
inside_marked_region = True
|
||||
continue
|
||||
elif TRACE_MARKER_STOP in line:
|
||||
inside_maked_region = False
|
||||
continue
|
||||
break
|
||||
|
||||
match = DROPPED_EVENTS_REGEX.search(line)
|
||||
if match:
|
||||
@ -282,4 +283,7 @@ class TraceCmdTrace(object):
|
||||
if isinstance(body_parser, basestring) or isinstance(body_parser, re._pattern_type): # pylint: disable=protected-access
|
||||
body_parser = regex_body_parser(body_parser)
|
||||
yield TraceCmdEvent(parser=body_parser, **match.groupdict())
|
||||
else:
|
||||
if self.filter_markers and inside_marked_region:
|
||||
logger.warning('Did not encounter a stop marker in trace')
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user