1
0
mirror of https://github.com/ARM-software/workload-automation.git synced 2024-10-06 19:01:15 +01:00

cpustates: Added the ability to configure how a missing start marker is handled.

cpustates can now handle the lack of a start marker in three ways:

 - try: If the start marker is present, only the correct section of the trace
        will be used; if it is not, the whole trace will be used.
 - error: An error will be raised if the start marker is missing.
 - ignore: The markers are ignored and the whole trace is always used.
This commit is contained in:
Sebastian Goscik 2016-06-06 16:07:01 +01:00
parent 1811a8b733
commit f276d4e39f
3 changed files with 72 additions and 33 deletions

View File

@ -109,6 +109,19 @@ class CpuStatesProcessor(ResultProcessor):
The values generated are floating point numbers, normalised based on the maximum The values generated are floating point numbers, normalised based on the maximum
frequency of the cluster. frequency of the cluster.
"""), """),
Parameter('start_marker_handling', kind=str, default="try",
allowed_values=['ignore', 'try', 'error'],
description="""
The trace-cmd instrument inserts a marker into the trace to indicate the beginning
of workload execution. In some cases, this marker may be missing in the final
output (e.g. due to trace buffer overrun). This parameter specifies how a missing
start marker will be handled:
:`ignore`: The start marker will be ignored. All events in the trace will be used.
:`error`: An error will be raised if the start marker is not found in the trace.
:`try`: If the start marker is not found, all events in the trace will be used.
""")
] ]
def validate(self): def validate(self):
@ -201,10 +214,10 @@ class CpuStatesProcessor(ResultProcessor):
timeline_csv_file=timeline_csv_file, timeline_csv_file=timeline_csv_file,
cpu_utilisation=cpu_utilisation, cpu_utilisation=cpu_utilisation,
max_freq_list=self.max_freq_list, max_freq_list=self.max_freq_list,
start_marker_handling=self.start_marker_handling,
) )
parallel_report = reports.pop(0) parallel_report = reports.pop(0)
powerstate_report = reports.pop(0) powerstate_report = reports.pop(0)
if parallel_report is None: if parallel_report is None:
self.logger.warning('No power state reports generated; are power ' self.logger.warning('No power state reports generated; are power '
'events enabled in the trace?') 'events enabled in the trace?')

View File

@ -24,6 +24,7 @@ from collections import defaultdict
import argparse import argparse
from wlauto.utils.trace_cmd import TraceCmdTrace, TRACE_MARKER_START, TRACE_MARKER_STOP from wlauto.utils.trace_cmd import TraceCmdTrace, TRACE_MARKER_START, TRACE_MARKER_STOP
from wlauto.exceptions import DeviceError
logger = logging.getLogger('power') logger = logging.getLogger('power')
@ -620,15 +621,29 @@ def build_idle_domains(core_clusters, # NOQA
def report_power_stats(trace_file, idle_state_names, core_names, core_clusters, def report_power_stats(trace_file, idle_state_names, core_names, core_clusters,
num_idle_states, first_cluster_state=sys.maxint, num_idle_states, first_cluster_state=sys.maxint,
first_system_state=sys.maxint, use_ratios=False, first_system_state=sys.maxint, use_ratios=False,
timeline_csv_file=None, filter_trace=False, timeline_csv_file=None, cpu_utilisation=None,
cpu_utilisation=None, max_freq_list=None): max_freq_list=None, start_marker_handling='error'):
# pylint: disable=too-many-locals # pylint: disable=too-many-locals,too-many-branches
trace = TraceCmdTrace(filter_markers=filter_trace) trace = TraceCmdTrace(trace_file,
filter_markers=False,
names=['cpu_idle', 'cpu_frequency', 'print'])
wait_for_start_marker = True
if start_marker_handling == "error" and not trace.has_start_marker:
raise DeviceError("Start marker was not found in the trace")
elif start_marker_handling == "try":
wait_for_start_marker = trace.has_start_marker
if not wait_for_start_marker:
logger.warning("Did not see a START marker in the trace, "
"state residency and parallelism statistics may be inaccurate.")
elif start_marker_handling == "ignore":
wait_for_start_marker = False
ps_processor = PowerStateProcessor(core_clusters, ps_processor = PowerStateProcessor(core_clusters,
num_idle_states=num_idle_states, num_idle_states=num_idle_states,
first_cluster_state=first_cluster_state, first_cluster_state=first_cluster_state,
first_system_state=first_system_state, first_system_state=first_system_state,
wait_for_start_marker=not filter_trace) wait_for_start_marker=wait_for_start_marker)
reporters = [ reporters = [
ParallelStats(core_clusters, use_ratios), ParallelStats(core_clusters, use_ratios),
PowerStateStats(core_names, idle_state_names, use_ratios) PowerStateStats(core_names, idle_state_names, use_ratios)
@ -642,7 +657,7 @@ def report_power_stats(trace_file, idle_state_names, core_names, core_clusters,
else: else:
logger.warning('Maximum frequencies not found. Cannot normalise. Skipping CPU Utilisation Timeline') logger.warning('Maximum frequencies not found. Cannot normalise. Skipping CPU Utilisation Timeline')
event_stream = trace.parse(trace_file, names=['cpu_idle', 'cpu_frequency', 'print']) event_stream = trace.parse()
transition_stream = stream_cpu_power_transitions(event_stream) transition_stream = stream_cpu_power_transitions(event_stream)
power_state_stream = ps_processor.process(transition_stream) power_state_stream = ps_processor.process(transition_stream)
core_state_stream = gather_core_states(power_state_stream) core_state_stream = gather_core_states(power_state_stream)
@ -677,9 +692,9 @@ def main():
first_system_state=args.first_system_state, first_system_state=args.first_system_state,
use_ratios=args.ratios, use_ratios=args.ratios,
timeline_csv_file=args.timeline_file, timeline_csv_file=args.timeline_file,
filter_trace=(not args.no_trace_filter),
cpu_utilisation=args.cpu_utilisation, cpu_utilisation=args.cpu_utilisation,
max_freq_list=args.max_freq_list, max_freq_list=args.max_freq_list,
start_marker_handling=args.start_marker_handling,
) )
parallel_report.write(os.path.join(args.output_directory, 'parallel.csv')) parallel_report.write(os.path.join(args.output_directory, 'parallel.csv'))
powerstate_report.write(os.path.join(args.output_directory, 'cpustate.csv')) powerstate_report.write(os.path.join(args.output_directory, 'cpustate.csv'))
@ -709,11 +724,6 @@ def parse_arguments(): # NOQA
help=''' help='''
Output directory where reports will be placed. Output directory where reports will be placed.
''') ''')
parser.add_argument('-F', '--no-trace-filter', action='store_true', default=False,
help='''
Normally, only the trace between begin and end marker is used. This disables
the filtering so the entire trace file is considered.
''')
parser.add_argument('-c', '--core-names', action=SplitListAction, parser.add_argument('-c', '--core-names', action=SplitListAction,
help=''' help='''
Comma-separated list of core names for the device on which the trace Comma-separated list of core names for the device on which the trace
@ -766,6 +776,18 @@ def parse_arguments(): # NOQA
Only required if --cpu-utilisation is set. Only required if --cpu-utilisation is set.
This is used to normalise the frequencies to obtain percentage utilisation. This is used to normalise the frequencies to obtain percentage utilisation.
''') ''')
parser.add_argument('-M', '--start-marker-handling', metavar='HANDLING', default="try",
choices=["error", "try", "ignore"],
help='''
The trace-cmd instrument inserts a marker into the trace to indicate the beginning
of workload execution. In some cases, this marker may be missing in the final
output (e.g. due to trace buffer overrun). This parameter specifies how a missing
start marker will be handled:
ignore: The start marker will be ignored. All events in the trace will be used.
error: An error will be raised if the start marker is not found in the trace.
try: If the start marker is not found, all events in the trace will be used.
''')
args = parser.parse_args() args = parser.parse_args()

View File

@ -17,7 +17,7 @@ import re
import logging import logging
from itertools import chain from itertools import chain
from wlauto.utils.misc import isiterable from wlauto.utils.misc import isiterable, memoized
from wlauto.utils.types import numeric from wlauto.utils.types import numeric
@ -215,37 +215,38 @@ EMPTY_CPU_REGEX = re.compile(r'CPU \d+ is empty')
class TraceCmdTrace(object): class TraceCmdTrace(object):
def __init__(self, filter_markers=True): @property
self.filter_markers = filter_markers @memoized
def has_start_marker(self):
with open(self.file_path) as fh:
for line in fh:
if TRACE_MARKER_START in line:
return True
return False
def parse(self, filepath, names=None, check_for_markers=True): # pylint: disable=too-many-branches,too-many-locals def __init__(self, file_path, names=None, filter_markers=True):
self.filter_markers = filter_markers
self.file_path = file_path
self.names = names or []
def parse(self): # pylint: disable=too-many-branches,too-many-locals
""" """
This is a generator for the trace event stream. This is a generator for the trace event stream.
""" """
inside_maked_region = False inside_marked_region = False
filters = [re.compile('^{}$'.format(n)) for n in names or []] filters = [re.compile('^{}$'.format(n)) for n in self.names or []]
if check_for_markers: with open(self.file_path) as fh:
with open(filepath) as fh:
for line in fh:
if TRACE_MARKER_START in line:
break
else:
# maker not found force filtering by marker to False
self.filter_markers = False
with open(filepath) as fh:
for line in fh: for line in fh:
# if processing trace markers, skip marker lines as well as all # if processing trace markers, skip marker lines as well as all
# lines outside marked region # lines outside marked region
if self.filter_markers: if self.filter_markers:
if not inside_maked_region: if not inside_marked_region:
if TRACE_MARKER_START in line: if TRACE_MARKER_START in line:
inside_maked_region = True inside_marked_region = True
continue continue
elif TRACE_MARKER_STOP in line: elif TRACE_MARKER_STOP in line:
inside_maked_region = False break
continue
match = DROPPED_EVENTS_REGEX.search(line) match = DROPPED_EVENTS_REGEX.search(line)
if match: if match:
@ -282,4 +283,7 @@ class TraceCmdTrace(object):
if isinstance(body_parser, basestring) or isinstance(body_parser, re._pattern_type): # pylint: disable=protected-access if isinstance(body_parser, basestring) or isinstance(body_parser, re._pattern_type): # pylint: disable=protected-access
body_parser = regex_body_parser(body_parser) body_parser = regex_body_parser(body_parser)
yield TraceCmdEvent(parser=body_parser, **match.groupdict()) yield TraceCmdEvent(parser=body_parser, **match.groupdict())
else:
if self.filter_markers and inside_marked_region:
logger.warning('Did not encounter a stop marker in trace')