mirror of https://github.com/ARM-software/workload-automation.git synced 2024-10-06 10:51:13 +01:00
This commit is contained in:
Sebastian Goscik 2016-06-30 17:29:59 +01:00
parent e258999e0a
commit b0e500e2a8
11 changed files with 301 additions and 41 deletions

View File

@@ -20,11 +20,11 @@ import shutil
import wlauto
from wlauto import Command, settings
from wlauto.core.agenda import Agenda
from wlauto.core.execution import Executor
from wlauto.utils.log import add_log_file
from wlauto.core.configuration import RunConfiguration
from wlauto.core.configuration import RunConfiguration, WAConfiguration
from wlauto.core import pluginloader
from wlauto.core.configuration_parsers import Agenda, ConfigFile, EnvrironmentVars, CommandLineArgs
class RunCommand(Command):
@@ -32,6 +32,103 @@ class RunCommand(Command):
name = 'run'
description = 'Execute automated workloads on a remote device and process the resulting output.'
def initialize(self, context):
self.parser.add_argument('agenda', metavar='AGENDA',
help="""
Agenda for this workload automation run. This defines which
workloads will be executed, how many times, with which
tunables, etc. See example agendas in {} for an example of
how this file should be structured.
""".format(os.path.dirname(wlauto.__file__)))
self.parser.add_argument('-d', '--output-directory', metavar='DIR', default=None,
help="""
Specify a directory where the output will be generated. If
the directory already exists, the script will abort unless -f
option (see below) is used, in which case the contents of the
directory will be overwritten. If this option is not specified,
then {} will be used instead.
""".format("settings.default_output_directory")) # TODO: Fix this!
self.parser.add_argument('-f', '--force', action='store_true',
help="""
Overwrite output directory if it exists. By default, the script
will abort in this situation to prevent accidental data loss.
""")
self.parser.add_argument('-i', '--id', action='append', dest='only_run_ids', metavar='ID',
help="""
Specify a workload spec ID from an agenda to run. If this is
specified, only that particular spec will be run, and other
workloads in the agenda will be ignored. This option may be
used to specify multiple IDs.
""")
self.parser.add_argument('--disable', action='append', dest='instruments_to_disable',
metavar='INSTRUMENT', help="""
Specify an instrument to disable from the command line. This
is equivalent to adding "~{metavar}" to the instrumentation list in
the agenda. This can be used to temporarily disable a troublesome
instrument for a particular run without introducing permanent
change to the config (which one might then forget to revert).
This option may be specified multiple times.
""")
def execute(self, args):
# STAGE 1: Gather configuration
env = EnvrironmentVars()
args = CommandLineArgs(args)
# STAGE 2.1a: Early WAConfiguration, required to find config files
if env.user_directory:
settings.set("user_directory", env.user_directory)
if env.plugin_paths:
settings.set("plugin_paths", env.plugin_paths)
# STAGE 1 continued
# TODO: Check for config.py and convert to yaml, if it fails, warn user.
configs = [ConfigFile(os.path.join(settings.user_directory, 'config.yaml'))]
for c in args.config:
configs.append(ConfigFile(c))
agenda = Agenda(args.agenda)
configs.append(Agenda.config)
# STAGE 2: Sending configuration to the correct place & merging in
# order of priority.
#
# Priorities (lowest to highest):
# - Environment Variables
# - config.yaml from `user_directory`
# - config files passed via command line
# (the first specified will be the first to be applied)
# - Agenda
# - Command line configuration e.g. disabled instrumentation.
# STAGE 2.1b: WAConfiguration
for config in configs:
for config_point in settings.configuration.keys():
if hasattr(config, config_point):
settings.set(config_point, getattr(config, config_point))
def _parse_config(self):
pass
def _serialize_raw_config(self, env, args, agenda, configs):
pod = {}
pod['environment_variables'] = env.to_pod()
pod['commandline_arguments'] = args.to_pod()
pod['agenda'] = agenda.to_pod()
pod['config_files'] = [c.to_pod() for c in configs]
return pod
def _serialize_final_config(self):
pass
class OldRunCommand(Command):
name = 'old_run'
description = 'Execute automated workloads on a remote device and process the resulting output.'
def initialize(self, context):
self.parser.add_argument('agenda', metavar='AGENDA',
help="""

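To illustrate the STAGE 2 priority ordering above: configuration sources are applied lowest-priority first, so each later source overwrites values set by earlier ones. Below is a minimal, self-contained sketch of that loop; Settings, Source and apply_sources are illustrative stand-ins, not the WA classes.

# Illustrative sketch only -- these names are assumptions, not the WA API.
class Settings(object):
    """Holds configuration points; later set() calls overwrite earlier ones."""

    def __init__(self, points):
        self.configuration = dict.fromkeys(points)

    def set(self, name, value):
        if name not in self.configuration:
            raise KeyError('Unknown configuration point: {}'.format(name))
        self.configuration[name] = value


class Source(object):
    """Stand-in for a parsed config source (env vars, config file, agenda)."""

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)


def apply_sources(settings, sources):
    """Apply sources lowest-priority first so the last one wins."""
    for source in sources:
        for point in settings.configuration:
            value = getattr(source, point, None)
            if value is not None:
                settings.set(point, value)


settings = Settings(['user_directory', 'plugin_paths'])
env = Source(user_directory='~/.workload_automation')
agenda_config = Source(plugin_paths=['/opt/wa/plugins'])
apply_sources(settings, [env, agenda_config])  # env first, agenda last
print(settings.configuration)
# {'user_directory': '~/.workload_automation', 'plugin_paths': ['/opt/wa/plugins']}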
View File

@@ -34,3 +34,7 @@ class JarFile(FileResource):
class ApkFile(FileResource):
name = 'apk'
def __init__(self, owner, version):
super(ApkFile, self).__init__(owner)
self.version = version

View File

@@ -9,7 +9,7 @@ Add your configuration to that file instead.
# configuration for WA and gives EXAMPLES of other configuration available. It
# is not supposed to be an exhaustive list.
# PLEASE REFER TO WA DOCUMENTATION FOR THE COMPLETE LIST OF AVAILABLE
# EXTENSIONS AND THEIR CONFIGURATION.
# EXTENSIONS AND THEIR configuration.
# This defines when the device will be rebooted during Workload Automation execution. #

View File

@@ -56,13 +56,16 @@ def main():
init_argument_parser(parser)
commands = load_commands(parser.add_subparsers(dest='command')) # each command will add its own subparser
args = parser.parse_args()
settings.set("verbosity", args.verbose)
settings.load_user_config()
# TODO: Set this stuff properly, i.e. don't use settings (if possible)
#settings.set("verbosity", args.verbose)
#settings.load_user_config()
#settings.debug = args.debug
if args.config:
if not os.path.exists(args.config):
raise ConfigError("Config file {} not found".format(args.config))
settings.load_config_file(args.config)
for config in args.config:
if not os.path.exists(config):
raise ConfigError("Config file {} not found".format(config))
init_logging(settings.verbosity)
command = commands[args.command]

View File

@@ -56,7 +56,7 @@ from wlauto.core.resolver import ResourceResolver
from wlauto.core.result import ResultManager, IterationResult, RunResult
from wlauto.exceptions import (WAError, ConfigError, TimeoutError, InstrumentError,
DeviceError, DeviceNotRespondingError)
from wlauto.utils.misc import ensure_directory_exists as _d, get_traceback, merge_dicts, format_duration
from wlauto.utils.misc import ensure_directory_exists as _d, get_traceback, format_duration
from wlauto.utils.serializer import json
# The maximum number of reboot attempts for an iteration.
@@ -92,10 +92,8 @@ class RunInfo(object):
def to_dict(self):
d = copy(self.__dict__)
d['uuid'] = str(self.uuid)
del d['config']
d = merge_dicts(d, self.config.to_dict())
return d
#TODO: pod
class ExecutionContext(object):
"""

View File

@@ -26,10 +26,11 @@ from itertools import chain
from copy import copy
from wlauto.exceptions import NotFoundError, LoaderError, ValidationError, ConfigError
from wlauto.utils.misc import isiterable, ensure_directory_exists as _d, walk_modules, load_class, merge_dicts, get_article
from wlauto.utils.misc import (isiterable, ensure_directory_exists as _d,
walk_modules, load_class, merge_dicts_simple, get_article)
from wlauto.core.configuration import settings
from wlauto.utils.types import identifier, integer, boolean
from wlauto.core.configuration import ConfigurationPoint, ConfigurationPointCollection
from wlauto.core.configuration import ConfigurationPoint
MODNAME_TRANS = string.maketrans(':/\\.', '____')
@@ -711,7 +712,8 @@ class PluginLoader(object):
"""
real_name, alias_config = self.resolve_alias(name)
base_default_config = self.get_plugin_class(real_name).get_default_config()
return merge_dicts(base_default_config, alias_config, list_duplicates='last', dict_type=OrderedDict)
return merge_dicts_simple(base_default_config, alias_config)
def list_plugins(self, kind=None):
"""

View File

@@ -44,7 +44,7 @@ from datetime import datetime
from wlauto.core.plugin import Plugin
from wlauto.exceptions import WAError
from wlauto.utils.types import numeric
from wlauto.utils.misc import enum_metaclass, merge_dicts
from wlauto.utils.misc import enum_metaclass, merge_dicts_simple
class ResultManager(object):
@@ -263,9 +263,8 @@ class IterationResult(object):
self.artifacts = []
def add_metric(self, name, value, units=None, lower_is_better=False, classifiers=None):
classifiers = merge_dicts(self.classifiers, classifiers or {},
list_duplicates='last', should_normalize=False)
self.metrics.append(Metric(name, value, units, lower_is_better, classifiers))
self.metrics.append(Metric(name, value, units, lower_is_better,
merge_dicts_simple(self.classifiers, classifiers)))
def has_metric(self, name):
for metric in self.metrics:

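The reworked add_metric call above layers per-metric classifiers over the iteration-level ones via merge_dicts_simple. A simplified stand-alone sketch of that layering follows, using plain dicts instead of the WA Metric class and a plain update() where the real helper delegates to merge_config_values; all names are illustrative.

def layer_classifiers(iteration_classifiers, metric_classifiers):
    # Per-metric keys win on collision; the real merge_dicts_simple merges
    # each value via merge_config_values (see the misc.py changes below).
    result = dict(iteration_classifiers or {})
    result.update(metric_classifiers or {})
    return result


iteration_classifiers = {'workload': 'dhrystone', 'iteration': 2}
metric_classifiers = {'threads': 4}
print(layer_classifiers(iteration_classifiers, metric_classifiers))
# {'workload': 'dhrystone', 'iteration': 2, 'threads': 4}  (key order may vary)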
View File

@@ -17,7 +17,6 @@ import os
from collections import OrderedDict
from wlauto import Instrument, Parameter
from wlauto.exceptions import ConfigError, InstrumentError
from wlauto.utils.misc import merge_dicts
from wlauto.utils.types import caseless_string
@@ -132,13 +131,13 @@ class FreqSweep(Instrument):
for freq in sweep_spec['frequencies']:
spec = old_spec.copy()
if 'runtime_params' in sweep_spec:
spec.runtime_parameters = merge_dicts(spec.runtime_parameters,
sweep_spec['runtime_params'],
dict_type=OrderedDict)
spec.runtime_parameters = spec.runtime_parameters.copy()
spec.runtime_parameters.update(sweep_spec['runtime_params'])
if 'workload_params' in sweep_spec:
spec.workload_parameters = merge_dicts(spec.workload_parameters,
sweep_spec['workload_params'],
dict_type=OrderedDict)
spec.workload_parameters = spec.workload_parameters.copy()
spec.workload_parameters.update(sweep_spec['workload_params'])
spec.runtime_parameters['{}_governor'.format(sweep_spec['cluster'])] = "userspace"
spec.runtime_parameters['{}_frequency'.format(sweep_spec['cluster'])] = freq
spec.id = '{}_{}_{}'.format(spec.id, sweep_spec['label'], freq)
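The freq_sweep change replaces merge_dicts with a plain copy-and-update while generating one spec per frequency. A stripped-down sketch of the same pattern, using dicts in place of WA workload specs (the field names here are illustrative assumptions):

def specs_for_sweep(old_spec, sweep_spec):
    # One new spec per frequency: copy the base spec, overlay any sweep
    # overrides, then pin the cluster's governor to userspace at that frequency.
    for freq in sweep_spec['frequencies']:
        spec = dict(old_spec)
        runtime = dict(spec.get('runtime_parameters', {}))
        runtime.update(sweep_spec.get('runtime_params', {}))
        runtime['{}_governor'.format(sweep_spec['cluster'])] = 'userspace'
        runtime['{}_frequency'.format(sweep_spec['cluster'])] = freq
        spec['runtime_parameters'] = runtime
        spec['id'] = '{}_{}_{}'.format(old_spec['id'], sweep_spec['label'], freq)
        yield spec


base = {'id': 'dhrystone', 'runtime_parameters': {}}
sweep = {'cluster': 'a57', 'label': 'big', 'frequencies': [600000, 1100000]}
for s in specs_for_sweep(base, sweep):
    print(s['id'])  # dhrystone_big_600000, then dhrystone_big_1100000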

View File

@@ -20,8 +20,5 @@ def init_argument_parser(parser):
parser.add_argument('-c', '--config', help='specify an additional config.py')
parser.add_argument('-v', '--verbose', action='count',
help='The scripts will produce verbose output.')
parser.add_argument('--debug', action='store_true',
help='Enable debug mode. Note: this implies --verbose.')
parser.add_argument('--version', action='version', version='%(prog)s {}'.format(get_wa_version()))
return parser

View File

@@ -36,20 +36,19 @@ import hashlib
from datetime import datetime, timedelta
from operator import mul, itemgetter
from StringIO import StringIO
from itertools import cycle, groupby
from itertools import cycle, groupby, chain
from functools import partial
from distutils.spawn import find_executable
import yaml
from dateutil import tz
from devlib.utils.misc import ABI_MAP, check_output, walk_modules, \
ensure_directory_exists, ensure_file_directory_exists, \
merge_dicts, merge_lists, normalize, convert_new_lines, \
escape_quotes, escape_single_quotes, escape_double_quotes, \
isiterable, getch, as_relative, ranges_to_list, \
list_to_ranges, list_to_mask, mask_to_list, which, \
get_cpu_mask, unique
from devlib.utils.misc import (ABI_MAP, check_output, walk_modules,
ensure_directory_exists, ensure_file_directory_exists,
normalize, convert_new_lines, get_cpu_mask, unique,
escape_quotes, escape_single_quotes, escape_double_quotes,
isiterable, getch, as_relative, ranges_to_list,
list_to_ranges, list_to_mask, mask_to_list, which)
check_output_logger = logging.getLogger('check_output')
@@ -469,3 +468,128 @@ def istextfile(fileobj, blocksize=512):
# occurrences of _text_characters from the block
nontext = block.translate(None, _text_characters)
return float(len(nontext)) / len(block) <= 0.30
def categorize(v):
if hasattr(v, 'merge_with') and hasattr(v, 'merge_into'):
return 'o'
elif hasattr(v, 'iteritems'):
return 'm'
elif isiterable(v):
return 's'
elif v is None:
return 'n'
else:
return 'c'
def merge_config_values(base, other):
"""
This is used to merge two objects, typically when setting the value of a
``ConfigurationPoint``. First, both objects are categorized into
c: A scalar value. Basically, most objects. These values
are treated as atomic, and not mergeable.
s: A sequence. Anything iterable that is not a dict or
a string (strings are considered scalars).
m: A key-value mapping. ``dict`` and its derivatives.
n: ``None``.
o: A mergeable object; this is an object that implements both
``merge_with`` and ``merge_into`` methods.
The merge rules based on the two categories are then as follows:
(c1, c2) --> c2
(s1, s2) --> s1 . s2
(m1, m2) --> m1 . m2
(c, s) --> [c] . s
(s, c) --> s . [c]
(s, m) --> s . [m]
(m, s) --> [m] . s
(m, c) --> ERROR
(c, m) --> ERROR
(o, X) --> o.merge_with(X)
(X, o) --> o.merge_into(X)
(X, n) --> X
(n, X) --> X
where:
'.' means concatenation (for maps, concatenation of (k, v) streams
then converted back into a map). If the types of the two objects
differ, the type of ``other`` is used for the result.
'X' means "any category"
'[]' is used to indicate a literal sequence (not necessarily a ``list``);
when this is concatenated with an actual sequence, that sequence's
type is used.
notes:
- When a mapping is combined with a sequence, that mapping is
treated as a scalar value.
- When combining two mergeable objects, they're combined using
``o1.merge_with(o2)`` (_not_ using o2.merge_into(o1)).
- Combining anything with ``None`` yields that value, irrespective
of the order. So a ``None`` value is equivalent to the corresponding
item being omitted.
- When both values are scalars, merging is equivalent to overwriting.
- There is no recursion (e.g. if map values are lists, they will not
be merged; ``other`` will overwrite ``base`` values). If complicated
merging semantics (such as recursion) are required, they should be
implemented within custom mergeable types (i.e. those that implement
``merge_with`` and ``merge_into``).
While this can be used as a generic "combine any two arbitrary objects"
function, the semantics have been selected specifically for merging
configuration point values.
"""
cat_base = categorize(base)
cat_other = categorize(other)
if cat_base == 'n':
return other
elif cat_other == 'n':
return base
if cat_base == 'o':
return base.merge_with(other)
elif cat_other == 'o':
return other.merge_into(base)
if cat_base == 'm':
if cat_other == 's':
return merge_sequencies([base], other)
elif cat_other == 'm':
return merge_maps(base, other)
else:
message = 'merge error ({}, {}): "{}" and "{}"'
raise ValueError(message.format(cat_base, cat_other, base, other))
elif cat_base == 's':
if cat_other == 's':
return merge_sequencies(base, other)
else:
return merge_sequencies(base, [other])
else: # cat_base == 'c'
if cat_other == 's':
return merge_sequencies([base], other)
elif cat_other == 'm':
message = 'merge error ({}, {}): "{}" and "{}"'
raise ValueError(message.format(cat_base, cat_other, base, other))
else:
return other
def merge_sequencies(s1, s2):
return type(s2)(unique(chain(s1, s2)))
def merge_maps(m1, m2):
return type(m2)(chain(m1.iteritems(), m2.iteritems()))
def merge_dicts_simple(base, other):
result = base.copy()
for key, value in (other or {}).iteritems():
result[key] = merge_config_values(result.get(key), value)
return result
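The merge rules in the merge_config_values docstring are easiest to see on concrete inputs. The calls below are a hedged usage sketch: they assume the functions defined above are importable from wlauto.utils.misc as of this commit (Python 2), and the expected results follow the rule table, including the duplicate-dropping done by unique() inside merge_sequencies.

# Hedged usage sketch -- assumes this commit's wlauto.utils.misc (Python 2).
from wlauto.utils.misc import merge_config_values

print(merge_config_values(None, 'medium'))            # (n, c) -> 'medium'
print(merge_config_values('low', 'high'))             # (c, c) -> 'high' (other wins)
print(merge_config_values('cpu0', ['cpu1', 'cpu2']))  # (c, s) -> ['cpu0', 'cpu1', 'cpu2']
print(merge_config_values(['a', 'b'], ['b', 'c']))    # (s, s) -> ['a', 'b', 'c'] (duplicates dropped)
print(merge_config_values({'x': 1}, {'y': 2}))        # (m, m) -> {'x': 1, 'y': 2}
merge_config_values({'x': 1}, 'scalar')               # (m, c) -> raises ValueError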

View File

@@ -341,14 +341,22 @@ class prioritylist(object):
return self.size
class enable_disable_list(list):
class toggle_set(set):
"""
A set that contains items to enable or disable something.
A prefix of ``~`` is used to denote disabling something, for example
the set ['apples', '~oranges', 'cherries'] enables both ``apples``
and ``cherries`` but disables ``oranges``.
"""
def merge_with(self, other):
new_self = copy(self)
return enable_disable_list.merge(other, new_self)
return toggle_set.merge(other, new_self)
def merge_into(self, other):
other = copy(other)
return enable_disable_list.merge(self, other)
return toggle_set.merge(self, other)
@staticmethod
def merge(source, dest):
@@ -364,4 +372,33 @@ class enable_disable_list(list):
return dest
def values(self):
"""
returns a list of enabled items.
"""
return [item for item in self if not item.startswith('~')]
def conflicts_with(self, other):
"""
Checks if any items in ``other`` conflict with items already in this set.
Args:
other (list): The list to be checked against
Returns:
A list of items in ``other`` that conflict with items in this set
"""
conflicts = []
for item in other:
if item.startswith('~') and item[1:] in self:
conflicts.append(item)
if not item.startswith('~') and ('~' + item) in self:
conflicts.append(item)
return conflicts
class ID(str):
def merge_with(self, other):
return '_'.join([self, other])
def merge_into(self, other):
return '_'.join([other, self])
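To tie the --disable option's "~" prefix to the toggle_set semantics above, here is a small self-contained sketch. It is a simplified stand-in for the class in this diff, not the class itself; in particular, the real toggle_set.merge also resolves entries that toggle each other.

class ToggleSetSketch(set):
    """Items prefixed with '~' disable something; unprefixed items enable it."""

    def values(self):
        # Only the enabled items, with any '~' entries dropped.
        return [item for item in self if not item.startswith('~')]

    def conflicts_with(self, other):
        # An item conflicts if the other collection toggles it the opposite way.
        conflicts = []
        for item in other:
            if item.startswith('~') and item[1:] in self:
                conflicts.append(item)
            if not item.startswith('~') and ('~' + item) in self:
                conflicts.append(item)
        return conflicts


instruments = ToggleSetSketch(['energy_model', 'trace-cmd', '~hwmon'])
print(sorted(instruments.values()))                         # ['energy_model', 'trace-cmd']
print(instruments.conflicts_with(['hwmon', '~trace-cmd']))  # ['hwmon', '~trace-cmd']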