mirror of https://github.com/ARM-software/workload-automation.git
synced 2025-02-20 20:09:11 +00:00

commit 067f76adf3

setup.py (2 changes)
@@ -82,7 +82,7 @@ params = dict(
    ],
    extras_require={
        'other': ['jinja2', 'pandas>=0.13.1'],
-       'test': ['nose'],
+       'test': ['nose', 'mock'],
        'mongodb': ['pymongo'],
        'notify': ['notify2'],
        'doc': ['sphinx'],
@@ -14,10 +14,11 @@
#

from wlauto.core.configuration import settings  # NOQA
-from wlauto.core.device_manager import DeviceManager, RuntimeParameter, CoreParameter  # NOQA
+from wlauto.core.device_manager import DeviceManager  # NOQA
from wlauto.core.command import Command  # NOQA
from wlauto.core.workload import Workload  # NOQA
-from wlauto.core.plugin import Parameter, Artifact, Alias  # NOQA
+from wlauto.core.plugin import Artifact, Alias  # NOQA
+from wlauto.core.configuration.configuration import ConfigurationPoint as Parameter
import wlauto.core.pluginloader as PluginLoader  # NOQA
from wlauto.core.instrumentation import Instrument  # NOQA
from wlauto.core.result import ResultProcessor, IterationResult  # NOQA
@@ -29,7 +29,7 @@ import yaml

from wlauto import PluginLoader, Command, settings
from wlauto.exceptions import CommandError, ConfigError
-from wlauto.utils.cli import init_argument_parser
+from wlauto.core.command import init_argument_parser
from wlauto.utils.misc import (capitalize, check_output,
                               ensure_file_directory_exists as _f, ensure_directory_exists as _d)
from wlauto.utils.types import identifier
@@ -23,7 +23,6 @@ from wlauto.common.resources import Executable
from wlauto.core.resource import NO_ONE
from wlauto.core.resolver import ResourceResolver
from wlauto.core.configuration import RunConfiguration
-from wlauto.core.agenda import Agenda
from wlauto.common.android.workload import ApkWorkload
@@ -20,11 +20,11 @@ import shutil

import wlauto
from wlauto import Command, settings
-from wlauto.core.agenda import Agenda
-from wlauto.core.execution import Executor
-from wlauto.utils.log import add_log_file
-from wlauto.core.configuration import RunConfiguration
+from wlauto.core.configuration import RunConfiguration, WAConfiguration
+from wlauto.core import pluginloader
+from wlauto.core.configuration_parsers import Agenda, ConfigFile, EnvrironmentVars, CommandLineArgs


class RunCommand(Command):
@@ -32,6 +32,103 @@ class RunCommand(Command):
    name = 'run'
    description = 'Execute automated workloads on a remote device and process the resulting output.'

    def initialize(self, context):
        self.parser.add_argument('agenda', metavar='AGENDA',
                                 help="""
                                 Agenda for this workload automation run. This defines which
                                 workloads will be executed, how many times, with which
                                 tunables, etc. See example agendas in {} for an example of
                                 how this file should be structured.
                                 """.format(os.path.dirname(wlauto.__file__)))
        self.parser.add_argument('-d', '--output-directory', metavar='DIR', default=None,
                                 help="""
                                 Specify a directory where the output will be generated. If
                                 the directory already exists, the script will abort unless the -f
                                 option (see below) is used, in which case the contents of the
                                 directory will be overwritten. If this option is not specified,
                                 then {} will be used instead.
                                 """.format("settings.default_output_directory"))  # TODO: Fix this!
        self.parser.add_argument('-f', '--force', action='store_true',
                                 help="""
                                 Overwrite output directory if it exists. By default, the script
                                 will abort in this situation to prevent accidental data loss.
                                 """)
        self.parser.add_argument('-i', '--id', action='append', dest='only_run_ids', metavar='ID',
                                 help="""
                                 Specify a workload spec ID from an agenda to run. If this is
                                 specified, only that particular spec will be run, and other
                                 workloads in the agenda will be ignored. This option may be
                                 used to specify multiple IDs.
                                 """)
        self.parser.add_argument('--disable', action='append', dest='instruments_to_disable',
                                 metavar='INSTRUMENT', help="""
                                 Specify an instrument to disable from the command line. This is
                                 equivalent to adding "~{metavar}" to the instrumentation list in
                                 the agenda. This can be used to temporarily disable a troublesome
                                 instrument for a particular run without introducing a permanent
                                 change to the config (which one might then forget to revert).
                                 This option may be specified multiple times.
                                 """)

    def execute(self, args):

        # STAGE 1: Gather configuration

        env = EnvrironmentVars()
        args = CommandLineArgs(args)

        # STAGE 2.1a: Early WAConfiguration, required to find config files
        if env.user_directory:
            settings.set("user_directory", env.user_directory)
        if env.plugin_paths:
            settings.set("plugin_paths", env.plugin_paths)

        # STAGE 1 continued

        # TODO: Check for config.py and convert to yaml; if that fails, warn the user.
        configs = [ConfigFile(os.path.join(settings.user_directory, 'config.yaml'))]
        for c in args.config:
            configs.append(ConfigFile(c))
        agenda = Agenda(args.agenda)
        configs.append(agenda.config)

        # STAGE 2: Sending configuration to the correct place & merging in
        # order of priority.
        #
        # Priorities (lowest to highest):
        #     - Environment variables
        #     - config.yaml from `user_directory`
        #     - config files passed via the command line
        #       (the first specified will be the first to be applied)
        #     - Agenda
        #     - Command-line configuration, e.g. disabled instrumentation.

        # STAGE 2.1b: WAConfiguration
        for config in configs:
            for config_point in settings.configuration.keys():
                if hasattr(config, config_point):
                    settings.set(config_point, getattr(config, config_point))

    def _parse_config(self):
        pass

    def _serialize_raw_config(self, env, args, agenda, configs):
        pod = {}
        pod['environment_variables'] = env.to_pod()
        pod['commandline_arguments'] = args.to_pod()
        pod['agenda'] = agenda.to_pod()
        pod['config_files'] = [c.to_pod() for c in configs]
        return pod

    def _serialize_final_config(self):
        pass


class OldRunCommand(Command):

    name = 'old_run'
    description = 'Execute automated workloads on a remote device and process the resulting output.'

    def initialize(self, context):
        self.parser.add_argument('agenda', metavar='AGENDA',
                                 help="""
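The STAGE 2 comment above reduces to an ordered merge: each configuration source is applied on top of the previous, lower-priority ones. A minimal sketch of the idea, with plain dicts standing in for WA's ConfigFile/Agenda objects (all names here are illustrative, not WA API):

# A minimal sketch of priority-ordered merging; later (higher-priority)
# sources override earlier ones.
def merge_by_priority(sources):
    merged = {}
    for source in sources:  # ordered lowest to highest priority
        merged.update(source)
    return merged

env_vars    = {'user_directory': '~/.workload_automation'}
config_yaml = {'verbosity': 0, 'device': 'generic_android'}
agenda_cfg  = {'device': 'nexus10'}   # the agenda overrides config.yaml
cmdline     = {'verbosity': 2}        # the command line overrides everything

merged = merge_by_priority([env_vars, config_yaml, agenda_cfg, cmdline])
assert merged['device'] == 'nexus10' and merged['verbosity'] == 2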
@@ -34,3 +34,7 @@ class JarFile(FileResource):

class ApkFile(FileResource):

    name = 'apk'
+
+    def __init__(self, owner, version):
+        super(ApkFile, self).__init__(owner)
+        self.version = version
@@ -9,7 +9,7 @@ Add your configuration to that file instead.
# configuration for WA and gives EXAMPLES of other configuration available. It
# is not supposed to be an exhaustive list.
# PLEASE REFER TO WA DOCUMENTATION FOR THE COMPLETE LIST OF AVAILABLE
-# EXTENSIONS AND THEIR CONFIGURATION.
+# EXTENSIONS AND THEIR configuration.


# This defines when the device will be rebooted during Workload Automation execution. #
@@ -1,261 +0,0 @@
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
from copy import copy
from collections import OrderedDict, defaultdict
import yaml

from wlauto.exceptions import ConfigError
from wlauto.utils.misc import load_struct_from_yaml, LoadSyntaxError
from wlauto.utils.types import counter, reset_counter
def get_aliased_param(d, aliases, default=None, pop=True):
    alias_map = [i for i, a in enumerate(aliases) if a in d]
    if len(alias_map) > 1:
        message = 'Only one of {} may be specified in a single entry'
        raise ConfigError(message.format(aliases))
    elif alias_map:
        if pop:
            return d.pop(aliases[alias_map[0]])
        else:
            return d[aliases[alias_map[0]]]
    else:
        return default
class AgendaEntry(object):

    def to_dict(self):
        return copy(self.__dict__)


class AgendaWorkloadEntry(AgendaEntry):
    """
    Specifies execution of a workload, including things like the number of
    iterations, device runtime_parameters configuration, etc.

    """

    def __init__(self, **kwargs):
        super(AgendaWorkloadEntry, self).__init__()
        self.id = kwargs.pop('id')
        self.workload_name = get_aliased_param(kwargs, ['workload_name', 'name'])
        if not self.workload_name:
            raise ConfigError('No workload name specified in entry {}'.format(self.id))
        self.label = kwargs.pop('label', self.workload_name)
        self.number_of_iterations = kwargs.pop('iterations', None)
        self.boot_parameters = get_aliased_param(kwargs,
                                                 ['boot_parameters', 'boot_params'],
                                                 default=OrderedDict())
        self.runtime_parameters = get_aliased_param(kwargs,
                                                    ['runtime_parameters', 'runtime_params'],
                                                    default=OrderedDict())
        self.workload_parameters = get_aliased_param(kwargs,
                                                     ['workload_parameters', 'workload_params', 'params'],
                                                     default=OrderedDict())
        self.instrumentation = kwargs.pop('instrumentation', [])
        self.flash = kwargs.pop('flash', OrderedDict())
        self.classifiers = kwargs.pop('classifiers', OrderedDict())
        if kwargs:
            raise ConfigError('Invalid entry(ies) in workload {}: {}'.format(self.id, ', '.join(kwargs.keys())))
class AgendaSectionEntry(AgendaEntry):
    """
    Specifies a section of the agenda: configuration (number of iterations,
    device runtime_parameters, etc.) shared by a group of workloads.

    """

    def __init__(self, agenda, **kwargs):
        super(AgendaSectionEntry, self).__init__()
        self.id = kwargs.pop('id')
        self.number_of_iterations = kwargs.pop('iterations', None)
        self.boot_parameters = get_aliased_param(kwargs,
                                                 ['boot_parameters', 'boot_params'],
                                                 default=OrderedDict())
        self.runtime_parameters = get_aliased_param(kwargs,
                                                    ['runtime_parameters', 'runtime_params', 'params'],
                                                    default=OrderedDict())
        self.workload_parameters = get_aliased_param(kwargs,
                                                     ['workload_parameters', 'workload_params'],
                                                     default=OrderedDict())
        self.instrumentation = kwargs.pop('instrumentation', [])
        self.flash = kwargs.pop('flash', OrderedDict())
        self.classifiers = kwargs.pop('classifiers', OrderedDict())
        self.workloads = []
        for w in kwargs.pop('workloads', []):
            self.workloads.append(agenda.get_workload_entry(w))
        if kwargs:
            raise ConfigError('Invalid entry(ies) in section {}: {}'.format(self.id, ', '.join(kwargs.keys())))

    def to_dict(self):
        d = copy(self.__dict__)
        d['workloads'] = [w.to_dict() for w in self.workloads]
        return d
class AgendaGlobalEntry(AgendaEntry):
    """
    Workload configuration global to all workloads.

    """

    def __init__(self, **kwargs):
        super(AgendaGlobalEntry, self).__init__()
        self.number_of_iterations = kwargs.pop('iterations', None)
        self.boot_parameters = get_aliased_param(kwargs,
                                                 ['boot_parameters', 'boot_params'],
                                                 default=OrderedDict())
        self.runtime_parameters = get_aliased_param(kwargs,
                                                    ['runtime_parameters', 'runtime_params', 'params'],
                                                    default=OrderedDict())
        self.workload_parameters = get_aliased_param(kwargs,
                                                     ['workload_parameters', 'workload_params'],
                                                     default=OrderedDict())
        self.instrumentation = kwargs.pop('instrumentation', [])
        self.flash = kwargs.pop('flash', OrderedDict())
        self.classifiers = kwargs.pop('classifiers', OrderedDict())
        if kwargs:
            raise ConfigError('Invalid entries in global section: {}'.format(kwargs))
class Agenda(object):

    def __init__(self, source=None):
        self.filepath = None
        self.config = {}
        self.global_ = None
        self.sections = []
        self.workloads = []
        self._seen_ids = defaultdict(set)
        if source:
            try:
                reset_counter('section')
                reset_counter('workload')
                self._load(source)
            except (ConfigError, LoadSyntaxError, SyntaxError) as e:
                raise ConfigError(str(e))

    def add_workload_entry(self, w):
        entry = self.get_workload_entry(w)
        self.workloads.append(entry)

    def get_workload_entry(self, w):
        if isinstance(w, basestring):
            w = {'name': w}
        if not isinstance(w, dict):
            raise ConfigError('Invalid workload entry: "{}" in {}'.format(w, self.filepath))
        self._assign_id_if_needed(w, 'workload')
        return AgendaWorkloadEntry(**w)
    def _load(self, source):  # pylint: disable=too-many-branches
        try:
            raw = self._load_raw_from_source(source)
        except ValueError as e:
            name = getattr(source, 'name', '')
            raise ConfigError('Error parsing agenda {}: {}'.format(name, e))
        if not isinstance(raw, dict):
            message = '{} does not contain a valid agenda structure; top level must be a dict.'
            raise ConfigError(message.format(self.filepath))
        for k, v in raw.iteritems():
            if v is None:
                raise ConfigError('Empty "{}" entry in {}'.format(k, self.filepath))

            if k == 'config':
                if not isinstance(v, dict):
                    raise ConfigError('Invalid agenda: "config" entry must be a dict')
                self.config = v
            elif k == 'global':
                self.global_ = AgendaGlobalEntry(**v)
            elif k == 'sections':
                self._collect_existing_ids(v, 'section')
                for s in v:
                    if not isinstance(s, dict):
                        raise ConfigError('Invalid section entry: "{}" in {}'.format(s, self.filepath))
                    self._collect_existing_ids(s.get('workloads', []), 'workload')
                for s in v:
                    self._assign_id_if_needed(s, 'section')
                    self.sections.append(AgendaSectionEntry(self, **s))
            elif k == 'workloads':
                self._collect_existing_ids(v, 'workload')
                for w in v:
                    self.workloads.append(self.get_workload_entry(w))
            else:
                raise ConfigError('Unexpected agenda entry "{}" in {}'.format(k, self.filepath))

    def _load_raw_from_source(self, source):
        if hasattr(source, 'read') and hasattr(source, 'name'):  # file-like object
            self.filepath = source.name
            raw = load_struct_from_yaml(text=source.read())
        elif isinstance(source, basestring):
            if os.path.isfile(source):
                self.filepath = source
                raw = load_struct_from_yaml(filepath=self.filepath)
            else:  # assume YAML text
                raw = load_struct_from_yaml(text=source)
        else:
            raise ConfigError('Unknown agenda source: {}'.format(source))
        return raw
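_load accepts a top-level dict with config, global, sections and workloads entries, and _load_raw_from_source will also take raw YAML text rather than a file path. A minimal agenda illustrating the structure this (now deleted) parser accepted — the workload and parameter names are made up:

# Illustrative only: Agenda also accepts YAML text directly.
AGENDA_YAML = """
config:
    device: generic_android
global:
    iterations: 3
sections:
    - id: quick
      workloads:
          - name: dhrystone
workloads:
    - name: memcpy
      params:
          size: 1024
"""

agenda = Agenda(AGENDA_YAML)
print([w.workload_name for w in agenda.workloads])   # ['memcpy']
print([s.id for s in agenda.sections])               # ['quick']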
    def _collect_existing_ids(self, ds, pool):
        # Collection needs to take place first so that auto IDs can be
        # correctly assigned, e.g. if someone explicitly specified an ID
        # of '1' for one of the workloads.
        for d in ds:
            if isinstance(d, dict) and 'id' in d:
                did = str(d['id'])
                if did in self._seen_ids[pool]:
                    raise ConfigError('Duplicate {} ID: {}'.format(pool, did))
                self._seen_ids[pool].add(did)

    def _assign_id_if_needed(self, d, pool):
        # Also enforces string IDs
        if d.get('id') is None:
            did = str(counter(pool))
            while did in self._seen_ids[pool]:
                did = str(counter(pool))
            d['id'] = did
            self._seen_ids[pool].add(did)
        else:
            d['id'] = str(d['id'])
# Modifying the yaml parser to use an OrderedDict, rather than a regular
# Python dict, for mappings. This preserves the order in which the items
# are specified. See http://stackoverflow.com/a/21048064

_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG


def dict_representer(dumper, data):
    return dumper.represent_mapping(_mapping_tag, data.iteritems())


def dict_constructor(loader, node):
    pairs = loader.construct_pairs(node)
    seen_keys = set()
    for k, _ in pairs:
        if k in seen_keys:
            raise ValueError('Duplicate entry: {}'.format(k))
        seen_keys.add(k)
    return OrderedDict(pairs)


yaml.add_representer(OrderedDict, dict_representer)
yaml.add_constructor(_mapping_tag, dict_constructor)
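With dict_constructor registered for the default mapping tag, every YAML mapping loads as an OrderedDict and duplicate keys — which stock PyYAML silently collapses, last value winning — become a hard error:

# Behaviour with the constructor above registered (illustrative):
import yaml

try:
    yaml.load("device: nexus10\ndevice: juno\n")
except ValueError as e:
    print(e)   # Duplicate entry: device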
@@ -16,8 +16,16 @@
import textwrap

from wlauto.core.plugin import Plugin
-from wlauto.core.entry_point import init_argument_parser
from wlauto.utils.doc import format_body
+from wlauto.core.version import get_wa_version


+def init_argument_parser(parser):
+    parser.add_argument('-c', '--config', help='specify an additional config.py', action='append')
+    parser.add_argument('-v', '--verbose', action='count',
+                        help='The scripts will produce verbose output.')
+    parser.add_argument('--version', action='version', version='%(prog)s {}'.format(get_wa_version()))
+    return parser


class Command(Plugin):
File diff suppressed because it is too large.

wlauto/core/configuration/__init__.py (new file, 20 lines)
@@ -0,0 +1,20 @@
# Copyright 2013-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wlauto.core.configuration.configuration import (settings,
                                                     WAConfiguration,
                                                     RunConfiguration,
                                                     JobGenerator,
                                                     ConfigurationPoint)
from wlauto.core.configuration.plugin_cache import PluginCache
wlauto/core/configuration/configuration.py (new file, 1032 lines)
File diff suppressed because it is too large.

wlauto/core/configuration/default.py (new file, 42 lines)
@@ -0,0 +1,42 @@
from wlauto.core.configuration.configuration import WAConfiguration, RunConfiguration
from wlauto.core.configuration.plugin_cache import PluginCache
from wlauto.utils.serializer import yaml
from wlauto.utils.doc import strip_inlined_text

DEFAULT_INSTRUMENTS = ['execution_time',
                       'interrupts',
                       'cpufreq',
                       'status',
                       'standard',
                       'csv']


def _format_yaml_comment(param, short_description=False):
    comment = param.description
    comment = strip_inlined_text(comment)
    if short_description:
        comment = comment.split('\n\n')[0]
    comment = comment.replace('\n', '\n# ')
    comment = "# {}\n".format(comment)
    return comment


def _format_instruments(output):
    plugin_cache = PluginCache()
    output.write("instrumentation:\n")
    for plugin in DEFAULT_INSTRUMENTS:
        plugin_cls = plugin_cache.loader.get_plugin_class(plugin)
        output.writelines(_format_yaml_comment(plugin_cls, short_description=True))
        output.write(" - {}\n".format(plugin))
    output.write("\n")


def generate_default_config(path):
    with open(path, 'w') as output:
        for param in WAConfiguration.config_points + RunConfiguration.config_points:
            entry = {param.name: param.default}
            comment = _format_yaml_comment(param)
            output.writelines(comment)
            yaml.dump(entry, output, default_flow_style=False)
            output.write("\n")
        _format_instruments(output)
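generate_default_config writes every WAConfiguration/RunConfiguration point as a commented YAML entry, then appends the default instrumentation list. A minimal use, assuming a working plugin loader so instrument descriptions can be resolved (the path is illustrative):

from wlauto.core.configuration.default import generate_default_config

generate_default_config('/tmp/wa_config.yaml')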
wlauto/core/configuration/parsers.py (new file, 308 lines)
@@ -0,0 +1,308 @@
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os

from wlauto.exceptions import ConfigError
from wlauto.utils.serializer import read_pod, SerializerSyntaxError
from wlauto.utils.types import toggle_set, counter
from wlauto.core.configuration.configuration import JobSpec

########################
### Helper functions ###
########################

DUPLICATE_ENTRY_ERROR = 'Only one of {} may be specified in a single entry'


def get_aliased_param(cfg_point, d, default=None, pop=True):
    """
    Given a ConfigurationPoint and a dict, this function will search the dict for
    the ConfigurationPoint's name/aliases. If more than one is found, it will raise
    a ConfigError. If one (and only one) is found, it will return the value
    for the ConfigurationPoint. If neither the name nor any of the aliases are
    present in the dict, it will return the "default" parameter of this function.
    """
    aliases = [cfg_point.name] + cfg_point.aliases
    alias_map = [a for a in aliases if a in d]
    if len(alias_map) > 1:
        raise ConfigError(DUPLICATE_ENTRY_ERROR.format(aliases))
    elif alias_map:
        if pop:
            return d.pop(alias_map[0])
        else:
            return d[alias_map[0]]
    else:
        return default
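A small illustration of the alias handling; the namedtuple below is a stub standing in for a real ConfigurationPoint, not WA API:

from collections import namedtuple

CfgPoint = namedtuple('CfgPoint', ['name', 'aliases'])
point = CfgPoint(name='workload_parameters', aliases=['workload_params', 'params'])

entry = {'params': {'size': 1024}}
print(get_aliased_param(point, entry))   # {'size': 1024}; 'params' is popped from entry

entry = {'params': {}, 'workload_params': {}}
get_aliased_param(point, entry)          # raises ConfigError: only one alias may be used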
def _load_file(filepath, error_name):
    if not os.path.isfile(filepath):
        raise ValueError("{} does not exist".format(filepath))
    try:
        raw = read_pod(filepath)
    except SerializerSyntaxError as e:
        raise ConfigError('Error parsing {} {}: {}'.format(error_name, filepath, e))
    if not isinstance(raw, dict):
        message = '{} does not contain a valid {} structure; top level must be a dict.'
        raise ConfigError(message.format(filepath, error_name))
    return raw
def merge_result_processors_instruments(raw):
    instruments = toggle_set(get_aliased_param(JobSpec.configuration['instrumentation'],
                                               raw, default=[]))
    result_processors = toggle_set(raw.pop('result_processors', []))
    if instruments and result_processors:
        conflicts = instruments.conflicts_with(result_processors)
        if conflicts:
            msg = '"instrumentation" and "result_processors" have conflicting entries: {}'
            entries = ', '.join('"{}"'.format(c.strip("~")) for c in conflicts)
            raise ConfigError(msg.format(entries))
    raw['instrumentation'] = instruments.merge_with(result_processors)
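In a toggle_set, a "~" prefix disables an entry, so an instrument enabled in one list and disabled in the other is a conflict. A rough sketch, assuming toggle_set behaves as its use above implies:

from wlauto.utils.types import toggle_set

instruments = toggle_set(['execution_time', '~cpufreq'])
result_processors = toggle_set(['csv', 'cpufreq'])

conflicts = instruments.conflicts_with(result_processors)   # the cpufreq enable/disable pair
merged = instruments.merge_with(result_processors)          # the union, with the conflict resolved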
def _construct_valid_entry(raw, seen_ids, counter_name, jobs_config):
    entries = {}

    # Generate an automatic ID if the entry doesn't already have one
    if "id" not in raw:
        while True:
            new_id = "{}{}".format(counter_name, counter(name=counter_name))
            if new_id not in seen_ids:
                break
        entries["id"] = new_id
        seen_ids.add(new_id)
    else:
        entries["id"] = raw.pop("id")

    # Process instrumentation
    merge_result_processors_instruments(raw)

    # Validate all entries
    for name, cfg_point in JobSpec.configuration.iteritems():
        value = get_aliased_param(cfg_point, raw)
        if value is not None:
            value = cfg_point.kind(value)
            cfg_point.validate_value(name, value)
            entries[name] = value
    entries["workload_parameters"] = raw.pop("workload_parameters", None)
    entries["runtime_parameters"] = raw.pop("runtime_parameters", None)
    entries["boot_parameters"] = raw.pop("boot_parameters", None)

    if "instrumentation" in entries:
        jobs_config.update_enabled_instruments(entries["instrumentation"])

    # Error if there are unknown entries
    if raw:
        msg = 'Invalid entry(ies) in "{}": "{}"'
        raise ConfigError(msg.format(entries['id'], ', '.join(raw.keys())))

    return entries
def _collect_valid_id(entry_id, seen_ids, entry_type):
    if entry_id is None:
        return
    if entry_id in seen_ids:
        raise ConfigError('Duplicate {} ID "{}".'.format(entry_type, entry_id))
    # "-" is reserved for joining section and workload IDs
    if "-" in entry_id:
        msg = 'Invalid {} ID "{}"; IDs cannot contain a "-"'
        raise ConfigError(msg.format(entry_type, entry_id))
    if entry_id == "global":
        msg = 'Invalid {} ID "global"; "global" is a reserved ID'
        raise ConfigError(msg.format(entry_type))
    seen_ids.add(entry_id)
def _resolve_params_alias(entry, param_alias):
    possible_names = {"params", "{}_params".format(param_alias), "{}_parameters".format(param_alias)}
    duplicate_entries = possible_names.intersection(set(entry.keys()))
    if len(duplicate_entries) > 1:
        raise ConfigError(DUPLICATE_ENTRY_ERROR.format(list(possible_names)))
    for name in duplicate_entries:
        entry["{}_parameters".format(param_alias)] = entry.pop(name)
def _get_workload_entry(workload):
    if isinstance(workload, basestring):
        workload = {'name': workload}
    elif not isinstance(workload, dict):
        raise ConfigError('Invalid workload entry: "{}"'.format(workload))
    return workload


def _process_workload_entry(workload, seen_workload_ids, jobs_config):
    workload = _get_workload_entry(workload)
    _resolve_params_alias(workload, "workload")
    workload = _construct_valid_entry(workload, seen_workload_ids, "wk", jobs_config)
    return workload
###############
### Parsers ###
###############


class ConfigParser(object):

    def __init__(self, wa_config, run_config, jobs_config, plugin_cache):
        self.wa_config = wa_config
        self.run_config = run_config
        self.jobs_config = jobs_config
        self.plugin_cache = plugin_cache

    def load_from_path(self, filepath):
        self.load(_load_file(filepath, "Config"), filepath)

    def load(self, raw, source, wrap_exceptions=True):  # pylint: disable=too-many-branches
        try:
            if 'run_name' in raw:
                msg = '"run_name" can only be specified in the config section of an agenda'
                raise ConfigError(msg)
            if 'id' in raw:
                raise ConfigError('"id" cannot be set globally')

            merge_result_processors_instruments(raw)

            # Get WA core configuration
            for cfg_point in self.wa_config.configuration.itervalues():
                value = get_aliased_param(cfg_point, raw)
                if value is not None:
                    self.wa_config.set(cfg_point.name, value)

            # Get run-specific configuration
            for cfg_point in self.run_config.configuration.itervalues():
                value = get_aliased_param(cfg_point, raw)
                if value is not None:
                    self.run_config.set(cfg_point.name, value)

            # Get global job spec configuration
            for cfg_point in JobSpec.configuration.itervalues():
                value = get_aliased_param(cfg_point, raw)
                if value is not None:
                    self.jobs_config.set_global_value(cfg_point.name, value)

            for name, values in raw.iteritems():
                # Assume that all leftover config is for a plug-in or a global
                # alias; it is up to PluginCache to assert this assumption.
                self.plugin_cache.add_configs(name, values, source)

        except ConfigError as e:
            if wrap_exceptions:
                raise ConfigError('Error in "{}":\n{}'.format(source, str(e)))
            else:
                raise e
class AgendaParser(object):

    def __init__(self, wa_config, run_config, jobs_config, plugin_cache):
        self.wa_config = wa_config
        self.run_config = run_config
        self.jobs_config = jobs_config
        self.plugin_cache = plugin_cache

    def load_from_path(self, filepath):
        raw = _load_file(filepath, 'Agenda')
        self.load(raw, filepath)

    def load(self, raw, source):  # pylint: disable=too-many-branches, too-many-locals
        try:
            if not isinstance(raw, dict):
                raise ConfigError('Invalid agenda, top level entry must be a dict')

            # PHASE 1: Populate and validate configuration.
            for name in ['config', 'global']:
                entry = raw.pop(name, {})
                if not isinstance(entry, dict):
                    raise ConfigError('Invalid entry "{}" - must be a dict'.format(name))
                if 'run_name' in entry:
                    self.run_config.set('run_name', entry.pop('run_name'))
                config_parser = ConfigParser(self.wa_config, self.run_config,
                                             self.jobs_config, self.plugin_cache)
                config_parser.load(entry, source, wrap_exceptions=False)

            # PHASE 2: Getting "section" and "workload" entries.
            sections = raw.pop("sections", [])
            if not isinstance(sections, list):
                raise ConfigError('Invalid entry "sections" - must be a list')
            global_workloads = raw.pop("workloads", [])
            if not isinstance(global_workloads, list):
                raise ConfigError('Invalid entry "workloads" - must be a list')
            if raw:
                msg = 'Invalid top level agenda entry(ies): "{}"'
                raise ConfigError(msg.format('", "'.join(raw.keys())))

            # PHASE 3: Collecting existing workload and section IDs
            seen_section_ids = set()
            seen_workload_ids = set()

            for workload in global_workloads:
                workload = _get_workload_entry(workload)
                _collect_valid_id(workload.get("id"), seen_workload_ids, "workload")

            for section in sections:
                _collect_valid_id(section.get("id"), seen_section_ids, "section")
                for workload in section["workloads"] if "workloads" in section else []:
                    workload = _get_workload_entry(workload)
                    _collect_valid_id(workload.get("id"), seen_workload_ids, "workload")

            # PHASE 4: Assigning IDs and validating entries
            # TODO: Error handling for workload errors vs section errors etc.
            for workload in global_workloads:
                self.jobs_config.add_workload(_process_workload_entry(workload,
                                                                      seen_workload_ids,
                                                                      self.jobs_config))

            for section in sections:
                workloads = []
                for workload in section.pop("workloads", []):
                    workloads.append(_process_workload_entry(workload,
                                                             seen_workload_ids,
                                                             self.jobs_config))

                _resolve_params_alias(section, "section")
                section = _construct_valid_entry(section, seen_section_ids, "s", self.jobs_config)
                self.jobs_config.add_section(section, workloads)

            return seen_workload_ids, seen_section_ids
        except (ConfigError, SerializerSyntaxError) as e:
            raise ConfigError('Error in "{}":\n\t{}'.format(source, str(e)))
class EnvironmentVarsParser(object):
    def __init__(self, wa_config, environ):
        user_directory = environ.pop('WA_USER_DIRECTORY', '')
        if user_directory:
            wa_config.set('user_directory', user_directory)
        plugin_paths = environ.pop('WA_PLUGIN_PATHS', '')
        if plugin_paths:
            wa_config.set('plugin_paths', plugin_paths.split(os.pathsep))
        ext_paths = environ.pop('WA_EXTENSION_PATHS', '')
        if ext_paths:
            wa_config.set('plugin_paths', ext_paths.split(os.pathsep))
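A small usage sketch. The no-argument WAConfiguration construction is an assumption here (its definition is in the suppressed configuration.py diff), and the paths are illustrative:

import os
from wlauto.core.configuration import WAConfiguration
from wlauto.core.configuration.parsers import EnvironmentVarsParser

wa_config = WAConfiguration()   # assumed constructible with no arguments
environ = dict(os.environ)      # pass a copy: the parser pops the WA_* keys
environ['WA_PLUGIN_PATHS'] = os.pathsep.join(['/opt/wa/plugins', '/home/user/wa_plugins'])
EnvironmentVarsParser(wa_config, environ)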
# Command line options are parsed in the "run" command. This class is used to
# send certain arguments to the correct configuration points and to keep a
# record of how WA was invoked.
class CommandLineArgsParser(object):
    def __init__(self, cmd_args, wa_config, jobs_config):
        wa_config.set("verbosity", cmd_args.verbosity)
        # TODO: Is this correct? Does there need to be a third output dir param?
        disabled_instruments = toggle_set(["~{}".format(i) for i in cmd_args.instruments_to_disable])
        jobs_config.disable_instruments(disabled_instruments)
        jobs_config.only_run_ids(cmd_args.only_run_ids)
wlauto/core/configuration/plugin_cache.py (new file, 196 lines)
@@ -0,0 +1,196 @@
# Copyright 2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from copy import copy
from collections import defaultdict

from wlauto.core import pluginloader
from wlauto.exceptions import ConfigError
from wlauto.utils.types import obj_dict
from devlib.utils.misc import memoized

GENERIC_CONFIGS = ["device_config", "workload_parameters",
                   "boot_parameters", "runtime_parameters"]


class PluginCache(object):
    """
    The plugin cache is used to store configuration that cannot be processed at
    this stage, whether that is because it is not yet known whether it is needed
    (in the case of disabled plug-ins) or because it is not known what it belongs
    to (in the case of "device_config" etc.). It also keeps track of where each
    piece of configuration came from, and the priority order of those sources.
    """
    def __init__(self, loader=pluginloader):
        self.loader = loader
        self.sources = []
        self.plugin_configs = defaultdict(lambda: defaultdict(dict))
        self.global_alias_values = defaultdict(dict)

        # Generate a mapping of what global aliases belong to
        self._global_alias_map = defaultdict(dict)
        self._list_of_global_aliases = set()
        for plugin in self.loader.list_plugins():
            for param in plugin.parameters:
                if param.global_alias:
                    self._global_alias_map[plugin.name][param.global_alias] = param
                    self._list_of_global_aliases.add(param.global_alias)

    def add_source(self, source):
        if source in self.sources:
            raise Exception("Source has already been added.")
        self.sources.append(source)

    def add_global_alias(self, alias, value, source):
        if source not in self.sources:
            msg = "Source '{}' has not been added to the plugin cache."
            raise RuntimeError(msg.format(source))

        if not self.is_global_alias(alias):
            msg = "'{}' is not a valid global alias"
            raise RuntimeError(msg.format(alias))

        self.global_alias_values[alias][source] = value

    def add_configs(self, plugin_name, values, source):
        if self.is_global_alias(plugin_name):
            self.add_global_alias(plugin_name, values, source)
            return
        for name, value in values.iteritems():
            self.add_config(plugin_name, name, value, source)

    def add_config(self, plugin_name, name, value, source):
        if source not in self.sources:
            msg = "Source '{}' has not been added to the plugin cache."
            raise RuntimeError(msg.format(source))

        if not self.loader.has_plugin(plugin_name) and plugin_name not in GENERIC_CONFIGS:
            msg = 'configuration provided for unknown plugin "{}"'
            raise ConfigError(msg.format(plugin_name))

        if (plugin_name not in GENERIC_CONFIGS and
                name not in self.get_plugin_parameters(plugin_name)):
            msg = "'{}' is not a valid parameter for '{}'"
            raise ConfigError(msg.format(name, plugin_name))

        self.plugin_configs[plugin_name][source][name] = value

    def is_global_alias(self, name):
        return name in self._list_of_global_aliases

    def get_plugin_config(self, plugin_name, generic_name=None):
        config = obj_dict(not_in_dict=['name'])
        config.name = plugin_name

        # Load plugin defaults
        cfg_points = self.get_plugin_parameters(plugin_name)
        for cfg_point in cfg_points.itervalues():
            cfg_point.set_value(config, check_mandatory=False)

        # Merge global aliases
        for alias, param in self._global_alias_map[plugin_name].iteritems():
            if alias in self.global_alias_values:
                for source in self.sources:
                    if source not in self.global_alias_values[alias]:
                        continue
                    param.set_value(config, value=self.global_alias_values[alias][source])

        # Merge user config.
        # Perform a simple merge with the order of sources representing priority
        if generic_name is None:
            plugin_config = self.plugin_configs[plugin_name]
            for source in self.sources:
                if source not in plugin_config:
                    continue
                for name, value in plugin_config[source].iteritems():
                    cfg_points[name].set_value(config, value=value)
        # A more complicated merge that involves priority of sources and specificity
        else:
            self._merge_using_priority_specificity(plugin_name, generic_name, config)

        return config

    @memoized
    def get_plugin_parameters(self, name):
        params = self.loader.get_plugin_class(name).parameters
        return {param.name: param for param in params}
    # pylint: disable=too-many-nested-blocks, too-many-branches
    def _merge_using_priority_specificity(self, specific_name, generic_name, final_config):
        """
        WA configuration can come from various sources of increasing priority, and
        can be specified in either a generic or a specific manner (e.g.
        ``device_config`` and ``nexus10`` respectively). WA has two rules for the
        priority of configuration:

            - Configuration from higher priority sources overrides configuration
              from lower priority sources.
            - More specific configuration overrides less specific configuration.

        These two rules come into conflict when a generic configuration is given
        in a config source of high priority and a specific configuration is given
        in a config source of lower priority. In that situation it is not possible
        to know the end user's intention, so WA will error.

        :param generic_name: The name of the generic configuration, e.g. ``device_config``.
        :param specific_name: The name of the specific configuration used, e.g. ``nexus10``.
        :param final_config: An obj_dict into which the fully merged and validated
                             configuration is written.
        """
        generic_config = copy(self.plugin_configs[generic_name])
        specific_config = copy(self.plugin_configs[specific_name])
        cfg_points = self.get_plugin_parameters(specific_name)
        sources = self.sources
        seen_specific_config = defaultdict(list)

        # set_value uses the 'name' attribute of the passed object in its error
        # messages; to make those messages make sense, the name has to be
        # changed several times during this function.
        final_config.name = specific_name

        for source in sources:
            try:
                if source in generic_config:
                    final_config.name = generic_name
                    for name, cfg_point in cfg_points.iteritems():
                        if name in generic_config[source]:
                            if name in seen_specific_config:
                                msg = ('"{generic_name}" configuration "{config_name}" has already been '
                                       'specified more specifically for {specific_name} in:\n\t\t{sources}')
                                msg = msg.format(generic_name=generic_name,
                                                 config_name=name,
                                                 specific_name=specific_name,
                                                 sources=", ".join(seen_specific_config[name]))
                                raise ConfigError(msg)
                            value = generic_config[source][name]
                            cfg_point.set_value(final_config, value, check_mandatory=False)

                if source in specific_config:
                    final_config.name = specific_name
                    for name, cfg_point in cfg_points.iteritems():
                        if name in specific_config[source]:
                            seen_specific_config[name].append(str(source))
                            value = specific_config[source][name]
                            cfg_point.set_value(final_config, value, check_mandatory=False)

            except ConfigError as e:
                raise ConfigError('Error in "{}":\n\t{}'.format(source, str(e)))

        # Validate final configuration
        final_config.name = specific_name
        for cfg_point in cfg_points.itervalues():
            cfg_point.validate(final_config)
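A sketch of the conflict described in the docstring above; it assumes a plugin loader that knows a nexus10 device with a core_names parameter (names illustrative):

cache = PluginCache()
cache.add_source('config.yaml')   # added first  => lower priority
cache.add_source('agenda')        # added last   => higher priority

# Specific configuration in the low-priority source...
cache.add_config('nexus10', 'core_names', ['a15', 'a15'], 'config.yaml')
# ...generic configuration in the high-priority source:
cache.add_config('device_config', 'core_names', ['a7', 'a7'], 'agenda')

# The user's intent is ambiguous, so this raises ConfigError:
cache.get_plugin_config('nexus10', generic_name='device_config')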
wlauto/core/configuration/tree.py (new file, 89 lines)
@@ -0,0 +1,89 @@
# Copyright 2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class JobSpecSource(object):

    kind = ""

    def __init__(self, config, parent=None):
        self.config = config
        self.parent = parent

    @property
    def id(self):
        return self.config['id']

    def name(self):
        raise NotImplementedError()


class WorkloadEntry(JobSpecSource):
    kind = "workload"

    @property
    def name(self):
        if self.parent.id == "global":
            return 'workload "{}"'.format(self.id)
        else:
            return 'workload "{}" from section "{}"'.format(self.id, self.parent.id)


class SectionNode(JobSpecSource):

    kind = "section"

    @property
    def name(self):
        if self.id == "global":
            return "globally specified configuration"
        else:
            return 'section "{}"'.format(self.id)

    @property
    def is_leaf(self):
        return not bool(self.children)

    def __init__(self, config, parent=None):
        super(SectionNode, self).__init__(config, parent=parent)
        self.workload_entries = []
        self.children = []

    def add_section(self, section):
        new_node = SectionNode(section, parent=self)
        self.children.append(new_node)
        return new_node

    def add_workload(self, workload_config):
        self.workload_entries.append(WorkloadEntry(workload_config, self))

    def descendants(self):
        for child in self.children:
            for n in child.descendants():
                yield n
            yield child

    def ancestors(self):
        if self.parent is not None:
            yield self.parent
            for ancestor in self.parent.ancestors():
                yield ancestor

    def leaves(self):
        if self.is_leaf:
            yield self
        else:
            for n in self.descendants():
                if n.is_leaf:
                    yield n
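The traversal order falls out of the generators above: descendants() yields a child's subtree before the child itself, and leaves() filters that to childless nodes. A self-contained example using only the API defined in this file:

root = SectionNode({'id': 'global'})
s1 = root.add_section({'id': 's1'})
s2 = root.add_section({'id': 's2'})
s1a = s1.add_section({'id': 's1a'})
s1.add_workload({'id': 'wk1'})

print([n.id for n in root.descendants()])   # ['s1a', 's1', 's2']
print([n.id for n in root.leaves()])        # ['s1a', 's2']
print([n.id for n in s1a.ancestors()])      # ['s1', 'global']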
@@ -1,7 +1,8 @@
import string
from collections import OrderedDict
from copy import copy

from wlauto.core.plugin import Plugin, Parameter
from wlauto.core.configuration.configuration import RuntimeParameter
from wlauto.exceptions import ConfigError
from wlauto.utils.types import list_of_integers, list_of, caseless_string
@@ -10,56 +11,7 @@ from devlib.target import AndroidTarget, Cpuinfo, KernelVersion, KernelConfig

__all__ = ['RuntimeParameter', 'CoreParameter', 'DeviceManager', 'TargetInfo']


class RuntimeParameter(object):
    """
    A runtime parameter which has its getter and setter methods associated
    with it.

    """

    def __init__(self, name, getter, setter,
                 getter_args=None, setter_args=None,
                 value_name='value', override=False):
        """
        :param name: the name of the parameter.
        :param getter: the getter method which returns the value of this parameter.
        :param setter: the setter method which sets the value of this parameter. The setter
                       always expects to be passed one argument when it is called.
        :param getter_args: keyword arguments to be used when invoking the getter.
        :param setter_args: keyword arguments to be used when invoking the setter.
        :param override: A ``bool`` that specifies whether a parameter of the same name further up the
                         hierarchy should be overridden. If this is ``False`` (the default), an exception
                         will be raised by the ``AttributeCollection`` instead.

        """
        self.name = name
        self.getter = getter
        self.setter = setter
        self.getter_args = getter_args or {}
        self.setter_args = setter_args or {}
        self.value_name = value_name
        self.override = override

    def __str__(self):
        return self.name

    __repr__ = __str__


class CoreParameter(RuntimeParameter):
    """A runtime parameter that will get expanded into a RuntimeParameter for each core type."""

    def get_runtime_parameters(self, core_names):
        params = []
        for core in set(core_names):
            name = string.Template(self.name).substitute(core=core)
            getter = string.Template(self.getter).substitute(core=core)
            setter = string.Template(self.setter).substitute(core=core)
            getargs = dict(self.getter_args.items() + [('core', core)])
            setargs = dict(self.setter_args.items() + [('core', core)])
            params.append(RuntimeParameter(name, getter, setter, getargs, setargs, self.value_name, self.override))
        return params

UNKOWN_RTP = 'Unknown runtime parameter "{}"'


class TargetInfo(object):
@@ -174,22 +126,13 @@ class DeviceManager(Plugin):
                 ]
    modules = []

-    runtime_parameters = [
-        RuntimeParameter('sysfile_values', 'get_sysfile_values', 'set_sysfile_values', value_name='params'),
-        CoreParameter('${core}_cores', 'get_number_of_online_cpus', 'set_number_of_online_cpus',
-                      value_name='number'),
-        CoreParameter('${core}_min_frequency', 'get_core_min_frequency', 'set_core_min_frequency',
-                      value_name='freq'),
-        CoreParameter('${core}_max_frequency', 'get_core_max_frequency', 'set_core_max_frequency',
-                      value_name='freq'),
-        CoreParameter('${core}_frequency', 'get_core_cur_frequency', 'set_core_cur_frequency',
-                      value_name='freq'),
-        CoreParameter('${core}_governor', 'get_core_governor', 'set_core_governor',
-                      value_name='governor'),
-        CoreParameter('${core}_governor_tunables', 'get_core_governor_tunables', 'set_core_governor_tunables',
-                      value_name='tunables'),
+    runtime_parameter_managers = [
    ]

    def __init__(self):
        super(DeviceManager, self).__init__()
        self.runtime_parameter_values = None

    # Framework

    def connect(self):
@@ -211,57 +154,37 @@ class DeviceManager(Plugin):

    # Runtime Parameters

-    def get_runtime_parameter_names(self):
-        return [p.name for p in self._expand_runtime_parameters()]
+    def merge_runtime_parameters(self, params):
+        merged_values = {}
+        for source, values in params.iteritems():
+            for name, value in values:
+                for rtpm in self.runtime_parameter_managers:
+                    if rtpm.match(name):
+                        rtpm.update_value(name, value, source, merged_values)
+                        break
+                else:
+                    msg = 'Unknown runtime parameter "{}" in "{}"'
+                    raise ConfigError(msg.format(name, source))
+        return merged_values

-    def get_runtime_parameters(self):
-        """ returns the runtime parameters that have been set. """
-        # pylint: disable=cell-var-from-loop
-        runtime_parameters = OrderedDict()
-        for rtp in self._expand_runtime_parameters():
-            if not rtp.getter:
-                continue
-            getter = getattr(self, rtp.getter)
-            rtp_value = getter(**rtp.getter_args)
-            runtime_parameters[rtp.name] = rtp_value
-        return runtime_parameters
+    def static_runtime_parameter_validation(self, params):
+        params = copy(params)
+        for rtpm in self.runtime_parameter_managers:
+            rtpm.static_validation(params)
+        if params:
+            msg = 'Unknown runtime_parameters for "{}": "{}"'
+            raise ConfigError(msg.format(self.name, '", "'.join(params.iterkeys())))

-    def set_runtime_parameters(self, params):
-        """
-        The parameters are taken from the keyword arguments and are specific to
-        a particular device. See the device documentation.
-
-        """
-        runtime_parameters = self._expand_runtime_parameters()
-        rtp_map = {rtp.name.lower(): rtp for rtp in runtime_parameters}
-
-        params = OrderedDict((k.lower(), v) for k, v in params.iteritems() if v is not None)
-
-        expected_keys = rtp_map.keys()
-        if not set(params.keys()).issubset(set(expected_keys)):
-            unknown_params = list(set(params.keys()).difference(set(expected_keys)))
-            raise ConfigError('Unknown runtime parameter(s): {}'.format(unknown_params))
-
-        for param in params:
-            self.logger.debug('Setting runtime parameter "{}"'.format(param))
-            rtp = rtp_map[param]
-            setter = getattr(self, rtp.setter)
-            args = dict(rtp.setter_args.items() + [(rtp.value_name, params[rtp.name.lower()])])
-            setter(**args)
+    def dynamic_runtime_parameter_validation(self, params):
+        for rtpm in self.runtime_parameter_managers:
+            rtpm.dynamic_validation(params)

-    def _expand_runtime_parameters(self):
-        expanded_params = []
-        for param in self.runtime_parameters:
-            if isinstance(param, CoreParameter):
-                expanded_params.extend(param.get_runtime_parameters(self.target.core_names))  # pylint: disable=no-member
-            else:
-                expanded_params.append(param)
-        return expanded_params
+    def commit_runtime_parameters(self, params):
+        params = copy(params)
+        for rtpm in self.runtime_parameter_managers:
+            rtpm.commit(params)

    # Runtime parameter getters/setters

    _written_sysfiles = []

    def get_sysfile_values(self):
        return self._written_sysfiles
@@ -271,49 +194,3 @@ class DeviceManager(Plugin):
        sysfile = sysfile.rstrip('!')
        self._written_sysfiles.append((sysfile, value))
        self.target.write_value(sysfile, value, verify=verify)

-    # pylint: disable=E1101
-
-    def _get_core_online_cpu(self, core):
-        try:
-            return self.target.list_online_core_cpus(core)[0]
-        except IndexError:
-            raise ValueError("No {} cores are online".format(core))
-
-    def get_number_of_online_cpus(self, core):
-        return len(self._get_core_online_cpu(core))
-
-    def set_number_of_online_cpus(self, core, number):
-        for cpu in self.target.core_cpus(core)[:number]:
-            self.target.hotplug.online(cpu)
-
-    def get_core_min_frequency(self, core):
-        return self.target.cpufreq.get_min_frequency(self._get_core_online_cpu(core))
-
-    def set_core_min_frequency(self, core, frequency):
-        self.target.cpufreq.set_min_frequency(self._get_core_online_cpu(core), frequency)
-
-    def get_core_max_frequency(self, core):
-        return self.target.cpufreq.get_max_frequency(self._get_core_online_cpu(core))
-
-    def set_core_max_frequency(self, core, frequency):
-        self.target.cpufreq.set_max_frequency(self._get_core_online_cpu(core), frequency)
-
-    def get_core_frequency(self, core):
-        return self.target.cpufreq.get_frequency(self._get_core_online_cpu(core))
-
-    def set_core_frequency(self, core, frequency):
-        self.target.cpufreq.set_frequency(self._get_core_online_cpu(core), frequency)
-
-    def get_core_governor(self, core):
-        return self.target.cpufreq.get_cpu_governor(self._get_core_online_cpu(core))
-
-    def set_core_governor(self, core, governor):
-        self.target.cpufreq.set_cpu_governor(self._get_core_online_cpu(core), governor)
-
-    def get_core_governor_tunables(self, core):
-        return self.target.cpufreq.get_governor_tunables(self._get_core_online_cpu(core))
-
-    def set_core_governor_tunables(self, core, tunables):
-        self.target.cpufreq.set_governor_tunables(self._get_core_online_cpu(core),
-                                                  *tunables)
@@ -23,10 +23,10 @@ import warnings

from wlauto.core.configuration import settings
from wlauto.core import pluginloader
-from wlauto.exceptions import WAError
-from wlauto.core.command import init_argument_parser
+from wlauto.exceptions import WAError, ConfigError
from wlauto.utils.misc import get_traceback
from wlauto.utils.log import init_logging
+from wlauto.utils.cli import init_argument_parser
from wlauto.utils.doc import format_body

from devlib import DevlibError
@@ -56,13 +56,16 @@ def main():
    init_argument_parser(parser)
    commands = load_commands(parser.add_subparsers(dest='command'))  # each command will add its own subparser
    args = parser.parse_args()
-    settings.set("verbosity", args.verbose)
-    settings.load_user_config()
-
+    # TODO: Set this stuff properly, i.e. don't use settings (if possible)
+    # settings.set("verbosity", args.verbose)
+    # settings.load_user_config()
+    # settings.debug = args.debug
    if args.config:
-        if not os.path.exists(args.config):
-            raise ConfigError("Config file {} not found".format(args.config))
-        settings.load_config_file(args.config)
+        for config in args.config:
+            if not os.path.exists(config):
+                raise ConfigError("Config file {} not found".format(config))

    init_logging(settings.verbosity)

    command = commands[args.command]
@@ -56,7 +56,7 @@ from wlauto.core.resolver import ResourceResolver
from wlauto.core.result import ResultManager, IterationResult, RunResult
from wlauto.exceptions import (WAError, ConfigError, TimeoutError, InstrumentError,
                               DeviceError, DeviceNotRespondingError)
-from wlauto.utils.misc import ensure_directory_exists as _d, get_traceback, merge_dicts, format_duration
+from wlauto.utils.misc import ensure_directory_exists as _d, get_traceback, format_duration
from wlauto.utils.serializer import json

# The maximum number of reboot attempts for an iteration.
@ -92,10 +92,8 @@ class RunInfo(object):
|
||||
def to_dict(self):
|
||||
d = copy(self.__dict__)
|
||||
d['uuid'] = str(self.uuid)
|
||||
del d['config']
|
||||
d = merge_dicts(d, self.config.to_dict())
|
||||
return d
|
||||
|
||||
#TODO: pod
|
||||
|
||||
class ExecutionContext(object):
|
||||
"""
|
||||
|
@ -26,10 +26,11 @@ from itertools import chain
from copy import copy

from wlauto.exceptions import NotFoundError, LoaderError, ValidationError, ConfigError
from wlauto.utils.misc import isiterable, ensure_directory_exists as _d, walk_modules, load_class, merge_dicts, get_article
from wlauto.utils.misc import (ensure_directory_exists as _d,
                               walk_modules, load_class, merge_dicts_simple, get_article)
from wlauto.core.configuration import settings
from wlauto.utils.types import identifier, integer, boolean
from wlauto.core.configuration import ConfigurationPoint, ConfigurationPointCollection
from wlauto.utils.types import identifier, boolean
from wlauto.core.configuration.configuration import ConfigurationPoint as Parameter

MODNAME_TRANS = string.maketrans(':/\\.', '____')

@ -132,55 +133,6 @@ class ListCollection(list):
        super(ListCollection, self).__init__()


class Parameter(ConfigurationPoint):

    is_runtime = False

    def __init__(self, name,
                 kind=None,
                 mandatory=None,
                 default=None,
                 override=False,
                 allowed_values=None,
                 description=None,
                 constraint=None,
                 convert_types=True,
                 global_alias=None,
                 reconfigurable=True):
        """
        :param global_alias: This is an alternative alias for this parameter;
                             unlike the name, this alias will not be
                             namespaced under the owning extension's name
                             (hence the "global" part). This is introduced
                             primarily for backward compatibility -- so that
                             old extension settings names still work. This
                             should not be used for new parameters.

        :param reconfigurable: This indicates whether this parameter may be
                               reconfigured during the run (e.g. between different
                               iterations). This determines where in run configuration
                               this parameter may appear.

        For other parameters, see docstring for
        ``wa.framework.config.core.ConfigurationPoint``

        """
        super(Parameter, self).__init__(name, kind, mandatory,
                                        default, override, allowed_values,
                                        description, constraint,
                                        convert_types)
        self.global_alias = global_alias
        self.reconfigurable = reconfigurable

    def __repr__(self):
        d = copy(self.__dict__)
        del d['description']
        return 'Param({})'.format(d)


Param = Parameter
class Artifact(object):
    """
    This is an artifact generated during execution/post-processing of a workload.

@ -566,55 +518,6 @@ class PluginLoaderItem(object):
        self.cls = load_class(ext_tuple.cls)


class GlobalParameterAlias(object):
    """
    Represents a "global alias" for a plugin parameter. A global alias
    is specified at the top level of config rather than namespaced under a plugin
    name.

    Multiple plugins may have parameters with the same global_alias if they are
    part of the same inheritance hierarchy and one parameter is an override of the
    other. This class keeps track of all such cases in its plugins dict.

    """

    def __init__(self, name):
        self.name = name
        self.plugins = {}

    def iteritems(self):
        for ext in self.plugins.itervalues():
            yield (self.get_param(ext), ext)

    def get_param(self, ext):
        for param in ext.parameters:
            if param.global_alias == self.name:
                return param
        message = 'Plugin {} does not have a parameter with global alias {}'
        raise ValueError(message.format(ext.name, self.name))

    def update(self, other_ext):
        self._validate_ext(other_ext)
        self.plugins[other_ext.name] = other_ext

    def _validate_ext(self, other_ext):
        other_param = self.get_param(other_ext)
        for param, ext in self.iteritems():
            if ((not (issubclass(ext, other_ext) or issubclass(other_ext, ext))) and
                    other_param.kind != param.kind):
                message = 'Duplicate global alias {} declared in {} and {} plugins with different types'
                raise LoaderError(message.format(self.name, ext.name, other_ext.name))
            if param.kind != other_param.kind:
                message = 'Two params {} in {} and {} in {} both declare global alias {}, and are of different kinds'
                raise LoaderError(message.format(param.name, ext.name,
                                                 other_param.name, other_ext.name, self.name))

    def __str__(self):
        text = 'GlobalAlias({} => {})'
        extlist = ', '.join(['{}.{}'.format(e.name, p.name) for p, e in self.iteritems()])
        return text.format(self.name, extlist)


class PluginLoader(object):
    """
    Discovers, enumerates and loads available devices, configs, etc.

@ -711,7 +614,7 @@ class PluginLoader(object):
        """
        real_name, alias_config = self.resolve_alias(name)
        base_default_config = self.get_plugin_class(real_name).get_default_config()
        return merge_dicts(base_default_config, alias_config, list_duplicates='last', dict_type=OrderedDict)
        return merge_dicts_simple(base_default_config, alias_config)

    def list_plugins(self, kind=None):
        """

@ -884,15 +787,3 @@ class PluginLoader(object):
        if alias_id in self.plugins or alias_id in self.aliases:
            raise LoaderError('{} "{}" already exists.'.format(obj.kind, obj.name))
        self.aliases[alias_id] = alias

        # Update global aliases list. If a global alias is already in the list,
        # then make sure this plugin is in the same parent/child hierarchy
        # as the one already found.
        for param in obj.parameters:
            if param.global_alias:
                if param.global_alias not in self.global_param_aliases:
                    ga = GlobalParameterAlias(param.global_alias)
                    ga.update(obj)
                    self.global_param_aliases[ga.name] = ga
                else:  # global alias already exists.
                    self.global_param_aliases[param.global_alias].update(obj)
@ -42,9 +42,10 @@ from contextlib import contextmanager
from datetime import datetime

from wlauto.core.plugin import Plugin
from wlauto.core.configuration.configuration import ITERATION_STATUS
from wlauto.exceptions import WAError
from wlauto.utils.types import numeric
from wlauto.utils.misc import enum_metaclass, merge_dicts
from wlauto.utils.misc import enum_metaclass, merge_dicts_simple


class ResultManager(object):

@ -238,17 +239,7 @@ class IterationResult(object):

    __metaclass__ = enum_metaclass('values', return_name=True)

    values = [
        'NOT_STARTED',
        'RUNNING',

        'OK',
        'NONCRITICAL',
        'PARTIAL',
        'FAILED',
        'ABORTED',
        'SKIPPED',
    ]
    values = ITERATION_STATUS

    def __init__(self, spec):
        self.spec = spec

@ -263,9 +254,8 @@ class IterationResult(object):
        self.artifacts = []

    def add_metric(self, name, value, units=None, lower_is_better=False, classifiers=None):
        classifiers = merge_dicts(self.classifiers, classifiers or {},
                                  list_duplicates='last', should_normalize=False)
        self.metrics.append(Metric(name, value, units, lower_is_better, classifiers))
        self.metrics.append(Metric(name, value, units, lower_is_better,
                                   merge_dicts_simple(self.classifiers, classifiers)))

    def has_metric(self, name):
        for metric in self.metrics:
@ -17,7 +17,6 @@ import os
from collections import OrderedDict
from wlauto import Instrument, Parameter
from wlauto.exceptions import ConfigError, InstrumentError
from wlauto.utils.misc import merge_dicts
from wlauto.utils.types import caseless_string


@ -132,13 +131,13 @@ class FreqSweep(Instrument):
        for freq in sweep_spec['frequencies']:
            spec = old_spec.copy()
            if 'runtime_params' in sweep_spec:
                spec.runtime_parameters = merge_dicts(spec.runtime_parameters,
                                                      sweep_spec['runtime_params'],
                                                      dict_type=OrderedDict)
                spec.runtime_parameters = spec.runtime_parameters.copy()
                spec.runtime_parameters.update(sweep_spec['runtime_params'])

            if 'workload_params' in sweep_spec:
                spec.workload_parameters = merge_dicts(spec.workload_parameters,
                                                       sweep_spec['workload_params'],
                                                       dict_type=OrderedDict)
                spec.workload_parameters = spec.workload_parameters.copy()
                spec.workload_parameters.update(sweep_spec['workload_params'])

            spec.runtime_parameters['{}_governor'.format(sweep_spec['cluster'])] = "userspace"
            spec.runtime_parameters['{}_frequency'.format(sweep_spec['cluster'])] = freq
            spec.id = '{}_{}_{}'.format(spec.id, sweep_spec['label'], freq)
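The merge_dicts() calls above are replaced by a shallow copy-and-update, which preserves the same last-value-wins behaviour for clashing keys. A minimal illustration (the dictionary contents are made up):

base = {'governor': 'ondemand', 'frequency': 1000000}
override = {'frequency': 1400000}
merged = base.copy()     # shallow copy, so the original spec is untouched
merged.update(override)  # later values win on key clashes
# merged == {'governor': 'ondemand', 'frequency': 1400000}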
@ -56,14 +56,14 @@ class StandardProcessor(ResultProcessor):


class CsvReportProcessor(ResultProcessor):
    """
    name = 'csv'
    description = """
    Creates a ``results.csv`` in the output directory containing results for
    all iterations in CSV format, each line containing a single metric.

    """

    name = 'csv'

    parameters = [
        Parameter('use_all_classifiers', kind=bool, default=False,
                  global_alias='use_all_classifiers',
1 wlauto/tests/data/test-agenda-bad-syntax.yaml Normal file
@ -0,0 +1 @@
[ewqh

1 wlauto/tests/data/test-agenda-not-dict.yaml Normal file
@ -0,0 +1 @@
Test
@ -24,6 +24,7 @@ from nose.tools import assert_equal, assert_in, raises

from wlauto.core.agenda import Agenda
from wlauto.exceptions import ConfigError
from wlauto.utils.serializer import SerializerSyntaxError


YAML_TEST_FILE = os.path.join(os.path.dirname(__file__), 'data', 'test-agenda.yaml')

@ -35,7 +36,7 @@ workloads:
        test: 1
"""
invalid_agenda = StringIO(invalid_agenda_text)
invalid_agenda.name = 'invalid1'
invalid_agenda.name = 'invalid1.yaml'

duplicate_agenda_text = """
global:

@ -49,13 +50,13 @@ workloads:
      workload_name: andebench
"""
duplicate_agenda = StringIO(duplicate_agenda_text)
duplicate_agenda.name = 'invalid2'
duplicate_agenda.name = 'invalid2.yaml'

short_agenda_text = """
workloads: [antutu, linpack, andebench]
"""
short_agenda = StringIO(short_agenda_text)
short_agenda.name = 'short'
short_agenda.name = 'short.yaml'

default_ids_agenda_text = """
workloads:

@ -69,7 +70,7 @@ workloads:
    - vellamo
"""
default_ids_agenda = StringIO(default_ids_agenda_text)
default_ids_agenda.name = 'default_ids'
default_ids_agenda.name = 'default_ids.yaml'

sectioned_agenda_text = """
sections:

@ -91,7 +92,7 @@ workloads:
        - nenamark
"""
sectioned_agenda = StringIO(sectioned_agenda_text)
sectioned_agenda.name = 'sectioned'
sectioned_agenda.name = 'sectioned.yaml'

dup_sectioned_agenda_text = """
sections:

@ -105,7 +106,7 @@ workloads:
        - nenamark
"""
dup_sectioned_agenda = StringIO(dup_sectioned_agenda_text)
dup_sectioned_agenda.name = 'dup-sectioned'
dup_sectioned_agenda.name = 'dup-sectioned.yaml'

caps_agenda_text = """
config:

@ -120,17 +121,17 @@ workloads:
      name: linpack
"""
caps_agenda = StringIO(caps_agenda_text)
caps_agenda.name = 'caps'
caps_agenda.name = 'caps.yaml'

bad_syntax_agenda_text = """
config:
    # tab on the following line
    reboot_policy: never
	reboot_policy: never
workloads:
    - antutu
"""
bad_syntax_agenda = StringIO(bad_syntax_agenda_text)
bad_syntax_agenda.name = 'bad_syntax'
bad_syntax_agenda.name = 'bad_syntax.yaml'

section_ids_test_text = """
config:

@ -145,7 +146,7 @@ sections:
    - id: bar
"""
section_ids_agenda = StringIO(section_ids_test_text)
section_ids_agenda.name = 'section_ids'
section_ids_agenda.name = 'section_ids.yaml'


class AgendaTest(TestCase):

@ -154,42 +155,18 @@ class AgendaTest(TestCase):
        agenda = Agenda(YAML_TEST_FILE)
        assert_equal(len(agenda.workloads), 4)

    def test_duplicate_id(self):
        try:
            Agenda(duplicate_agenda)
        except ConfigError, e:
            assert_in('duplicate', e.message.lower())  # pylint: disable=E1101
        else:
            raise Exception('ConfigError was not raised for an agenda with duplicate ids.')

    def test_yaml_missing_field(self):
        try:
            Agenda(invalid_agenda_text)
            Agenda(invalid_agenda)
        except ConfigError, e:
            assert_in('workload name', e.message)
        else:
            raise Exception('ConfigError was not raised for an invalid agenda.')

    def test_defaults(self):
        agenda = Agenda(short_agenda)
        assert_equal(len(agenda.workloads), 3)
        assert_equal(agenda.workloads[0].workload_name, 'antutu')
        assert_equal(agenda.workloads[0].id, '1')

    def test_default_id_assignment(self):
        agenda = Agenda(default_ids_agenda)
        assert_equal(agenda.workloads[0].id, '2')
        assert_equal(agenda.workloads[3].id, '3')

    def test_sections(self):
        agenda = Agenda(sectioned_agenda)
        assert_equal(agenda.sections[0].workloads[0].workload_name, 'antutu')
        assert_equal(agenda.sections[1].runtime_parameters['dp'], 'three')

    @raises(ConfigError)
    def test_dup_sections(self):
        Agenda(dup_sectioned_agenda)

    @raises(ConfigError)
    @raises(SerializerSyntaxError)
    def test_bad_syntax(self):
        Agenda(bad_syntax_agenda)
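The renames above give every in-memory agenda a '.yaml' extension. The likely reason (an assumption; the serializer change itself is not shown in this commit) is that the serializer now picks the parsing format from the file extension. A sketch of the resulting pattern:

from StringIO import StringIO

agenda_src = StringIO("workloads: [antutu]")
agenda_src.name = 'inline.yaml'  # extension assumed to drive format detection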
621 wlauto/tests/test_configuration.py Normal file
@ -0,0 +1,621 @@
# pylint: disable=R0201
from copy import deepcopy, copy

from unittest import TestCase

from nose.tools import assert_equal, assert_is
from mock.mock import Mock

from wlauto.exceptions import ConfigError
from wlauto.core.configuration.tree import SectionNode
from wlauto.core.configuration.configuration import (ConfigurationPoint,
                                                     Configuration,
                                                     RunConfiguration,
                                                     merge_using_priority_specificity,
                                                     get_type_name)
from wlauto.core.configuration.plugin_cache import PluginCache, GENERIC_CONFIGS
from wlauto.utils.types import obj_dict

#        A1
#       /  \
#      B1   B2
#     / \   / \
#    C1 C2 C3 C4
#        \
#         D1
a1 = SectionNode({"id": "A1"})
b1 = a1.add_section({"id": "B1"})
b2 = a1.add_section({"id": "B2"})
c1 = b1.add_section({"id": "C1"})
c2 = b1.add_section({"id": "C2"})
c3 = b2.add_section({"id": "C3"})
c4 = b2.add_section({"id": "C4"})
d1 = c2.add_section({"id": "D1"})

DEFAULT_PLUGIN_CONFIG = {
    "device_config": {
        "a": {
            "test3": ["there"],
            "test5": [5, 4, 3],
        },
        "b": {
            "test4": 1234,
        },
    },
    "some_device": {
        "a": {
            "test3": ["how are"],
            "test2": "MANDATORY",
        },
        "b": {
            "test3": ["you?"],
            "test5": [1, 2, 3],
        }
    }
}


def _construct_mock_plugin_cache(values=None):
    if values is None:
        values = deepcopy(DEFAULT_PLUGIN_CONFIG)

    plugin_cache = Mock(spec=PluginCache)
    plugin_cache.sources = ["a", "b", "c", "d", "e"]

    def get_plugin_config(plugin_name):
        return values[plugin_name]
    plugin_cache.get_plugin_config.side_effect = get_plugin_config

    def get_plugin_parameters(_):
        return TestConfiguration.configuration
    plugin_cache.get_plugin_parameters.side_effect = get_plugin_parameters

    return plugin_cache


class TreeTest(TestCase):

    def test_node(self):
        node = SectionNode(1)
        assert_equal(node.config, 1)
        assert_is(node.parent, None)
        assert_equal(node.workload_entries, [])
        assert_equal(node.children, [])

    def test_add_workload(self):
        node = SectionNode(1)
        node.add_workload(2)
        assert_equal(len(node.workload_entries), 1)
        wk = node.workload_entries[0]
        assert_equal(wk.config, 2)
        assert_is(wk.parent, node)

    def test_add_section(self):
        node = SectionNode(1)
        new_node = node.add_section(2)
        assert_equal(len(node.children), 1)
        assert_is(node.children[0], new_node)
        assert_is(new_node.parent, node)
        assert_equal(node.is_leaf, False)
        assert_equal(new_node.is_leaf, True)

    def test_descendants(self):
        for got, expected in zip(b1.descendants(), [c1, d1, c2]):
            assert_equal(got.config, expected.config)
        for got, expected in zip(a1.descendants(), [c1, d1, c2, b1, c3, c4, b2]):
            assert_equal(got.config, expected.config)

    def test_ancestors(self):
        for got, expected in zip(d1.ancestors(), [c2, b1, a1]):
            assert_equal(got.config, expected.config)
        for _ in a1.ancestors():
            raise Exception("A1 is the root, it shouldn't have ancestors")

    def test_leaves(self):
        for got, expected in zip(a1.leaves(), [c1, d1, c3, c4]):
            assert_equal(got.config, expected.config)
        for got, expected in zip(d1.leaves(), [d1]):
            assert_equal(got.config, expected.config)

    def test_source_name(self):
        assert_equal(a1.name, 'section "A1"')
        global_section = SectionNode({"id": "global"})
        assert_equal(global_section.name, "globally specified configuration")

        a1.add_workload({'id': 'wk1'})
        assert_equal(a1.workload_entries[0].name, 'workload "wk1" from section "A1"')
        global_section.add_workload({'id': 'wk2'})
        assert_equal(global_section.workload_entries[0].name, 'workload "wk2"')


class ConfigurationPointTest(TestCase):

    def test_match(self):
        cp1 = ConfigurationPoint("test1", aliases=["foo", "bar"])
        cp2 = ConfigurationPoint("test2", aliases=["fizz", "buzz"])

        assert_equal(cp1.match("test1"), True)
        assert_equal(cp1.match("foo"), True)
        assert_equal(cp1.match("bar"), True)
        assert_equal(cp1.match("fizz"), False)
        assert_equal(cp1.match("NOT VALID"), False)

        assert_equal(cp2.match("test2"), True)
        assert_equal(cp2.match("fizz"), True)
        assert_equal(cp2.match("buzz"), True)
        assert_equal(cp2.match("foo"), False)
        assert_equal(cp2.match("NOT VALID"), False)

    def test_set_value(self):
        cp1 = ConfigurationPoint("test", default="hello")
        cp2 = ConfigurationPoint("test", mandatory=True)
        cp3 = ConfigurationPoint("test", mandatory=True, default="Hello")
        cp4 = ConfigurationPoint("test", default=["hello"], merge=True, kind=list)
        cp5 = ConfigurationPoint("test", kind=int)
        cp6 = ConfigurationPoint("test5", kind=list, allowed_values=[1, 2, 3, 4, 5])

        mock = Mock()
        mock.name = "ConfigurationPoint Unit Test"

        # Testing defaults and basic functionality
        cp1.set_value(mock)
        assert_equal(mock.test, "hello")
        cp1.set_value(mock, value="there")
        assert_equal(mock.test, "there")

        # Testing mandatory flag
        err_msg = 'No values specified for mandatory parameter "test" in ' \
                  'ConfigurationPoint Unit Test'
        with self.assertRaisesRegexp(ConfigError, err_msg):
            cp2.set_value(mock)
        cp3.set_value(mock)  # Should ignore mandatory
        assert_equal(mock.test, "Hello")

        # Testing merging - not in depth; that is done in the unit test for merge_config
        cp4.set_value(mock, value=["there"])
        assert_equal(mock.test, ["Hello", "there"])

        # Testing type conversion
        cp5.set_value(mock, value="100")
        assert_equal(isinstance(mock.test, int), True)
        msg = 'Bad value "abc" for test; must be an integer'
        with self.assertRaisesRegexp(ConfigError, msg):
            cp5.set_value(mock, value="abc")

        # Testing that validation is not called when no value is set;
        # if it is, it will error because it cannot iterate over None
        cp6.set_value(mock)

    def test_validation(self):
        # Test invalid default
        with self.assertRaises(ValueError):
            # pylint: disable=W0612
            bad_cp = ConfigurationPoint("test", allowed_values=[1], default=100)

        def is_even(value):
            if value % 2:
                return False
            return True

        cp1 = ConfigurationPoint("test", kind=int, allowed_values=[1, 2, 3, 4, 5])
        cp2 = ConfigurationPoint("test", kind=list, allowed_values=[1, 2, 3, 4, 5])
        cp3 = ConfigurationPoint("test", kind=int, constraint=is_even)
        cp4 = ConfigurationPoint("test", kind=list, mandatory=True, allowed_values=[1, 99])
        mock = obj_dict()
        mock.name = "ConfigurationPoint Validation Unit Test"

        # Test allowed values
        cp1.validate_value(mock.name, 1)
        with self.assertRaises(ConfigError):
            cp1.validate_value(mock.name, 100)
        with self.assertRaises(ConfigError):
            cp1.validate_value(mock.name, [1, 2, 3])

        # Test allowed values for lists
        cp2.validate_value(mock.name, [1, 2, 3])
        with self.assertRaises(ConfigError):
            cp2.validate_value(mock.name, [1, 2, 100])

        # Test constraints
        cp3.validate_value(mock.name, 2)
        cp3.validate_value(mock.name, 4)
        cp3.validate_value(mock.name, 6)
        msg = '"3" failed constraint validation for "test" in "ConfigurationPoint' \
              ' Validation Unit Test".'
        with self.assertRaisesRegexp(ConfigError, msg):
            cp3.validate_value(mock.name, 3)

        with self.assertRaises(ValueError):
            ConfigurationPoint("test", constraint=100)

        # Test "validate" methods
        mock.test = None
        # Mandatory config point not set
        with self.assertRaises(ConfigError):
            cp4.validate(mock)
        cp1.validate(mock)  # cp1 doesn't have mandatory set
        cp4.set_value(mock, value=[99])
        cp4.validate(mock)

    def test_get_type_name(self):
        def dummy():
            pass
        types = [str, list, int, dummy]
        names = ["str", "list", "integer", "dummy"]
        for kind, name in zip(types, names):
            cp = ConfigurationPoint("test", kind=kind)
            assert_equal(get_type_name(cp.kind), name)


# Subclass to add some config points for use in testing
class TestConfiguration(Configuration):
    name = "Test Config"
    __configuration = [
        ConfigurationPoint("test1", default="hello"),
        ConfigurationPoint("test2", mandatory=True),
        ConfigurationPoint("test3", default=["hello"], merge=True, kind=list),
        ConfigurationPoint("test4", kind=int, default=123),
        ConfigurationPoint("test5", kind=list, allowed_values=[1, 2, 3, 4, 5]),
    ]
    configuration = {cp.name: cp for cp in __configuration}


class ConfigurationTest(TestCase):

    def test_merge_using_priority_specificity(self):
        # Test good configs
        plugin_cache = _construct_mock_plugin_cache()
        expected_result = {
            "test1": "hello",
            "test2": "MANDATORY",
            "test3": ["hello", "there", "how are", "you?"],
            "test4": 1234,
            "test5": [1, 2, 3],
        }
        result = merge_using_priority_specificity("device_config", "some_device", plugin_cache)
        assert_equal(result, expected_result)

        # Test missing mandatory parameter
        plugin_cache = _construct_mock_plugin_cache(values={
            "device_config": {
                "a": {
                    "test1": "abc",
                },
            },
            "some_device": {
                "b": {
                    "test5": [1, 2, 3],
                }
            }
        })
        msg = 'No value specified for mandatory parameter "test2" in some_device.'
        with self.assertRaisesRegexp(ConfigError, msg):
            merge_using_priority_specificity("device_config", "some_device", plugin_cache)

        # Test conflict
        plugin_cache = _construct_mock_plugin_cache(values={
            "device_config": {
                "e": {
                    'test2': "NOT_CONFLICTING"
                }
            },
            "some_device": {
                'a': {
                    'test2': "CONFLICT1"
                },
                'b': {
                    'test2': "CONFLICT2"
                },
                'c': {
                    'test2': "CONFLICT3"
                },
            },
        })
        msg = ('Error in "e":\n'
               '\t"device_config" configuration "test2" has already been specified more specifically for some_device in:\n'
               '\t\ta\n'
               '\t\tb\n'
               '\t\tc')
        with self.assertRaisesRegexp(ConfigError, msg):
            merge_using_priority_specificity("device_config", "some_device", plugin_cache)

        # Test invalid entries
        plugin_cache = _construct_mock_plugin_cache(values={
            "device_config": {
                "a": {
                    "NOT_A_CFG_POINT": "nope"
                }
            },
            "some_device": {}
        })
        msg = ('Error in "a":\n\t'
               'Invalid entry\(ies\) for "some_device" in "device_config": "NOT_A_CFG_POINT"')
        with self.assertRaisesRegexp(ConfigError, msg):
            merge_using_priority_specificity("device_config", "some_device", plugin_cache)

        plugin_cache = _construct_mock_plugin_cache(values={
            "some_device": {
                "a": {
                    "NOT_A_CFG_POINT": "nope"
                }
            },
            "device_config": {}
        })
        msg = ('Error in "a":\n\t'
               'Invalid entry\(ies\) for "some_device": "NOT_A_CFG_POINT"')
        with self.assertRaisesRegexp(ConfigError, msg):
            merge_using_priority_specificity("device_config", "some_device", plugin_cache)

    # pylint: disable=no-member
    def test_configuration(self):
        # Test loading defaults
        cfg = TestConfiguration()
        expected = {
            "test1": "hello",
            "test3": ["hello"],
            "test4": 123,
        }
        assert_equal(cfg.to_pod(), expected)
        # If a cfg point is not set, an attribute with value None should still be created
        assert_is(cfg.test2, None)
        assert_is(cfg.test5, None)

        # Testing set
        # Good value
        cfg.set("test1", "there")
        assert_equal(cfg.test1, "there")  # pylint: disable=E1101
        # Unknown value
        with self.assertRaisesRegexp(ConfigError, 'Unknown Test Config configuration "nope"'):
            cfg.set("nope", 123)
        # check_mandatory
        with self.assertRaises(ConfigError):
            cfg.set("test2", value=None)
        cfg.set("test2", value=None, check_mandatory=False)
        # Parameter constraints are tested in the ConfigurationPoint unit test,
        # since this just calls through to `ConfigurationPoint.set_value`

        # Test validation
        msg = 'No value specified for mandatory parameter "test2" in Test Config'
        with self.assertRaisesRegexp(ConfigError, msg):
            cfg.validate()
        cfg.set("test2", 1)
        cfg.validate()

        # Testing setting values from a dict
        new_values = {
            "test1": "This",
            "test2": "is",
            "test3": ["a"],
            "test4": 7357,
            "test5": [5],
        }
        cfg.update_config(new_values)
        new_values["test3"] = ["hello", "a"]  # This cfg point has merge == True
        for k, v in new_values.iteritems():
            assert_equal(getattr(cfg, k), v)

        # Testing podding
        pod = cfg.to_pod()
        new_pod = TestConfiguration.from_pod(copy(pod), None).to_pod()
        assert_equal(pod, new_pod)

        # Invalid pod entry
        pod = {'invalid_entry': "nope"}
        msg = 'Invalid entry\(ies\) for "Test Config": "invalid_entry"'
        with self.assertRaisesRegexp(ConfigError, msg):
            TestConfiguration.from_pod(pod, None)

        # Failed pod validation
        pod = {"test1": "testing"}
        msg = 'No value specified for mandatory parameter "test2" in Test Config.'
        with self.assertRaisesRegexp(ConfigError, msg):
            TestConfiguration.from_pod(pod, None)

    def test_run_configuration(self):
        plugin_cache = _construct_mock_plugin_cache()

        # Test `merge_device_config`
        run_config = RunConfiguration()
        run_config.set("device", "some_device")
        run_config.merge_device_config(plugin_cache)

        # Test `to_pod`
        expected_pod = {
            "device": "some_device",
            "device_config": {
                "test1": "hello",
                "test2": "MANDATORY",
                "test3": ["hello", "there", "how are", "you?"],
                "test4": 1234,
                "test5": [1, 2, 3],
            },
            "execution_order": "by_iteration",
            "reboot_policy": "as_needed",
            "retry_on_status": ['FAILED', 'PARTIAL'],
            "max_retries": 3,
        }
        pod = run_config.to_pod()
        assert_equal(pod, expected_pod)

        # Test to_pod -> from_pod round trip
        new_pod = RunConfiguration.from_pod(copy(pod), plugin_cache).to_pod()
        assert_equal(pod, new_pod)

        # from_pod with invalid device_config
        pod['device_config']['invalid_entry'] = "nope"
        msg = 'Invalid entry "invalid_entry" for device "some_device".'
        with self.assertRaisesRegexp(ConfigError, msg):
            RunConfiguration.from_pod(copy(pod), plugin_cache)

        # from_pod with no device_config
        pod.pop("device_config")
        msg = 'No value specified for mandatory parameter "device_config".'
        with self.assertRaisesRegexp(ConfigError, msg):
            RunConfiguration.from_pod(copy(pod), plugin_cache)

    def test_generate_job_spec(self):
        pass


class PluginCacheTest(TestCase):

    param1 = ConfigurationPoint("param1", aliases="test_global_alias")
    param2 = ConfigurationPoint("param2", aliases="some_other_alias")
    param3 = ConfigurationPoint("param3")

    plugin1 = obj_dict(values={
        "name": "plugin 1",
        "parameters": [
            param1,
            param2,
        ]
    })
    plugin2 = obj_dict(values={
        "name": "plugin 2",
        "parameters": [
            param1,
            param3,
        ]
    })

    def get_plugin(self, name):
        if name == "plugin 1":
            return self.plugin1
        if name == "plugin 2":
            return self.plugin2

    def has_plugin(self, name):
        return name in ["plugin 1", "plugin 2"]

    def make_mock_cache(self):
        mock_loader = Mock()
        mock_loader.get_plugin_class.side_effect = self.get_plugin
        mock_loader.list_plugins = Mock(return_value=[self.plugin1, self.plugin2])
        mock_loader.has_plugin.side_effect = self.has_plugin
        return PluginCache(loader=mock_loader)

    def test_get_params(self):
        plugin_cache = self.make_mock_cache()

        expected_params = {
            self.param1.name: self.param1,
            self.param2.name: self.param2,
        }

        assert_equal(expected_params, plugin_cache.get_plugin_parameters("plugin 1"))

    def test_global_aliases(self):
        plugin_cache = self.make_mock_cache()

        # Check the alias map
        expected_map = {
            "plugin 1": {
                self.param1.aliases: self.param1,
                self.param2.aliases: self.param2,
            },
            "plugin 2": {
                self.param1.aliases: self.param1,
            }
        }
        expected_set = set(["test_global_alias", "some_other_alias"])

        assert_equal(expected_map, plugin_cache._global_alias_map)
        assert_equal(expected_set, plugin_cache._list_of_global_aliases)
        assert_equal(True, plugin_cache.is_global_alias("test_global_alias"))
        assert_equal(False, plugin_cache.is_global_alias("not_a_global_alias"))

        # Error when adding to an unknown source
        with self.assertRaises(RuntimeError):
            plugin_cache.add_global_alias("adding", "too", "early")

        # Test adding sources
        for x in xrange(5):
            plugin_cache.add_source(x)
        assert_equal([0, 1, 2, 3, 4], plugin_cache.sources)

        # Error when adding something that is not a plugin, global alias or generic entry
        with self.assertRaises(RuntimeError):
            plugin_cache.add_global_alias("unknown_alias", "some_value", 0)

        # Test adding global alias values
        plugin_cache.add_global_alias("test_global_alias", "some_value", 0)
        expected_aliases = {"test_global_alias": {0: "some_value"}}
        assert_equal(expected_aliases, plugin_cache.global_alias_values)

    def test_add_config(self):
        plugin_cache = self.make_mock_cache()

        # Test adding sources
        for x in xrange(5):
            plugin_cache.add_source(x)
        assert_equal([0, 1, 2, 3, 4], plugin_cache.sources)

        # Test adding plugin config
        plugin_cache.add_config("plugin 1", "param1", "some_other_value", 0)
        expected_plugin_config = {"plugin 1": {0: {"param1": "some_other_value"}}}
        assert_equal(expected_plugin_config, plugin_cache.plugin_configs)

        # Test adding generic config
        for name in GENERIC_CONFIGS:
            plugin_cache.add_config(name, "param1", "some_value", 0)
            expected_plugin_config[name] = {}
            expected_plugin_config[name][0] = {"param1": "some_value"}
        assert_equal(expected_plugin_config, plugin_cache.plugin_configs)

    def test_get_plugin_config(self):
        plugin_cache = self.make_mock_cache()
        for x in xrange(5):
            plugin_cache.add_source(x)

        # Add some global aliases
        plugin_cache.add_global_alias("test_global_alias", "1", 0)
        plugin_cache.add_global_alias("test_global_alias", "2", 4)
        plugin_cache.add_global_alias("test_global_alias", "3", 3)

        # Test that they are merged in source order
        expected_config = {
            "param1": "2",
            "param2": None,
        }
        assert_equal(expected_config, plugin_cache.get_plugin_config("plugin 1"))

        # Add some plugin-specific config
        plugin_cache.add_config("plugin 1", "param1", "3", 0)
        plugin_cache.add_config("plugin 1", "param1", "4", 2)
        plugin_cache.add_config("plugin 1", "param1", "5", 1)

        # Test that they are merged in source order on top of the global aliases
        expected_config = {
            "param1": "4",
            "param2": None,
        }
        assert_equal(expected_config, plugin_cache.get_plugin_config("plugin 1"))

    def test_merge_using_priority_specificity(self):
        plugin_cache = self.make_mock_cache()
        for x in xrange(5):
            plugin_cache.add_source(x)

        # Add generic configs
        plugin_cache.add_config("device_config", "param1", '1', 1)
        plugin_cache.add_config("device_config", "param1", '2', 2)
        assert_equal(plugin_cache.get_plugin_config("plugin 1", generic_name="device_config"),
                     {"param1": '2', "param2": None})

        # Add a specific config at the same level as the generic config
        plugin_cache.add_config("plugin 1", "param1", '3', 2)
        assert_equal(plugin_cache.get_plugin_config("plugin 1", generic_name="device_config"),
                     {"param1": '3', "param2": None})

        # Add a specific config at a higher level
        plugin_cache.add_config("plugin 1", "param1", '4', 3)
        assert_equal(plugin_cache.get_plugin_config("plugin 1", generic_name="device_config"),
                     {"param1": '4', "param2": None})

        # Add a generic config at a higher level - should be an error
        plugin_cache.add_config("device_config", "param1", '5', 4)
        msg = 'Error in "4":\n' \
              '\t"device_config" configuration "param1" has already been specified' \
              ' more specifically for plugin 1 in:\n' \
              '\t\t2, 3'
        with self.assertRaisesRegexp(ConfigError, msg):
            plugin_cache.get_plugin_config("plugin 1", generic_name="device_config")
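A much-reduced illustration of the rule these tests exercise (not WA's actual implementation, which also merges list values via merge_config_values and tracks sources per parameter; this toy simply overwrites): within a specificity level values merge in source order, specific entries override generic ones, and a generic value arriving from a later source than an existing specific value for the same parameter is a conflict.

def merge_by_specificity(generic, specific, sources):
    # Toy sketch only. generic/specific map source -> {param: value};
    # sources are ordered lowest-priority first.
    merged = {}
    for src in sources:
        merged.update(generic.get(src, {}))
    for src in sources:
        for param, value in specific.get(src, {}).items():
            merged[param] = value
            # Any later generic setting of this param is a conflict.
            for later in sources[sources.index(src) + 1:]:
                if param in generic.get(later, {}):
                    raise ValueError('generic value for {} given after a '
                                     'more specific one'.format(param))
    return merged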
422 wlauto/tests/test_parsers.py Normal file
@ -0,0 +1,422 @@
import os
from unittest import TestCase
from copy import copy

from nose.tools import assert_equal  # pylint: disable=E0611
from mock.mock import Mock, MagicMock, call

from wlauto.exceptions import ConfigError
from wlauto.core.configuration.parsers import *  # pylint: disable=wildcard-import
from wlauto.core.configuration.parsers import _load_file, _collect_valid_id, _resolve_params_alias
from wlauto.core.configuration import (WAConfiguration, RunConfiguration, JobGenerator,
                                       PluginCache, ConfigurationPoint)
from wlauto.utils.types import toggle_set, reset_counter


class TestFunctions(TestCase):

    def test_load_file(self):
        # This does not test read_pod

        # Non-existent file
        with self.assertRaises(ValueError):
            _load_file("THIS-IS-NOT-A-FILE", "test file")
        base_path = os.path.dirname(os.path.realpath(__file__))

        # Top-level entry not a dict
        with self.assertRaisesRegexp(ConfigError, r".+ does not contain a valid test file structure; top level must be a dict\."):
            _load_file(os.path.join(base_path, "data", "test-agenda-not-dict.yaml"), "test file")

        # YAML syntax error
        with self.assertRaisesRegexp(ConfigError, r"Error parsing test file .+: Syntax Error on line 1"):
            _load_file(os.path.join(base_path, "data", "test-agenda-bad-syntax.yaml"), "test file")

        # Ideal case
        _load_file(os.path.join(base_path, "data", "test-agenda.yaml"), "test file")

    def test_get_aliased_param(self):
        # Ideal case
        cp1 = ConfigurationPoint("test", aliases=[
            'workload_parameters',
            'workload_params',
            'params'
        ])

        d_correct = {"workload_parameters": [1, 2, 3],
                     "instruments": [2, 3, 4],
                     "some_other_param": 1234}
        assert_equal(get_aliased_param(cp1, d_correct, default=[], pop=False), [1, 2, 3])

        # Two aliases for the same parameter given
        d_duplicate = {"workload_parameters": [1, 2, 3],
                       "workload_params": [2, 3, 4]}
        with self.assertRaises(ConfigError):
            get_aliased_param(cp1, d_duplicate, default=[])

        # Empty dict
        d_none = {}
        assert_equal(get_aliased_param(cp1, d_none, default=[]), [])

        # Aliased parameter not present in dict
        d_not_present = {"instruments": [2, 3, 4],
                         "some_other_param": 1234}
        assert_equal(get_aliased_param(cp1, d_not_present, default=1), 1)

        # Testing pop functionality
        assert_equal("workload_parameters" in d_correct, True)
        get_aliased_param(cp1, d_correct, default=[])
        assert_equal("workload_parameters" in d_correct, False)

    def test_merge_result_processor_instruments(self):
        non_merge = {
            "instrumentation": toggle_set(["one", "two"]),
        }
        expected_non_merge = copy(non_merge)
        merge_result_processors_instruments(non_merge)
        assert_equal(non_merge, expected_non_merge)

        no_overlap = {
            "instrumentation": ["one", "two"],
            "result_processors": ["three", "~four"],
        }
        expected_no_overlap = {"instrumentation": toggle_set(["one", "two", "three", "~four"])}
        merge_result_processors_instruments(no_overlap)
        assert_equal(no_overlap, expected_no_overlap)

        non_conflicting = {
            "instrumentation": ["one", "two"],
            "result_processors": ["two", "three"],
        }
        expected_non_conflicting = {"instrumentation": toggle_set(["one", "two", "three"])}
        merge_result_processors_instruments(non_conflicting)
        assert_equal(non_conflicting, expected_non_conflicting)

        conflict = {
            "instrumentation": ["one", "two"],
            "result_processors": ["~two", "three"],
        }
        with self.assertRaises(ConfigError):
            merge_result_processors_instruments(conflict)

    def test_collect_valid_id(self):

        msg = 'Invalid unit_test ID "uses-a-dash"; IDs cannot contain a "-"'
        with self.assertRaisesRegexp(ConfigError, msg):
            _collect_valid_id("uses-a-dash", set(), "unit_test")

        msg = 'Invalid unit_test ID "global"; is a reserved ID'
        with self.assertRaisesRegexp(ConfigError, msg):
            _collect_valid_id("global", set(), "unit_test")

        msg = 'Duplicate unit_test ID "duplicate"'
        with self.assertRaisesRegexp(ConfigError, msg):
            _collect_valid_id("duplicate", set(["duplicate"]), "unit_test")

    def test_resolve_params_alias(self):
        test = {"params": "some_value"}
        _resolve_params_alias(test, "new_name")
        assert_equal(test, {"new_name_parameters": "some_value"})

        # Test it only affects "params"
        _resolve_params_alias(test, "new_name")
        assert_equal(test, {"new_name_parameters": "some_value"})

        test["params"] = "some_other_value"
        with self.assertRaises(ConfigError):
            _resolve_params_alias(test, "new_name")

    def test_construct_valid_entry(self):
        raise Exception()


class TestConfigParser(TestCase):

    def test_error_cases(self):
        wa_config = Mock(spec=WAConfiguration)
        wa_config.configuration = WAConfiguration.configuration
        run_config = Mock(spec=RunConfiguration)
        run_config.configuration = RunConfiguration.configuration
        config_parser = ConfigParser(wa_config,
                                     run_config,
                                     Mock(spec=JobGenerator),
                                     Mock(spec=PluginCache))

        # "run_name" can only be in agenda config sections
        # and is handled by AgendaParser
        err = 'Error in "Unit test":\n' \
              '"run_name" can only be specified in the config section of an agenda'
        with self.assertRaisesRegexp(ConfigError, err):
            config_parser.load({"run_name": "test"}, "Unit test")

        # Instrument and result_processor lists in the same config cannot
        # have conflicting entries.
        err = 'Error in "Unit test":\n' \
              '"instrumentation" and "result_processors" have conflicting entries:'
        with self.assertRaisesRegexp(ConfigError, err):
            config_parser.load({"instruments": ["one", "two", "three"],
                                "result_processors": ["~one", "~two", "~three"]},
                               "Unit test")

    def test_config_points(self):
        wa_config = Mock(spec=WAConfiguration)
        wa_config.configuration = WAConfiguration.configuration

        run_config = Mock(spec=RunConfiguration)
        run_config.configuration = RunConfiguration.configuration

        jobs_config = Mock(spec=JobGenerator)
        plugin_cache = Mock(spec=PluginCache)
        config_parser = ConfigParser(wa_config, run_config, jobs_config, plugin_cache)

        cfg = {
            "assets_repository": "/somewhere/",
            "logging": "verbose",
            "project": "some project",
            "project_stage": "stage 1",
            "iterations": 9001,
            "workload_name": "name"
        }
        config_parser.load(cfg, "Unit test")
        wa_config.set.assert_has_calls([
            call("assets_repository", "/somewhere/"),
            call("logging", "verbose")
        ], any_order=True)
        run_config.set.assert_has_calls([
            call("project", "some project"),
            call("project_stage", "stage 1")
        ], any_order=True)
        jobs_config.set_global_value.assert_has_calls([
            call("iterations", 9001),
            call("workload_name", "name"),
            call("instrumentation", toggle_set())
        ], any_order=True)

        # Test setting global instruments including a non-conflicting duplicate ("two")
        jobs_config.reset_mock()
        instruments_and_result_processors = {
            "instruments": ["one", "two"],
            "result_processors": ["two", "three"]
        }
        config_parser.load(instruments_and_result_processors, "Unit test")
        jobs_config.set_global_value.assert_has_calls([
            call("instrumentation", toggle_set(["one", "two", "three"]))
        ], any_order=True)

        # Testing an empty config
        jobs_config.reset_mock()
        config_parser.load({}, "Unit test")
        jobs_config.set_global_value.assert_has_calls([], any_order=True)
        wa_config.set.assert_has_calls([], any_order=True)
        run_config.set.assert_has_calls([], any_order=True)


class TestAgendaParser(TestCase):

    # Tests Phases 1 & 2
    def test_valid_structures(self):
        wa_config = Mock(spec=WAConfiguration)
        wa_config.configuration = WAConfiguration.configuration
        run_config = Mock(spec=RunConfiguration)
        run_config.configuration = RunConfiguration.configuration
        jobs_config = Mock(spec=JobGenerator)
        plugin_cache = Mock(spec=PluginCache)
        agenda_parser = AgendaParser(wa_config, run_config, jobs_config, plugin_cache)

        msg = 'Error in "Unit Test":\n\tInvalid agenda, top level entry must be a dict'
        with self.assertRaisesRegexp(ConfigError, msg):
            agenda_parser.load(123, "Unit Test")

        def _test_bad_type(name, source, msg):
            error_msg = msg.format(source=source, name=name)
            with self.assertRaisesRegexp(ConfigError, error_msg):
                agenda_parser.load({name: 123}, source)

        msg = 'Error in "{source}":\n\tInvalid entry "{name}" - must be a dict'
        _test_bad_type("config", "Unit Test", msg)
        _test_bad_type("global", "Unit Test", msg)

        msg = 'Error in "Unit Test":\n\tInvalid entry "{name}" - must be a list'
        _test_bad_type("sections", "Unit Test", msg)
        _test_bad_type("workloads", "Unit Test", msg)

        msg = 'Error in "Unit Test":\n\tInvalid top level agenda entry\(ies\): "{name}"'
        _test_bad_type("not_a_valid_entry", "Unit Test", msg)

    # Tests Phase 3
    def test_id_collection(self):
        wa_config = Mock(spec=WAConfiguration)
        wa_config.configuration = WAConfiguration.configuration
        run_config = Mock(spec=RunConfiguration)
        run_config.configuration = RunConfiguration.configuration
        jobs_config = Mock(spec=JobGenerator)
        plugin_cache = Mock(spec=PluginCache)
        agenda_parser = AgendaParser(wa_config, run_config, jobs_config, plugin_cache)

        agenda = {
            "workloads": [
                {"id": "test1"},
                {"id": "test2"},
            ],
            "sections": [
                {"id": "section1",
                 "workloads": [
                     {"id": "section1_workload"}
                 ]}
            ]
        }
        workloads, sections = agenda_parser.load(agenda, "Unit Test")
        assert_equal(sections, set(["section1"]))
        assert_equal(workloads, set(["test1", "test2", "section1_workload"]))

    # Tests Phase 4
    def test_id_assignment(self):
        wa_config = Mock(spec=WAConfiguration)
        wa_config.configuration = WAConfiguration.configuration
        run_config = Mock(spec=RunConfiguration)
        run_config.configuration = RunConfiguration.configuration
        jobs_config = Mock(spec=JobGenerator)
        plugin_cache = Mock(spec=PluginCache)
        agenda_parser = AgendaParser(wa_config, run_config, jobs_config, plugin_cache)

        # Helper functions
        def _assert_ids(ids, expected):
            ids_set = set(ids)
            assert_equal(len(ids), len(ids_set))
            assert_equal(ids_set, set(expected))

        def _assert_workloads_sections(jobs_config, expected_sect, expected_wk):
            wk_ids = [wk[0][0]['id'] for wk in jobs_config.add_workload.call_args_list]
            # Section workloads
            for s in jobs_config.add_section.call_args_list:
                wk_ids += [wk['id'] for wk in s[0][1]]
            # Sections
            sec_ids = set([s[0][0]['id'] for s in jobs_config.add_section.call_args_list])
            _assert_ids(wk_ids, set(expected_wk))
            _assert_ids(sec_ids, set(expected_sect))
            _reset_jobs_config(jobs_config)

        def _reset_jobs_config(jobs_config):
            jobs_config.reset_mock()
            reset_counter("wk")
            reset_counter("s")

        # Test auto ID assignment
        auto_id = {
            "workloads": [
                {"name": 1},
                {"name": 2},
                {"name": 3},
            ],
            "sections": [
                {"name": 4,
                 "workloads": [
                     {"name": 7},
                     {"name": 8},
                     {"name": 9},
                 ]},
                {"name": 5},
                {"name": 6},
            ]
        }
        agenda_parser.load(auto_id, "Unit Test")
        _assert_workloads_sections(jobs_config, ["s1", "s2", "s3"],
                                   ["wk1", "wk2", "wk3", "wk4", "wk5", "wk6"])

        # Test user-defined IDs
        user_ids = {
            "workloads": [
                {"id": "user1"},
                {"name": "autoid1"},
            ],
            "sections": [
                {"id": "user_section1",
                 "workloads": [
                     {"name": "autoid2"}
                 ]}
            ]
        }
        agenda_parser.load(user_ids, "Unit Test")
        _assert_workloads_sections(jobs_config, ["user_section1"],
                                   ["user1", "wk1", "wk2"])

        # Test auto-assigned ID already present
        used_auto_id = {
            "workloads": [
                {"id": "wk2"},
                {"name": 2},
                {"name": 3},
            ],
        }
        agenda_parser.load(used_auto_id, "Unit Test")
        _assert_workloads_sections(jobs_config, [], ["wk1", "wk2", "wk3"])

        # Test string workload
        string = {
            "workloads": [
                "test"
            ]
        }
        agenda_parser.load(string, "Unit Test")
        workload = jobs_config.add_workload.call_args_list[0][0][0]
        assert_equal(isinstance(workload, dict), True)
        assert_equal(workload['workload_name'], "test")


class TestEnvironmentVarsParser(TestCase):

    def test_environmentvarsparser(self):
        wa_config = Mock(spec=WAConfiguration)
        calls = [call('user_directory', '/testdir'),
                 call('plugin_paths', ['/test', '/some/other/path', '/testy/mc/test/face'])]

        # Valid env vars
        valid_environ = {"WA_USER_DIRECTORY": "/testdir",
                         "WA_PLUGIN_PATHS": "/test:/some/other/path:/testy/mc/test/face"}
        EnvironmentVarsParser(wa_config, valid_environ)
        wa_config.set.assert_has_calls(calls)

        # Alternative env var name
        wa_config.reset_mock()
        alt_valid_environ = {"WA_USER_DIRECTORY": "/testdir",
                             "WA_EXTENSION_PATHS": "/test:/some/other/path:/testy/mc/test/face"}
        EnvironmentVarsParser(wa_config, alt_valid_environ)
        wa_config.set.assert_has_calls(calls)

        # Test that WA_EXTENSION_PATHS gets merged with WA_PLUGIN_PATHS.
        # Also checks that other environment variables don't cause errors.
        wa_config.reset_mock()
        calls = [call('user_directory', '/testdir'),
                 call('plugin_paths', ['/test', '/some/other/path']),
                 call('plugin_paths', ['/testy/mc/test/face'])]
        ext_and_plgin = {"WA_USER_DIRECTORY": "/testdir",
                         "WA_PLUGIN_PATHS": "/test:/some/other/path",
                         "WA_EXTENSION_PATHS": "/testy/mc/test/face",
                         "RANDOM_VAR": "random_value"}
        EnvironmentVarsParser(wa_config, ext_and_plgin)
        # If any_order=True then the calls can be in any order, but they must all appear
        wa_config.set.assert_has_calls(calls, any_order=True)

        # No WA environment variables present
        wa_config.reset_mock()
        EnvironmentVarsParser(wa_config, {"RANDOM_VAR": "random_value"})
        wa_config.set.assert_not_called()


class TestCommandLineArgsParser(TestCase):
    wa_config = Mock(spec=WAConfiguration)
    run_config = Mock(spec=RunConfiguration)
    jobs_config = Mock(spec=JobGenerator)

    cmd_args = MagicMock(
        verbosity=1,
        output_directory="my_results",
        instruments_to_disable=["abc", "def", "ghi"],
        only_run_ids=["wk1", "s1_wk4"],
        some_other_setting="value123"
    )
    CommandLineArgsParser(cmd_args, wa_config, jobs_config)
    wa_config.set.assert_has_calls([call("verbosity", 1)], any_order=True)
    jobs_config.disable_instruments.assert_has_calls([
        call(toggle_set(["~abc", "~def", "~ghi"]))
    ], any_order=True)
    jobs_config.only_run_ids.assert_has_calls([call(["wk1", "s1_wk4"])], any_order=True)
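A condensed sketch of the behaviour these tests pin down (an assumption, not the parser's actual code): WA_USER_DIRECTORY maps to `user_directory`, WA_PLUGIN_PATHS and the legacy WA_EXTENSION_PATHS both feed colon-separated entries into `plugin_paths`, and unrelated variables are ignored.

def parse_wa_environ(environ, set_value):
    # set_value plays the role of wa_config.set in the tests above.
    if 'WA_USER_DIRECTORY' in environ:
        set_value('user_directory', environ['WA_USER_DIRECTORY'])
    for var in ('WA_PLUGIN_PATHS', 'WA_EXTENSION_PATHS'):
        if var in environ:
            set_value('plugin_paths', environ[var].split(':'))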
@ -21,7 +21,7 @@ from nose.tools import raises, assert_equal, assert_not_equal  # pylint: disable

from wlauto.utils.android import check_output
from wlauto.utils.misc import merge_dicts, merge_lists, TimeoutError
from wlauto.utils.types import list_or_integer, list_or_bool, caseless_string, arguments
from wlauto.utils.types import list_or_integer, list_or_bool, caseless_string, arguments, toggle_set


class TestCheckOutput(TestCase):

@ -89,3 +89,21 @@ class TestTypes(TestCase):
                     ['--foo', '7', '--bar', 'fizz buzz'])
        assert_equal(arguments(['test', 42]), ['test', '42'])


def toggle_set_test():

    a = toggle_set(['qaz', 'qwert', 'asd', '~fgh', '~seb'])
    b = toggle_set(['qaz', 'xyz', '~asd', 'fgh', '~seb'])

    a_into_b = ['qaz', 'xyz', '~seb', 'qwert', 'asd', '~fgh']
    assert_equal(a.merge_into(b), a_into_b)
    assert_equal(b.merge_with(a), a_into_b)

    b_into_a = ['qaz', 'qwert', '~seb', 'xyz', '~asd', 'fgh']
    assert_equal(b.merge_into(a), b_into_a)
    assert_equal(a.merge_with(b), b_into_a)

    assert_equal(a.values(), ['qaz', 'qwert', 'asd'])
    assert_equal(b.merge_with(a).values(), ['qaz', 'xyz', 'qwert', 'asd'])

    assert_equal(a.values(), ['qaz', 'qwert', 'asd'])
    assert_equal(a.conflicts_with(b), ['~asd', '~fgh'])
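The assertions above pin down the semantics of toggle_set: a '~' prefix marks an entry as disabled, and when two sets merge, the merged-in set's toggles win. A rough sketch of those semantics, inferred from this test alone (the real wlauto.utils.types.toggle_set may differ, for example in ordering and return types):

class ToggleSketch(set):
    """Illustrative approximation of toggle_set, not WA's implementation."""

    @staticmethod
    def _invert(item):
        return item[1:] if item.startswith('~') else '~' + item

    def merge_into(self, other):
        # self wins: drop entries of `other` that self explicitly toggles.
        merged = ToggleSketch(o for o in other if self._invert(o) not in self)
        merged.update(self)
        return merged

    def merge_with(self, other):
        # b.merge_with(a) is equivalent to a.merge_into(b).
        return other.merge_into(self)

    def values(self):
        # Enabled entries only, with any '~'-prefixed entries dropped.
        return ToggleSketch(i for i in self if not i.startswith('~'))

    def conflicts_with(self, other):
        # Entries toggled one way here and the opposite way in `other`.
        return ['~' + i.lstrip('~') for i in self if self._invert(i) in other]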
@ -1,27 +0,0 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from wlauto.core.version import get_wa_version


def init_argument_parser(parser):
    parser.add_argument('-c', '--config', help='specify an additional config.py')
    parser.add_argument('-v', '--verbose', action='count',
                        help='The scripts will produce verbose output.')
    parser.add_argument('--debug', action='store_true',
                        help='Enable debug mode. Note: this implies --verbose.')
    parser.add_argument('--version', action='version', version='%(prog)s {}'.format(get_wa_version()))
    return parser
@ -36,20 +36,19 @@ import hashlib
from datetime import datetime, timedelta
from operator import mul, itemgetter
from StringIO import StringIO
from itertools import cycle, groupby
from itertools import cycle, groupby, chain
from functools import partial
from distutils.spawn import find_executable

import yaml
from dateutil import tz

from devlib.utils.misc import ABI_MAP, check_output, walk_modules, \
    ensure_directory_exists, ensure_file_directory_exists, \
    merge_dicts, merge_lists, normalize, convert_new_lines, \
    escape_quotes, escape_single_quotes, escape_double_quotes, \
    isiterable, getch, as_relative, ranges_to_list, \
    list_to_ranges, list_to_mask, mask_to_list, which, \
    get_cpu_mask, unique
from devlib.utils.misc import (ABI_MAP, check_output, walk_modules,
                               ensure_directory_exists, ensure_file_directory_exists,
                               normalize, convert_new_lines, get_cpu_mask, unique,
                               escape_quotes, escape_single_quotes, escape_double_quotes,
                               isiterable, getch, as_relative, ranges_to_list,
                               list_to_ranges, list_to_mask, mask_to_list, which)

check_output_logger = logging.getLogger('check_output')

@ -469,3 +468,128 @@ def istextfile(fileobj, blocksize=512):
    # occurrences of _text_characters from the block
    nontext = block.translate(None, _text_characters)
    return float(len(nontext)) / len(block) <= 0.30


def categorize(v):
    if hasattr(v, 'merge_with') and hasattr(v, 'merge_into'):
        return 'o'
    elif hasattr(v, 'iteritems'):
        return 'm'
    elif isiterable(v):
        return 's'
    elif v is None:
        return 'n'
    else:
        return 'c'

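A quick sketch of how these categories fall out (illustrative only; assumes
categorize is importable from wlauto.utils.misc and toggle_set from
wlauto.utils.types):

    assert categorize(None) == 'n'               # None
    assert categorize(42) == 'c'                 # scalar
    assert categorize('abc') == 'c'              # strings count as scalars
    assert categorize([1, 2]) == 's'             # sequence
    assert categorize({'a': 1}) == 'm'           # mapping
    assert categorize(toggle_set(['x'])) == 'o'  # implements merge_with/merge_into
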
def merge_config_values(base, other):
    """
    This is used to merge two objects, typically when setting the value of a
    ``ConfigurationPoint``. First, both objects are categorized into

        c: A scalar value. Basically, most objects. These values
           are treated as atomic, and not mergeable.
        s: A sequence. Anything iterable that is not a dict or
           a string (strings are considered scalars).
        m: A key-value mapping. ``dict`` and its derivatives.
        n: ``None``.
        o: A mergeable object; this is an object that implements both
           ``merge_with`` and ``merge_into`` methods.

    The merge rules based on the two categories are then as follows:

        (c1, c2) --> c2
        (s1, s2) --> s1 . s2
        (m1, m2) --> m1 . m2
        (c, s) --> [c] . s
        (s, c) --> s . [c]
        (s, m) --> s . [m]
        (m, s) --> [m] . s
        (m, c) --> ERROR
        (c, m) --> ERROR
        (o, X) --> o.merge_with(X)
        (X, o) --> o.merge_into(X)
        (X, n) --> X
        (n, X) --> X

    where:

        '.' means concatenation (for maps, concatenation of (k, v) streams
            then converted back into a map). If the types of the two objects
            differ, the type of ``other`` is used for the result.
        'X' means "any category"
        '[]' used to indicate a literal sequence (not necessarily a ``list``).
            When this is concatenated with an actual sequence, that sequence's
            type is used.

    notes:

        - When a mapping is combined with a sequence, that mapping is
          treated as a scalar value.
        - When combining two mergeable objects, they're combined using
          ``o1.merge_with(o2)`` (_not_ using o2.merge_into(o1)).
        - Combining anything with ``None`` yields that value, irrespective
          of the order. So a ``None`` value is equivalent to the corresponding
          item being omitted.
        - When both values are scalars, merging is equivalent to overwriting.
        - There is no recursion (e.g. if map values are lists, they will not
          be merged; ``other`` will overwrite ``base`` values). If complicated
          merging semantics (such as recursion) are required, they should be
          implemented within custom mergeable types (i.e. those that implement
          ``merge_with`` and ``merge_into``).

    While this can be used as a generic "combine any two arbitrary objects"
    function, the semantics have been selected specifically for merging
    configuration point values.

    """
    cat_base = categorize(base)
    cat_other = categorize(other)

    if cat_base == 'n':
        return other
    elif cat_other == 'n':
        return base

    if cat_base == 'o':
        return base.merge_with(other)
    elif cat_other == 'o':
        return other.merge_into(base)

    if cat_base == 'm':
        if cat_other == 's':
            return merge_sequencies([base], other)
        elif cat_other == 'm':
            return merge_maps(base, other)
        else:
            message = 'merge error ({}, {}): "{}" and "{}"'
            raise ValueError(message.format(cat_base, cat_other, base, other))
    elif cat_base == 's':
        if cat_other == 's':
            return merge_sequencies(base, other)
        else:
            return merge_sequencies(base, [other])
    else:  # cat_base == 'c'
        if cat_other == 's':
            return merge_sequencies([base], other)
        elif cat_other == 'm':
            message = 'merge error ({}, {}): "{}" and "{}"'
            raise ValueError(message.format(cat_base, cat_other, base, other))
        else:
            return other

def merge_sequencies(s1, s2):
    return type(s2)(unique(chain(s1, s2)))


def merge_maps(m1, m2):
    return type(m2)(chain(m1.iteritems(), m2.iteritems()))


def merge_dicts_simple(base, other):
    result = base.copy()
    for key, value in (other or {}).iteritems():
        result[key] = merge_config_values(result.get(key), value)
    return result

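The merge rules above in action (an illustrative sketch; assumes
merge_config_values is importable from wlauto.utils.misc):

    merge_config_values(None, [1, 2])        # -> [1, 2]            (n, X) -> X
    merge_config_values('old', 'new')        # -> 'new'             (c1, c2) -> c2
    merge_config_values([1, 2], (2, 3))      # -> (1, 2, 3)         (s1, s2): other's type wins
    merge_config_values('a', ['b'])          # -> ['a', 'b']        (c, s) -> [c] . s
    merge_config_values({'x': 1}, {'y': 2})  # -> {'x': 1, 'y': 2}  (m1, m2) -> m1 . m2
    merge_config_values({'x': 1}, 'c')       # raises ValueError    (m, c) -> ERROR
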
@ -7,7 +7,7 @@ structures and Python class instances).

The modifications to standard serialization procedures are:

    - mappings are deserialized as ``OrderedDict``\ 's are than standard
    - mappings are deserialized as ``OrderedDict``\ 's rather than standard
      Python ``dict``\ 's. This allows for cleaner syntax in certain parts
      of WA configuration (e.g. values to be written to files can be specified
      as a dict, and they will be written in the order specified in the config).
@ -61,15 +61,27 @@ __all__ = [
    'read_pod',
    'dump',
    'load',
    'is_pod',
    'POD_TYPES',
]

POD_TYPES = [
    list,
    tuple,
    dict,
    set,
    basestring,
    int,
    float,
    bool,
    datetime,
    regex_type
]


class WAJSONEncoder(_json.JSONEncoder):

    def default(self, obj):  # pylint: disable=method-hidden
        if hasattr(obj, 'to_pod'):
            return obj.to_pod()
        elif isinstance(obj, regex_type):
        if isinstance(obj, regex_type):
            return 'REGEX:{}:{}'.format(obj.flags, obj.pattern)
        elif isinstance(obj, datetime):
            return 'DATET:{}'.format(obj.isoformat())
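To illustrate the tagged-string encoding (a sketch only; WAJSONEncoder is
assumed to be imported from WA's serialization module, where the json module
is bound as _json):

    import json
    import re
    from datetime import datetime

    json.dumps(re.compile('wk[0-9]+'), cls=WAJSONEncoder)
    # -> '"REGEX:0:wk[0-9]+"' (the flags value varies with the Python version)
    json.dumps(datetime(2015, 6, 1), cls=WAJSONEncoder)
    # -> '"DATET:2015-06-01T00:00:00"'
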
@ -241,3 +253,7 @@ def _read_pod(fh, fmt=None):
        return python.load(fh)
    else:
        raise ValueError('Unknown format "{}": {}'.format(fmt, getattr(fh, 'name', '<none>')))


def is_pod(obj):
    return type(obj) in POD_TYPES
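For instance (illustrative only):

    assert is_pod({'a': [1, 2]})  # dict is a POD type
    assert not is_pod(object())   # arbitrary instances are not

Note that the check compares type() exactly, so instances of subclasses of
POD types do not count as PODs.
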
@ -30,7 +30,8 @@ import re
import math
import shlex
from bisect import insort
from collections import defaultdict
from collections import defaultdict, MutableMapping
from copy import copy

from wlauto.utils.misc import isiterable, to_identifier
from devlib.utils.types import identifier, boolean, integer, numeric, caseless_string
@ -338,3 +339,130 @@ class prioritylist(object):

    def __len__(self):
        return self.size


class toggle_set(set):
    """
    A set of items that enable or disable something.

    A prefix of ``~`` is used to denote disabling something, for example
    the list ['apples', '~oranges', 'cherries'] enables both ``apples``
    and ``cherries`` but disables ``oranges``.
    """

    def merge_with(self, other):
        new_self = copy(self)
        return toggle_set.merge(other, new_self)

    def merge_into(self, other):
        other = copy(other)
        return toggle_set.merge(self, other)

    @staticmethod
    def merge(source, dest):
        for item in source:
            if item not in dest:
                # Disable previously enabled item
                if item.startswith('~') and item[1:] in dest:
                    dest.remove(item[1:])
                # Enable previously disabled item
                if not item.startswith('~') and ('~' + item) in dest:
                    dest.remove('~' + item)
                dest.add(item)
        return dest

    def values(self):
        """
        Returns a set of enabled items.
        """
        return set([item for item in self if not item.startswith('~')])

    def conflicts_with(self, other):
        """
        Checks if any items in ``other`` conflict with items already in this set.

        Args:
            other (list): The list to be checked against

        Returns:
            A list of items in ``other`` that conflict with items in this set
        """
        conflicts = []
        for item in other:
            if item.startswith('~') and item[1:] in self:
                conflicts.append(item)
            if not item.startswith('~') and ('~' + item) in self:
                conflicts.append(item)
        return conflicts

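A minimal usage sketch (assumes toggle_set is imported from wlauto.utils.types;
the variable names are made up for illustration):

    defaults = toggle_set(['cpufreq', 'hotplug'])
    overrides = toggle_set(['~hotplug', 'trace'])

    merged = overrides.merge_into(defaults)
    assert merged.values() == set(['cpufreq', 'trace'])        # '~hotplug' knocks out 'hotplug'
    assert defaults.conflicts_with(overrides) == ['~hotplug']
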
class ID(str):

    def merge_with(self, other):
        return '_'.join([self, other])

    def merge_into(self, other):
        return '_'.join([other, self])


class obj_dict(MutableMapping):
    """
    An object that behaves like a dict but each dict entry can also be accessed
    as an attribute.

    :param not_in_dict: A list of keys that can only be accessed as attributes
    """

    def __init__(self, not_in_dict=None, values={}):
        self.__dict__['not_in_dict'] = not_in_dict if not_in_dict is not None else []
        self.__dict__['dict'] = dict(values)

    def __getitem__(self, key):
        if key in self.not_in_dict:
            msg = '"{}" is in the list of keys that can only be accessed as attributes'
            raise KeyError(msg.format(key))
        return self.__dict__['dict'][key]

    def __setitem__(self, key, value):
        self.__dict__['dict'][key] = value

    def __delitem__(self, key):
        del self.__dict__['dict'][key]

    def __len__(self):
        return sum(1 for _ in self)

    def __iter__(self):
        for key in self.__dict__['dict']:
            if key not in self.__dict__['not_in_dict']:
                yield key

    def __repr__(self):
        return repr(dict(self))

    def __str__(self):
        return str(dict(self))

    def __setattr__(self, name, value):
        self.__dict__['dict'][name] = value

    def __delattr__(self, name):
        if name in self:
            del self.__dict__['dict'][name]
        else:
            raise AttributeError("No such attribute: " + name)

    def __getattr__(self, name):
        if name in self.__dict__['dict']:
            return self.__dict__['dict'][name]
        else:
            raise AttributeError("No such attribute: " + name)

    def to_pod(self):
        return self.__dict__.copy()

    @staticmethod
    def from_pod(pod):
        instance = obj_dict()
        for k, v in pod.iteritems():
            instance[k] = v
        return instance
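Hypothetical usage (names are made up for illustration):

    d = obj_dict(not_in_dict=['hidden'], values={'visible': 1})
    d.hidden = 42

    assert d.visible == 1           # entries double as attributes
    assert d.hidden == 42
    assert list(d) == ['visible']   # attribute-only keys are skipped when iterating
    try:
        d['hidden']                 # attribute-only keys cannot be subscripted
    except KeyError:
        pass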