mirror of https://github.com/ARM-software/workload-automation.git synced 2025-02-22 12:58:36 +00:00

Fixed run command to the point of invoking the Executor

Sergei Trofimov 2017-02-09 15:38:28 +00:00
parent dc6d9676f2
commit 3d8503b056
11 changed files with 310 additions and 352 deletions

View File

@@ -117,7 +117,7 @@ class CreateWorkloadSubcommand(CreateSubcommand):
                             'should place the APK file into the workload\'s directory at the ' +
                             'same level as the __init__.py.')

-    def execute(self, args):  # pylint: disable=R0201
+    def execute(self, state, args):  # pylint: disable=R0201
        where = args.path or 'local'
        check_name = not args.force

View File

@@ -39,7 +39,7 @@ class ListCommand(Command):
        self.parser.add_argument('-p', '--platform', help='Only list results that are supported by '
                                                          'the specified platform')

-    def execute(self, args):
+    def execute(self, state, args):
        filters = {}
        if args.name:
            filters['name'] = args.name

View File

@@ -78,7 +78,7 @@ class RecordCommand(Command):
            args.suffix += "."

    # pylint: disable=W0201
-    def execute(self, args):
+    def execute(self, state, args):
        self.validate_args(args)
        self.logger.info("Connecting to device...")

View File

@@ -20,11 +20,13 @@ import shutil

import wlauto
from wlauto import Command, settings
-from wlauto.core.execution import Executor
-from wlauto.utils.log import add_log_file
-from wlauto.core.configuration import RunConfiguration
from wlauto.core import pluginloader
-from wlauto.core.configuration.parsers import AgendaParser, ConfigParser, CommandLineArgsParser
+from wlauto.core.configuration import RunConfiguration
+from wlauto.core.configuration.parsers import AgendaParser, ConfigParser
+from wlauto.core.execution import Executor
+from wlauto.exceptions import NotFoundError, ConfigError
+from wlauto.utils.log import add_log_file
+from wlauto.utils.types import toggle_set


class RunCommand(Command):
@@ -32,103 +34,6 @@ class RunCommand(Command):

    name = 'run'
    description = 'Execute automated workloads on a remote device and process the resulting output.'
def initialize(self, context):
self.parser.add_argument('agenda', metavar='AGENDA',
help="""
Agenda for this workload automation run. This defines which
workloads will be executed, how many times, with which
tunables, etc. See example agendas in {} for an example of
how this file should be structured.
""".format(os.path.dirname(wlauto.__file__)))
self.parser.add_argument('-d', '--output-directory', metavar='DIR', default=None,
help="""
Specify a directory where the output will be generated. If
the directory already exists, the script will abort unless -f
option (see below) is used, in which case the contents of the
directory will be overwritten. If this option is not specified,
then {} will be used instead.
""".format("settings.default_output_directory")) # TODO: Fix this!
self.parser.add_argument('-f', '--force', action='store_true',
help="""
Overwrite output directory if it exists. By default, the script
will abort in this situation to prevent accidental data loss.
""")
self.parser.add_argument('-i', '--id', action='append', dest='only_run_ids', metavar='ID',
help="""
Specify a workload spec ID from an agenda to run. If this is
specified, only that particular spec will be run, and other
workloads in the agenda will be ignored. This option may be
used to specify multiple IDs.
""")
self.parser.add_argument('--disable', action='append', dest='instruments_to_disable',
metavar='INSTRUMENT', help="""
Specify an instrument to disable from the command line. This
equivalent to adding "~{metavar}" to the instrumentation list in
the agenda. This can be used to temporarily disable a troublesome
instrument for a particular run without introducing permanent
change to the config (which one might then forget to revert).
This option may be specified multiple times.
""")
def execute(self, args):
# STAGE 1: Gather configuration
env = EnvironmentVars()
args = CommandLineArgs(args)
# STAGE 2.1a: Early WAConfiguration, required to find config files
if env.user_directory:
settings.set("user_directory", env.user_directory)
if env.plugin_paths:
settings.set("plugin_paths", env.plugin_paths)
# STAGE 1 continued
# TODO: Check for config.py and convert to yaml, if it fails, warn user.
configs = [ConfigFile(os.path.join(settings.user_directory, 'config.yaml'))]
for c in args.config:
configs.append(ConfigFile(c))
agenda = Agenda(args.agenda)
configs.append(Agenda.config)
# STAGE 2: Sending configuration to the correct place & merging in
# order of priority.
#
# Priorities (lowest to highest):
- Environment Variables
# - config.yaml from `user_directory`
# - config files passed via command line
# (the first specified will be the first to be applied)
# - Agenda
# - Command line configuration e.g. disabled instrumentation.
# STAGE 2.1b: WAConfiguration
for config in configs:
for config_point in settings.configuration.keys():
if hasattr(config, config_point):
settings.set(config_point, config.getattr(config_point))
def _parse_config(self):
pass
def _serialize_raw_config(self, env, args, agenda, configs):
pod = {}
pod['environment_variables'] = env.to_pod()
pod['commandline_arguments'] = args.to_pod()
pod['agenda'] = agenda.to_pod()
pod['config_files'] = [c.to_pod() for c in configs]
return pod
def _serialize_final_config(self):
pass
class OldRunCommand(Command):
name = 'old_run'
description = 'Execute automated workloads on a remote device and process the resulting output.'
    def initialize(self, context):
        self.parser.add_argument('agenda', metavar='AGENDA',
                                 help="""
@@ -158,6 +63,7 @@ class OldRunCommand(Command):
                                 used to specify multiple IDs.
                                 """)
        self.parser.add_argument('--disable', action='append', dest='instruments_to_disable',
+                                 default=[],
                                 metavar='INSTRUMENT', help="""
                                 Specify an instrument to disable from the command line. This
                                 equivalent to adding "~{metavar}" to the instrumentation list in
@@ -167,38 +73,32 @@ class OldRunCommand(Command):
                                 This option may be specified multiple times.
                                 """)

-    def execute(self, args):  # NOQA
+    def execute(self, state, args):
        output_directory = self.set_up_output_directory(args)
        add_log_file(os.path.join(output_directory, "run.log"))
-        config = RunConfiguration(pluginloader)

+        disabled_instruments = toggle_set(["~{}".format(i)
+                                           for i in args.instruments_to_disable])
+        state.jobs_config.disable_instruments(disabled_instruments)
+        state.jobs_config.only_run_ids(args.only_run_ids)
+
+        parser = AgendaParser()
        if os.path.isfile(args.agenda):
-            agenda = Agenda(args.agenda)
-            settings.agenda = args.agenda
-            shutil.copy(args.agenda, config.meta_directory)
+            parser.load_from_path(state, args.agenda)
        else:
-            self.logger.debug('{} is not a file; assuming workload name.'.format(args.agenda))
-            agenda = Agenda()
-            agenda.add_workload_entry(args.agenda)
+            try:
+                pluginloader.get_workload(args.agenda)
+                agenda = {'workloads': [{'name': args.agenda}]}
+                parser.load(state, agenda, 'CMDLINE_ARGS')
+            except NotFoundError:
+                msg = 'Agenda file "{}" does not exist, and there is no workload '\
+                      'with that name.\nYou can get a list of available workloads '\
+                      'by running "wa list workloads".'
+                raise ConfigError(msg.format(args.agenda))

-        for filepath in settings.config_paths:
-            config.load_config(filepath)
-
-        if args.instruments_to_disable:
-            if 'instrumentation' not in agenda.config:
-                agenda.config['instrumentation'] = []
-            for itd in args.instruments_to_disable:
-                self.logger.debug('Updating agenda to disable {}'.format(itd))
-                agenda.config['instrumentation'].append('~{}'.format(itd))
-
-        basename = 'config_'
-        for file_number, path in enumerate(settings.config_paths, 1):
-            file_ext = os.path.splitext(path)[1]
-            shutil.copy(path, os.path.join(config.meta_directory,
-                                           basename + str(file_number) + file_ext))
-
-        executor = Executor(config)
-        executor.execute(agenda, selectors={'ids': args.only_run_ids})
+        executor = Executor()
+        # TODO: fix executor
+        # executor.execute(state, selectors={'ids': args.only_run_ids})

    def set_up_output_directory(self, args):
        if args.output_directory:
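The else branch above is the behavioural detail worth noting: when the positional argument is not a file, it is treated as a bare workload name and turned into a one-entry in-memory agenda before going through the same parser. A standalone sketch of that fallback, assuming the in-tree wlauto package; the helper name and error wording here are illustrative, not part of the commit:

    import os

    from wlauto.core import pluginloader
    from wlauto.core.configuration.parsers import AgendaParser
    from wlauto.exceptions import ConfigError, NotFoundError


    def load_agenda_or_workload(state, agenda_arg):
        # Hypothetical helper mirroring the logic in RunCommand.execute() above.
        parser = AgendaParser()
        if os.path.isfile(agenda_arg):
            parser.load_from_path(state, agenda_arg)
            return
        try:
            pluginloader.get_workload(agenda_arg)   # raises NotFoundError if unknown
        except NotFoundError:
            raise ConfigError('"{}" is neither an agenda file nor a known workload'.format(agenda_arg))
        # A bare workload name becomes a single-workload agenda.
        parser.load(state, {'workloads': [{'name': agenda_arg}]}, 'CMDLINE_ARGS')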

View File

@@ -40,7 +40,7 @@ class ShowCommand(Command):
                                 help='''The name of the plugin for which information will
                                         be shown.''')

-    def execute(self, args):
+    def execute(self, state, args):
        # pylint: disable=unpacking-non-sequence
        plugin = pluginloader.get_plugin_class(args.name)
        out = StringIO()

View File

@@ -21,19 +21,22 @@ from wlauto.core.version import get_wa_version


def init_argument_parser(parser):
-    parser.add_argument('-c', '--config', help='specify an additional config.py', action='append', default=[])
+    parser.add_argument('-c', '--config', action='append', default=[],
+                        help='specify an additional config.py')
    parser.add_argument('-v', '--verbose', action='count',
                        help='The scripts will produce verbose output.')
-    parser.add_argument('--version', action='version', version='%(prog)s {}'.format(get_wa_version()))
+    parser.add_argument('--version', action='version',
+                        version='%(prog)s {}'.format(get_wa_version()))
    return parser


class Command(Plugin):
    """
-    Defines a Workload Automation command. This will be executed from the command line as
-    ``wa <command> [args ...]``. This defines the name to be used when invoking wa, the
-    code that will actually be executed on invocation and the argument parser to be used
-    to parse the rest of the command line arguments.
+    Defines a Workload Automation command. This will be executed from the
+    command line as ``wa <command> [args ...]``. This defines the name to be
+    used when invoking wa, the code that will actually be executed on
+    invocation and the argument parser to be used to parse the rest of the
+    command line arguments.

    """

    kind = "command"
@@ -57,16 +60,19 @@ class Command(Plugin):

    def initialize(self, context):
        """
-        Perform command-specific initialisation (e.g. adding command-specific options to the command's
-        parser). ``context`` is always ``None``.
+        Perform command-specific initialisation (e.g. adding command-specific
+        options to the command's parser). ``context`` is always ``None``.

        """
        pass

-    def execute(self, args):
+    def execute(self, state, args):
        """
        Execute this command.

+        :state: An initialized ``WAState`` that contains the current state of
+                WA execution up to that point (processed configuration, loaded
+                plugins, etc).
        :args: An ``argparse.Namespace`` containing command line arguments (as returned by
               ``argparse.ArgumentParser.parse_args()``. This would usually be the result of
               invoking ``self.parser``.
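In practice the signature change means every command plugin now receives the state object ahead of the parsed arguments. A minimal, hypothetical subclass showing the new shape (not part of this commit):

    from wlauto import Command


    class HelloCommand(Command):
        """Hypothetical example command illustrating the new execute() signature."""

        name = 'hello'
        description = 'Print the configured user directory.'

        def initialize(self, context):
            # context is always None; this hook only adds parser options.
            self.parser.add_argument('--shout', action='store_true',
                                     help='Print in upper case.')

        def execute(self, state, args):
            msg = 'User directory: {}'.format(state.settings.user_directory)
            print(msg.upper() if args.shout else msg)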

View File

@@ -516,8 +516,10 @@ class Configuration(object):

    def set(self, name, value, check_mandatory=True):
        if name not in self.configuration:
-            raise ConfigError('Unknown {} configuration "{}"'.format(self.name, name))
-        self.configuration[name].set_value(self, value, check_mandatory=check_mandatory)
+            raise ConfigError('Unknown {} configuration "{}"'.format(self.name,
+                                                                     name))
+        self.configuration[name].set_value(self, value,
+                                           check_mandatory=check_mandatory)

    def update_config(self, values, check_mandatory=True):
        for k, v in values.iteritems():
@@ -610,6 +612,9 @@ class WAConfiguration(Configuration):
    def plugins_directory(self):
        return os.path.join(self.user_directory, 'plugins')

+    @property
+    def user_config_file(self):
+        return os.path.join(self.user_directory, 'config.yaml')

    def __init__(self, environ):
        super(WAConfiguration, self).__init__()
@@ -618,7 +623,6 @@ class WAConfiguration(Configuration):
        self.set('user_directory', user_directory)


# This is generic top-level configuration for WA runs.
class RunConfiguration(Configuration):
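The behaviour of set() is unchanged by the reformatting: unknown names raise ConfigError, while known configuration points are validated and stored. Illustrative usage against the settings object referenced elsewhere in this diff (the value is arbitrary):

    from wlauto.core.configuration import settings
    from wlauto.exceptions import ConfigError

    settings.set('user_directory', '/tmp/wa_home')   # known point: validated and stored
    try:
        settings.set('not_a_real_option', 42)        # unknown point
    except ConfigError as e:
        print(e)   # Unknown ... configuration "not_a_real_option"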

View File

@@ -20,13 +20,151 @@ from wlauto.utils.serializer import read_pod, SerializerSyntaxError
from wlauto.utils.types import toggle_set, counter
from wlauto.core.configuration.configuration import JobSpec
###############
### Parsers ###
###############
class ConfigParser(object):
def load_from_path(self, state, filepath):
        self.load(state, _load_file(filepath, "Config"), filepath)
def load(self, state, raw, source, wrap_exceptions=True): # pylint: disable=too-many-branches
try:
if 'run_name' in raw:
msg = '"run_name" can only be specified in the config '\
'section of an agenda'
raise ConfigError(msg)
if 'id' in raw:
raise ConfigError('"id" cannot be set globally')
merge_result_processors_instruments(raw)
# Get WA core configuration
for cfg_point in state.settings.configuration.itervalues():
value = get_aliased_param(cfg_point, raw)
if value is not None:
state.settings.set(cfg_point.name, value)
# Get run specific configuration
for cfg_point in state.run_config.configuration.itervalues():
value = get_aliased_param(cfg_point, raw)
if value is not None:
state.run_config.set(cfg_point.name, value)
# Get global job spec configuration
for cfg_point in JobSpec.configuration.itervalues():
value = get_aliased_param(cfg_point, raw)
if value is not None:
state.jobs_config.set_global_value(cfg_point.name, value)
for name, values in raw.iteritems():
                # Assume that all leftover config is for a plug-in or a global alias;
                # it is up to PluginCache to assert this assumption.
state.plugin_cache.add_configs(name, values, source)
except ConfigError as e:
if wrap_exceptions:
raise ConfigError('Error in "{}":\n{}'.format(source, str(e)))
else:
raise e
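ConfigParser.load routes values by where their configuration point is defined: WA core settings first, then run configuration, then global job-spec values, with anything left over handed to the plugin cache as plugin or alias configuration. A hedged sketch of driving it with an in-memory dict, assuming a constructed WAState as defined later in this commit; the job-spec point and plugin names are assumptions for illustration:

    from wlauto.core.configuration.parsers import ConfigParser
    from wlauto.core.state import WAState

    state = WAState()
    raw = {
        'user_directory': '/tmp/wa_home',     # WA core setting -> state.settings
        'iterations': 3,                      # assumed job-spec point -> jobs_config global
        'trace-cmd': {'buffer_size': 8192},   # leftover -> state.plugin_cache.add_configs()
    }
    ConfigParser().load(state, raw, source='example config')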
class AgendaParser(object):
def load_from_path(self, state, filepath):
raw = _load_file(filepath, 'Agenda')
self.load(state, raw, filepath)
def load(self, state, raw, source):
try:
if not isinstance(raw, dict):
raise ConfigError('Invalid agenda, top level entry must be a dict')
self._populate_and_validate_config(state, raw, source)
sections = self._pop_sections(raw)
global_workloads = self._pop_workloads(raw)
if raw:
msg = 'Invalid top level agenda entry(ies): "{}"'
raise ConfigError(msg.format('", "'.join(raw.keys())))
sect_ids, wkl_ids = self._collect_ids(sections, global_workloads)
self._process_global_workloads(state, global_workloads, wkl_ids)
self._process_sections(state, sections, sect_ids, wkl_ids)
except (ConfigError, SerializerSyntaxError) as e:
raise ConfigError('Error in "{}":\n\t{}'.format(source, str(e)))
def _populate_and_validate_config(self, state, raw, source):
for name in ['config', 'global']:
entry = raw.pop(name, None)
if entry is None:
continue
if not isinstance(entry, dict):
msg = 'Invalid entry "{}" - must be a dict'
raise ConfigError(msg.format(name))
if 'run_name' in entry:
state.run_config.set('run_name', entry.pop('run_name'))
state.load_config(entry, source, wrap_exceptions=False)
def _pop_sections(self, raw):
sections = raw.pop("sections", [])
if not isinstance(sections, list):
raise ConfigError('Invalid entry "sections" - must be a list')
return sections
def _pop_workloads(self, raw):
workloads = raw.pop("workloads", [])
if not isinstance(workloads, list):
raise ConfigError('Invalid entry "workloads" - must be a list')
return workloads
def _collect_ids(self, sections, global_workloads):
seen_section_ids = set()
seen_workload_ids = set()
for workload in global_workloads:
workload = _get_workload_entry(workload)
_collect_valid_id(workload.get("id"), seen_workload_ids, "workload")
for section in sections:
_collect_valid_id(section.get("id"), seen_section_ids, "section")
for workload in section["workloads"] if "workloads" in section else []:
workload = _get_workload_entry(workload)
_collect_valid_id(workload.get("id"), seen_workload_ids,
"workload")
return seen_section_ids, seen_workload_ids
def _process_global_workloads(self, state, global_workloads, seen_wkl_ids):
for workload_entry in global_workloads:
workload = _process_workload_entry(workload_entry, seen_wkl_ids,
state.jobs_config)
state.jobs_config.add_workload(workload)
def _process_sections(self, state, sections, seen_sect_ids, seen_wkl_ids):
for section in sections:
workloads = []
for workload_entry in section.pop("workloads", []):
                workload = _process_workload_entry(workload_entry, seen_wkl_ids,
                                                   state.jobs_config)
                workloads.append(workload)
            section = _construct_valid_entry(section, seen_sect_ids,
                                             "s", state.jobs_config)
state.jobs_config.add_section(section, workloads)
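Putting the pieces together, the parser accepts a top-level mapping whose only keys are config/global, sections, and workloads; section entries may nest their own workloads, and bare strings are promoted to {'name': ...} entries. A small agenda of the accepted shape, expressed as the dict that load() receives; workload names and parameter values are illustrative, and state is assumed to be an initialized WAState:

    agenda = {
        'config': {'run_name': 'example_run'},
        'sections': [
            {'id': 'quick',
             'runtime_params': {'frequency': 'min'},
             'workloads': ['dhrystone']},          # bare string -> {'name': 'dhrystone'}
        ],
        'workloads': [
            {'id': 'wk1',
             'name': 'memcpy',
             'workload_params': {'size': 4096}},
        ],
    }
    AgendaParser().load(state, agenda, 'inline example')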
########################
### Helper functions ###
########################

DUPLICATE_ENTRY_ERROR = 'Only one of {} may be specified in a single entry'


def get_aliased_param(cfg_point, d, default=None, pop=True):
    """
    Given a ConfigurationPoint and a dict, this function will search the dict for
@@ -62,55 +200,79 @@ def _load_file(filepath, error_name):

def merge_result_processors_instruments(raw):
-    instruments = toggle_set(get_aliased_param(JobSpec.configuration['instrumentation'],
-                                               raw, default=[]))
+    instr_config = JobSpec.configuration['instrumentation']
+    instruments = toggle_set(get_aliased_param(instr_config, raw, default=[]))
    result_processors = toggle_set(raw.pop('result_processors', []))
    if instruments and result_processors:
        conflicts = instruments.conflicts_with(result_processors)
        if conflicts:
-            msg = '"instrumentation" and "result_processors" have conflicting entries: {}'
+            msg = '"instrumentation" and "result_processors" have '\
+                  'conflicting entries: {}'
            entries = ', '.join('"{}"'.format(c.strip("~")) for c in conflicts)
            raise ConfigError(msg.format(entries))
    raw['instrumentation'] = instruments.merge_with(result_processors)
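The merge relies on toggle_set semantics: a leading "~" marks an entry as disabled, conflicts_with() reports names that one set enables and the other disables, and merge_with() combines the two. A rough illustration; the instrument names are arbitrary and the exact return values are not spelled out by this diff:

    from wlauto.utils.types import toggle_set

    instrumentation = toggle_set(['energy_measurement', '~trace-cmd'])
    result_processors = toggle_set(['trace-cmd', 'csv'])

    clashes = instrumentation.conflicts_with(result_processors)
    # expected to flag trace-cmd, which one list enables and the other disables
    merged = instrumentation.merge_with(result_processors)   # what ends up in raw['instrumentation']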
-def _construct_valid_entry(raw, seen_ids, counter_name, jobs_config):
-    entries = {}
+def _pop_aliased(d, names, entry_id):
+    name_count = sum(1 for n in names if n in d)
+    if name_count > 1:
+        names_list = ', '.join(names)
+        msg = 'Invalid workload entry "{}": at most one of ({}) must be specified.'
+        raise ConfigError(msg.format(entry_id, names_list))
+    for name in names:
+        if name in d:
+            return d.pop(name)
+    return None
+
+
+def _construct_valid_entry(raw, seen_ids, prefix, jobs_config):
+    workload_entry = {}

    # Generate an automatic ID if the entry doesn't already have one
-    if "id" not in raw:
+    if 'id' not in raw:
        while True:
-            new_id = "{}{}".format(counter_name, counter(name=counter_name))
+            new_id = '{}{}'.format(prefix, counter(name=prefix))
            if new_id not in seen_ids:
                break
-        entries["id"] = new_id
+        workload_entry['id'] = new_id
        seen_ids.add(new_id)
    else:
-        entries["id"] = raw.pop("id")
+        workload_entry['id'] = raw.pop('id')

    # Process instrumentation
    merge_result_processors_instruments(raw)

-    # Validate all entries
+    # Validate all workload_entry values
    for name, cfg_point in JobSpec.configuration.iteritems():
        value = get_aliased_param(cfg_point, raw)
        if value is not None:
            value = cfg_point.kind(value)
            cfg_point.validate_value(name, value)
-            entries[name] = value
+            workload_entry[name] = value

-    entries["workload_parameters"] = raw.pop("workload_parameters", None)
-    entries["runtime_parameters"] = raw.pop("runtime_parameters", None)
-    entries["boot_parameters"] = raw.pop("boot_parameters", None)
-
-    if "instrumentation" in entries:
-        jobs_config.update_enabled_instruments(entries["instrumentation"])
+    wk_id = workload_entry['id']
+    param_names = ['workload_params', 'workload_parameters']
+    if prefix == 'wk':
+        param_names += ['params', 'parameters']
+    workload_entry["workload_parameters"] = _pop_aliased(raw, param_names, wk_id)
+
+    param_names = ['runtime_parameters', 'runtime_params']
+    if prefix == 's':
+        param_names += ['params', 'parameters']
+    workload_entry["runtime_parameters"] = _pop_aliased(raw, param_names, wk_id)
+
+    param_names = ['boot_parameters', 'boot_params']
+    workload_entry["boot_parameters"] = _pop_aliased(raw, param_names, wk_id)
+
+    if "instrumentation" in workload_entry:
+        jobs_config.update_enabled_instruments(workload_entry["instrumentation"])

-    # error if there are unknown entries
+    # error if there are unknown entries left in workload_entry
    if raw:
        msg = 'Invalid entry(ies) in "{}": "{}"'
-        raise ConfigError(msg.format(entries['id'], ', '.join(raw.keys())))
-    return entries
+        raise ConfigError(msg.format(workload_entry['id'], ', '.join(raw.keys())))
+    return workload_entry
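The _pop_aliased helper is what gives each entry its one-of-several parameter spellings: a workload entry may use params, workload_params, or workload_parameters (and a section entry params/runtime_params/runtime_parameters), but supplying more than one alias at once is rejected. A short sketch of the behaviour, assuming the corrected helper above and module-level access to it:

    entry = {'name': 'memcpy', 'workload_params': {'size': 4096}}
    value = _pop_aliased(entry, ['workload_params', 'workload_parameters', 'params'], 'wk1')
    assert value == {'size': 4096}
    assert 'workload_params' not in entry   # the alias is consumed

    # Supplying two spellings at once raises ConfigError:
    #   {'workload_params': {...}, 'parameters': {...}}  ->  "at most one of (...)"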
def _collect_valid_id(entry_id, seen_ids, entry_type):

@@ -128,15 +290,6 @@ def _collect_valid_id(entry_id, seen_ids, entry_type):
    seen_ids.add(entry_id)


-def _resolve_params_alias(entry, param_alias):
-    possible_names = {"params", "{}_params".format(param_alias), "{}_parameters".format(param_alias)}
-    duplicate_entries = possible_names.intersection(set(entry.keys()))
-    if len(duplicate_entries) > 1:
-        raise ConfigError(DUPLICATE_ENTRY_ERROR.format(list(possible_names)))
-    for name in duplicate_entries:
-        entry["{}_parameters".format(param_alias)] = entry.pop(name)
-
-
def _get_workload_entry(workload):
    if isinstance(workload, basestring):
        workload = {'name': workload}
@@ -147,150 +300,7 @@ def _get_workload_entry(workload):

def _process_workload_entry(workload, seen_workload_ids, jobs_config):
    workload = _get_workload_entry(workload)
-    _resolve_params_alias(workload, "workload")
-    workload = _construct_valid_entry(workload, seen_workload_ids, "wk", jobs_config)
+    workload = _construct_valid_entry(workload, seen_workload_ids,
+                                      "wk", jobs_config)
    return workload
###############
### Parsers ###
###############
class ConfigParser(object):
def __init__(self, wa_config, run_config, jobs_config, plugin_cache):
self.wa_config = wa_config
self.run_config = run_config
self.jobs_config = jobs_config
self.plugin_cache = plugin_cache
def load_from_path(self, filepath):
self.load(_load_file(filepath, "Config"), filepath)
def load(self, raw, source, wrap_exceptions=True): # pylint: disable=too-many-branches
try:
if 'run_name' in raw:
msg = '"run_name" can only be specified in the config section of an agenda'
raise ConfigError(msg)
if 'id' in raw:
raise ConfigError('"id" cannot be set globally')
merge_result_processors_instruments(raw)
# Get WA core configuration
for cfg_point in self.wa_config.configuration.itervalues():
value = get_aliased_param(cfg_point, raw)
if value is not None:
self.wa_config.set(cfg_point.name, value)
# Get run specific configuration
for cfg_point in self.run_config.configuration.itervalues():
value = get_aliased_param(cfg_point, raw)
if value is not None:
self.run_config.set(cfg_point.name, value)
# Get global job spec configuration
for cfg_point in JobSpec.configuration.itervalues():
value = get_aliased_param(cfg_point, raw)
if value is not None:
self.jobs_config.set_global_value(cfg_point.name, value)
for name, values in raw.iteritems():
# Assume that all leftover config is for a plug-in or a global
# alias it is up to PluginCache to assert this assumption
self.plugin_cache.add_configs(name, values, source)
except ConfigError as e:
if wrap_exceptions:
raise ConfigError('Error in "{}":\n{}'.format(source, str(e)))
else:
raise e
class AgendaParser(object):
def __init__(self, wa_config, run_config, jobs_config, plugin_cache):
self.wa_config = wa_config
self.run_config = run_config
self.jobs_config = jobs_config
self.plugin_cache = plugin_cache
def load_from_path(self, filepath):
raw = _load_file(filepath, 'Agenda')
self.load(raw, filepath)
def load(self, raw, source): # pylint: disable=too-many-branches, too-many-locals
try:
if not isinstance(raw, dict):
raise ConfigError('Invalid agenda, top level entry must be a dict')
# PHASE 1: Populate and validate configuration.
for name in ['config', 'global']:
entry = raw.pop(name, {})
if not isinstance(entry, dict):
raise ConfigError('Invalid entry "{}" - must be a dict'.format(name))
if 'run_name' in entry:
self.run_config.set('run_name', entry.pop('run_name'))
config_parser = ConfigParser(self.wa_config, self.run_config,
self.jobs_config, self.plugin_cache)
config_parser.load(entry, source, wrap_exceptions=False)
# PHASE 2: Getting "section" and "workload" entries.
sections = raw.pop("sections", [])
if not isinstance(sections, list):
raise ConfigError('Invalid entry "sections" - must be a list')
global_workloads = raw.pop("workloads", [])
if not isinstance(global_workloads, list):
raise ConfigError('Invalid entry "workloads" - must be a list')
if raw:
msg = 'Invalid top level agenda entry(ies): "{}"'
raise ConfigError(msg.format('", "'.join(raw.keys())))
# PHASE 3: Collecting existing workload and section IDs
seen_section_ids = set()
seen_workload_ids = set()
for workload in global_workloads:
workload = _get_workload_entry(workload)
_collect_valid_id(workload.get("id"), seen_workload_ids, "workload")
for section in sections:
_collect_valid_id(section.get("id"), seen_section_ids, "section")
for workload in section["workloads"] if "workloads" in section else []:
workload = _get_workload_entry(workload)
_collect_valid_id(workload.get("id"), seen_workload_ids, "workload")
# PHASE 4: Assigning IDs and validating entries
# TODO: Error handling for workload errors vs section errors ect
for workload in global_workloads:
self.jobs_config.add_workload(_process_workload_entry(workload,
seen_workload_ids,
self.jobs_config))
for section in sections:
workloads = []
for workload in section.pop("workloads", []):
workloads.append(_process_workload_entry(workload,
seen_workload_ids,
self.jobs_config))
_resolve_params_alias(section, seen_section_ids)
section = _construct_valid_entry(section, seen_section_ids, "s", self.jobs_config)
self.jobs_config.add_section(section, workloads)
return seen_workload_ids, seen_section_ids
except (ConfigError, SerializerSyntaxError) as e:
raise ConfigError('Error in "{}":\n\t{}'.format(source, str(e)))
# Command line options are parsed in the "run" command. This is used to send
# certain arguments to the correct configuration points and keep a record of
# how WA was invoked
class CommandLineArgsParser(object):
def __init__(self, cmd_args, wa_config, jobs_config):
wa_config.set("verbosity", cmd_args.verbosity)
# TODO: Is this correct? Does there need to be a third output dir param
disabled_instruments = toggle_set(["~{}".format(i) for i in cmd_args.instruments_to_disable])
jobs_config.disable_instruments(disabled_instruments)
jobs_config.only_run_ids(cmd_args.only_run_ids)

View File

@@ -21,16 +21,15 @@ import os
import subprocess
import warnings

-from wlauto.core.configuration import settings
from wlauto.core import pluginloader
from wlauto.core.command import init_argument_parser
+from wlauto.core.configuration import settings
from wlauto.core.host import init_user_directory
-from wlauto.exceptions import WAError, ConfigError
-from wlauto.utils.misc import get_traceback
-from wlauto.utils.log import init_logging
+from wlauto.core.state import WAState
+from wlauto.exceptions import WAError, DevlibError, ConfigError
from wlauto.utils.doc import format_body
-from devlib import DevlibError
+from wlauto.utils.log import init_logging
+from wlauto.utils.misc import get_traceback

warnings.filterwarnings(action='ignore', category=UserWarning, module='zope')
@@ -41,11 +40,14 @@ logger = logging.getLogger('command_line')

def load_commands(subparsers):
    commands = {}
    for command in pluginloader.list_commands():
-        commands[command.name] = pluginloader.get_command(command.name, subparsers=subparsers)
+        commands[command.name] = pluginloader.get_command(command.name,
+                                                          subparsers=subparsers)
    return commands


def main():
+    state = WAState()
    if not os.path.exists(settings.user_directory):
        init_user_directory()
@@ -59,19 +61,22 @@ def main():
            formatter_class=argparse.RawDescriptionHelpFormatter,
            )
        init_argument_parser(parser)
-        commands = load_commands(parser.add_subparsers(dest='command'))  # each command will add its own subparser
+        # each command will add its own subparser
+        commands = load_commands(parser.add_subparsers(dest='command'))
        args = parser.parse_args()
        settings.set("verbosity", args.verbose)
-        for config in args.config:
-            if not os.path.exists(config):
-                raise ConfigError("Config file {} not found".format(config))
+        for config_file in args.config:
+            if not os.path.exists(config_file):
+                raise ConfigError("Config file {} not found".format(config_file))
+            state.load_config_file(config_file)
        init_logging(settings.verbosity)

        command = commands[args.command]
-        sys.exit(command.execute(args))
+        sys.exit(command.execute(state, args))
    except KeyboardInterrupt:
        logging.info('Got CTRL-C. Aborting.')

View File

@@ -36,29 +36,31 @@ following actors:
    allow instrumentation to do its stuff.
"""

-import os
-import uuid
import logging
-import subprocess
+import os
import random
+import subprocess
+import uuid
+
+from collections import Counter, defaultdict, OrderedDict
+from contextlib import contextmanager
from copy import copy
from datetime import datetime
-from contextlib import contextmanager
-from collections import Counter, defaultdict, OrderedDict
from itertools import izip_longest

import wlauto.core.signal as signal
from wlauto.core import instrumentation
+from wlauto.core import pluginloader
from wlauto.core.configuration import settings
from wlauto.core.plugin import Artifact
-from wlauto.core import pluginloader
from wlauto.core.resolver import ResourceResolver
from wlauto.core.result import ResultManager, IterationResult, RunResult
from wlauto.exceptions import (WAError, ConfigError, TimeoutError, InstrumentError,
                               DeviceError, DeviceNotRespondingError)
-from wlauto.utils.misc import ensure_directory_exists as _d, get_traceback, format_duration
+from wlauto.utils.misc import (ensure_directory_exists as _d,
+                               get_traceback, format_duration)
from wlauto.utils.serializer import json

# The maximum number of reboot attempts for an iteration.
MAX_REBOOT_ATTEMPTS = 3
@@ -95,6 +97,7 @@ class RunInfo(object):
        return d


#TODO: pod
class ExecutionContext(object):
    """
    Provides a context for instrumentation. Keeps track of things like
@@ -239,31 +242,32 @@ def _check_artifact_path(path, rootpath):

class Executor(object):
    """
-    The ``Executor``'s job is to set up the execution context and pass to a ``Runner``
-    along with a loaded run specification. Once the ``Runner`` has done its thing,
-    the ``Executor`` performs some final reporting before returning.
+    The ``Executor``'s job is to set up the execution context and pass to a
+    ``Runner`` along with a loaded run specification. Once the ``Runner`` has
+    done its thing, the ``Executor`` performs some final reporting before
+    returning.

-    The initial context set up involves combining configuration from various sources,
-    loading of required workloads, loading and installation of instruments and result
-    processors, etc. Static validation of the combined configuration is also performed.
+    The initial context set up involves combining configuration from various
+    sources, loading of required workloads, loading and installation of
+    instruments and result processors, etc. Static validation of the combined
+    configuration is also performed.

    """
    # pylint: disable=R0915

-    def __init__(self, config):
+    def __init__(self):
        self.logger = logging.getLogger('Executor')
        self.error_logged = False
        self.warning_logged = False
-        self.config = config
        pluginloader = None
        self.device_manager = None
        self.device = None
        self.context = None

-    def execute(self, agenda, selectors=None):  # NOQA
+    def execute(self, state, selectors=None):  # NOQA
        """
-        Execute the run specified by an agenda. Optionally, selectors may be used to only
-        select a subset of the specified agenda.
+        Execute the run specified by an agenda. Optionally, selectors may be
+        used to only select a subset of the specified agenda.

        Params::

@@ -275,9 +279,10 @@ class Executor(object):
        Currently, the following selectors are supported:

        ids
-            The value must be a sequence of workload specification IDs to be executed. Note
-            that if sections are specified in the agenda, the workload specification ID will
-            be a combination of the section and workload IDs.
+            The value must be a sequence of workload specification IDs to be
+            executed. Note that if sections are specified in the agenda, the
+            workload specification ID will be a combination of the section and
+            workload IDs.

        """
        signal.connect(self._error_signalled_callback, signal.ERROR_LOGGED)
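Once the TODO in the run command is resolved, the executor is meant to be driven straight from the state plus optional selectors, as the docstring describes. A hedged sketch of the intended call; the path and spec ID are illustrative, and the body of execute() is still being ported in this commit:

    from wlauto.core.execution import Executor
    from wlauto.core.state import WAState

    state = WAState()
    state.load_config_file('/path/to/config.yaml')   # illustrative path
    # ... an agenda would be loaded into state via AgendaParser here ...

    executor = Executor()
    executor.execute(state, selectors={'ids': ['quick-wk1']})   # run only the matching spec IDs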

wlauto/core/state.py Normal file
View File

@@ -0,0 +1,28 @@
from wlauto.core.configuration.configuration import (RunConfiguration,
JobGenerator, settings)
from wlauto.core.configuration.parsers import ConfigParser
from wlauto.core.configuration.plugin_cache import PluginCache
class WAState(object):
"""
Represents run-time state of WA. Mostly used as a container for loaded
configuration and discovered plugins.
This exists outside of any command or run and is associated with the running
instance of WA itself.
"""
def __init__(self, settings=settings):
self.settings = settings
self.run_config = RunConfiguration()
self.plugin_cache = PluginCache()
self.jobs_config = JobGenerator(self.plugin_cache)
self._config_parser = ConfigParser()
def load_config_file(self, filepath):
self._config_parser.load_from_path(self, filepath)
def load_config(self, values, source, wrap_exceptions=True):
self._config_parser.load(self, values, source, wrap_exceptions=wrap_exceptions)
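A WAState is constructed once in main() and threaded through every command, parser, and (eventually) the executor, so they all share the same configuration containers. A compact illustration of what it carries; the config path is illustrative:

    from wlauto.core.state import WAState

    state = WAState()
    state.load_config_file('/path/to/config.yaml')   # illustrative path

    settings = state.settings            # WA-level settings (user directory, verbosity, ...)
    run_config = state.run_config        # RunConfiguration for the current run
    jobs_config = state.jobs_config      # JobGenerator populated by the agenda parser
    plugin_config = state.plugin_cache   # per-plugin configuration gathered from config files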