# Copyright 2014-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import re
from copy import copy, deepcopy
from collections import OrderedDict, defaultdict

from wa.framework.exception import ConfigError, NotFoundError
from wa.framework.configuration.tree import SectionNode
from wa.utils.misc import (get_article, merge_config_values)
from wa.utils.types import (identifier, integer, boolean, list_of_strings,
                            list_of, toggle_set, obj_dict, enum)
from wa.utils.serializer import is_pod


# Mapping for kind conversion; see docs for convert_types below
KIND_MAP = {
    int: integer,
    bool: boolean,
    dict: OrderedDict,
}

Status = enum(['UNKNOWN', 'NEW', 'PENDING',
               'STARTED', 'CONNECTED', 'INITIALIZED', 'RUNNING',
               'OK', 'PARTIAL', 'FAILED', 'ABORTED', 'SKIPPED'])
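
# Illustrative sketch (assumes the semantics of wa.utils.types.enum): each
# Status level carries a name and an ordinal value, so slicing
# ``Status.levels`` from RUNNING onwards yields the end-of-job statuses used
# for ``retry_on_status`` further down in this file.
#
#     assert str(Status.RUNNING) == 'RUNNING'
#     terminal_statuses = Status.levels[Status.RUNNING.value:]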


##########################
### CONFIG POINT TYPES ###
##########################


class RebootPolicy(object):
    """
    Represents the reboot policy for the execution -- at what points the device
    should be rebooted. This, in turn, is controlled by the policy value that is
    passed in on construction and would typically be read from the user's settings.
    Valid policy values are:

    :never: The device will never be rebooted.
    :as_needed: Only reboot the device if it becomes unresponsive, or needs to be flashed, etc.
    :initial: The device will be rebooted when the execution first starts, just before
              executing the first workload spec.
    :each_spec: The device will be rebooted before running a new workload spec.
    :each_iteration: The device will be rebooted before each new iteration.

    """

    valid_policies = ['never', 'as_needed', 'initial', 'each_spec', 'each_iteration']

    @staticmethod
    def from_pod(pod):
        return RebootPolicy(pod)

    def __init__(self, policy):
        if isinstance(policy, RebootPolicy):
            policy = policy.policy
        policy = policy.strip().lower().replace(' ', '_')
        if policy not in self.valid_policies:
            message = 'Invalid reboot policy {}; must be one of {}'.format(policy, ', '.join(self.valid_policies))
            raise ConfigError(message)
        self.policy = policy

    @property
    def can_reboot(self):
        return self.policy != 'never'

    @property
    def perform_initial_boot(self):
        return self.policy not in ['never', 'as_needed']

    @property
    def reboot_on_each_spec(self):
        return self.policy in ['each_spec', 'each_iteration']

    @property
    def reboot_on_each_iteration(self):
        return self.policy == 'each_iteration'

    def __str__(self):
        return self.policy

    __repr__ = __str__

    def __cmp__(self, other):
        if isinstance(other, RebootPolicy):
            return cmp(self.policy, other.policy)
        else:
            return cmp(self.policy, other)

    def to_pod(self):
        return self.policy
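
# Illustrative usage sketch (kept as a comment so importing this module stays
# side-effect free): policy strings are normalised before validation.
#
#     policy = RebootPolicy('each spec')          # stored as 'each_spec'
#     assert policy.can_reboot                    # anything except 'never'
#     assert policy.reboot_on_each_spec
#     assert not policy.reboot_on_each_iteration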


class status_list(list):

    def append(self, item):
        list.append(self, str(item).upper())


class LoggingConfig(dict):

    defaults = {
        'file_format': '%(asctime)s %(levelname)-8s %(name)s: %(message)s',
        'verbose_format': '%(asctime)s %(levelname)-8s %(name)s: %(message)s',
        'regular_format': '%(levelname)-8s %(message)s',
        'color': True,
    }

    @staticmethod
    def from_pod(pod):
        return LoggingConfig(pod)

    def __init__(self, config=None):
        dict.__init__(self)
        if isinstance(config, dict):
            config = {identifier(k.lower()): v for k, v in config.iteritems()}
            self['regular_format'] = config.pop('regular_format', self.defaults['regular_format'])
            self['verbose_format'] = config.pop('verbose_format', self.defaults['verbose_format'])
            self['file_format'] = config.pop('file_format', self.defaults['file_format'])
            self['color'] = config.pop('colour_enabled', self.defaults['color'])  # legacy
            self['color'] = config.pop('color', self['color'])
            if config:
                message = 'Unexpected logging configuration parameters: {}'
                raise ValueError(message.format(', '.join(config.keys())))
        elif config is None:
            for k, v in self.defaults.iteritems():
                self[k] = v
        else:
            raise ValueError(config)

    def to_pod(self):
        return self
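
# Illustrative sketch: unknown keys raise a ValueError, and the legacy
# 'colour_enabled' key is still honoured (note the fallback to self['color']
# above, which keeps the legacy value from being clobbered).
#
#     cfg = LoggingConfig({'colour_enabled': False})
#     assert cfg['color'] is False
#     assert cfg['file_format'] == LoggingConfig.defaults['file_format']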


def get_type_name(kind):
    typename = str(kind)
    if '\'' in typename:
        typename = typename.split('\'')[1]
    elif typename.startswith('<function'):
        typename = typename.split()[1]
    return typename
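
# Illustrative sketch: the helper extracts a readable name from a type's or
# function's repr, for use in error messages.
#
#     assert get_type_name(int) == 'int'
#     assert get_type_name(lambda x: x) == '<lambda>'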


class ConfigurationPoint(object):
    """
    This defines a generic configuration point for workload automation. This is
    used to handle global settings, plugin parameters, etc.

    """

    def __init__(self, name,
                 kind=None,
                 mandatory=None,
                 default=None,
                 override=False,
                 allowed_values=None,
                 description=None,
                 constraint=None,
                 merge=False,
                 aliases=None,
                 global_alias=None):
        """
        Create a new Parameter object.

        :param name: The name of the parameter. This will become an instance
                     member of the plugin object to which the parameter is
                     applied, so it must be a valid python identifier. This
                     is the only mandatory parameter.
        :param kind: The type of parameter this is. This must be a callable
                     that takes an arbitrary object and converts it to the
                     expected type, or raises ``ValueError`` if such conversion
                     is not possible. Most Python standard types -- ``str``,
                     ``int``, ``bool``, etc. -- can be used here. This
                     defaults to ``str`` if not specified.
        :param mandatory: If set to ``True``, then a non-``None`` value for
                          this parameter *must* be provided on plugin
                          object construction, otherwise ``ConfigError``
                          will be raised.
        :param default: The default value for this parameter. If no value
                        is specified on plugin construction, this value
                        will be used instead. (Note: if this is specified
                        and is not ``None``, then ``mandatory`` parameter
                        will be ignored).
        :param override: A ``bool`` that specifies whether a parameter of
                         the same name further up the hierarchy should
                         be overridden. If this is ``False`` (the
                         default), an exception will be raised by the
                         ``AttributeCollection`` instead.
        :param allowed_values: This should be the complete list of allowed
                               values for this parameter. Note: a ``None``
                               value will always be allowed, even if it is
                               not in this list. If you want to disallow
                               ``None``, set ``mandatory`` to ``True``.
        :param constraint: If specified, this must be a callable that takes
                           the parameter value as an argument and returns a
                           boolean indicating whether the constraint has been
                           satisfied. Alternatively, it can be a two-tuple with
                           said callable as the first element and a string
                           describing the constraint as the second.
        :param merge: The default behaviour when setting a value on an object
                      that already has that attribute is to override it with
                      the new value. If this is set to ``True`` then the two
                      values will be merged instead. The rules by which the
                      values are merged will be determined by the types of
                      the existing and new values -- see
                      ``merge_config_values`` documentation for details.
        :param aliases: Alternative names for the same configuration point.
                        These are largely for backwards compatibility.
        :param global_alias: An alias for this parameter that can be specified at
                             the global level. A global_alias can map onto many
                             ConfigurationPoints.

        """
        self.name = identifier(name)
        if kind in KIND_MAP:
            kind = KIND_MAP[kind]
        if kind is not None and not callable(kind):
            raise ValueError('Kind must be callable.')
        self.kind = kind
        self.mandatory = mandatory
        if not is_pod(default):
            msg = "The default for '{}' must be a Plain Old Data type, but it is of type '{}' instead."
            raise TypeError(msg.format(self.name, type(default)))
        self.default = default
        self.override = override
        self.allowed_values = allowed_values
        self.description = description
        if self.kind is None and not self.override:
            self.kind = str
        if constraint is not None and not callable(constraint) and not isinstance(constraint, tuple):
            raise ValueError('Constraint must be callable or a (callable, str) tuple.')
        self.constraint = constraint
        self.merge = merge
        self.aliases = aliases or []
        self.global_alias = global_alias

        if self.default is not None:
            try:
                self.validate_value("init", self.default)
            except ConfigError:
                raise ValueError('Default value "{}" is not valid'.format(self.default))

    def match(self, name):
        if name == self.name or name in self.aliases:
            return True
        elif name == self.global_alias:
            return True
        return False

    def set_value(self, obj, value=None, check_mandatory=True):
        if value is None:
            if self.default is not None:
                value = self.kind(self.default)
            elif check_mandatory and self.mandatory:
                msg = 'No value specified for mandatory parameter "{}" in {}'
                raise ConfigError(msg.format(self.name, obj.name))
        else:
            try:
                value = self.kind(value)
            except (ValueError, TypeError):
                typename = get_type_name(self.kind)
                msg = 'Bad value "{}" for {}; must be {} {}'
                article = get_article(typename)
                raise ConfigError(msg.format(value, self.name, article, typename))
        if value is not None:
            self.validate_value(self.name, value)
        if self.merge and hasattr(obj, self.name):
            value = merge_config_values(getattr(obj, self.name), value)
        setattr(obj, self.name, value)

    def validate(self, obj, check_mandatory=True):
        value = getattr(obj, self.name, None)
        if value is not None:
            self.validate_value(obj.name, value)
        else:
            if check_mandatory and self.mandatory:
                msg = 'No value specified for mandatory parameter "{}" in {}.'
                raise ConfigError(msg.format(self.name, obj.name))

    def validate_value(self, name, value):
        if self.allowed_values:
            self.validate_allowed_values(name, value)
        if self.constraint:
            self.validate_constraint(name, value)

    def validate_allowed_values(self, name, value):
        if 'list' in str(self.kind):
            for v in value:
                if v not in self.allowed_values:
                    msg = 'Invalid value {} for {} in {}; must be in {}'
                    raise ConfigError(msg.format(v, self.name, name, self.allowed_values))
        else:
            if value not in self.allowed_values:
                msg = 'Invalid value {} for {} in {}; must be in {}'
                raise ConfigError(msg.format(value, self.name, name, self.allowed_values))

    def validate_constraint(self, name, value):
        msg_vals = {'value': value, 'param': self.name, 'plugin': name}
        if isinstance(self.constraint, tuple) and len(self.constraint) == 2:
            constraint, msg = self.constraint  # pylint: disable=unpacking-non-sequence
        elif callable(self.constraint):
            constraint = self.constraint
            msg = '"{value}" failed constraint validation for "{param}" in "{plugin}".'
        else:
            raise ValueError('Invalid constraint for "{}": must be callable or a 2-tuple'.format(self.name))
        if not constraint(value):
            raise ConfigError(value, msg.format(**msg_vals))

    def __repr__(self):
        d = copy(self.__dict__)
        del d['description']
        return 'ConfigurationPoint({})'.format(d)

    __str__ = __repr__
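
# Illustrative usage sketch (hypothetical names, kept as a comment): a
# ConfigurationPoint coerces and validates a value before attaching it as an
# attribute of the target object.
#
#     cp = ConfigurationPoint('iterations', kind=int, default=1,
#                             constraint=(lambda x: x > 0, 'must be positive'))
#     spec = obj_dict()
#     spec.name = 'example'
#     cp.set_value(spec, '3')      # kind coerces the string '3' to int 3
#     assert spec.iterations == 3
#     cp.set_value(spec, 0)        # would raise ConfigError (constraint)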


#####################
### Configuration ###
#####################


def _to_pod(cfg_point, value):
    if is_pod(value):
        return value
    if hasattr(cfg_point.kind, 'to_pod'):
        return value.to_pod()
    msg = '{} value "{}" is not serializable'
    raise ValueError(msg.format(cfg_point.name, value))


class Configuration(object):

    config_points = []
    name = ''

    # The below line must be added to all subclasses
    configuration = {cp.name: cp for cp in config_points}

    @classmethod
    def from_pod(cls, pod):
        instance = cls()
        for cfg_point in cls.config_points:
            if cfg_point.name in pod:
                value = pod.pop(cfg_point.name)
                if hasattr(cfg_point.kind, 'from_pod'):
                    value = cfg_point.kind.from_pod(value)
                cfg_point.set_value(instance, value)
        if pod:
            msg = 'Invalid entry(ies) for "{}": "{}"'
            raise ValueError(msg.format(cls.name, '", "'.join(pod.keys())))
        return instance

    def __init__(self):
        for confpoint in self.config_points:
            confpoint.set_value(self, check_mandatory=False)

    def set(self, name, value, check_mandatory=True):
        if name not in self.configuration:
            raise ConfigError('Unknown {} configuration "{}"'.format(self.name,
                                                                     name))
        self.configuration[name].set_value(self, value,
                                           check_mandatory=check_mandatory)

    def update_config(self, values, check_mandatory=True):
        for k, v in values.iteritems():
            self.set(k, v, check_mandatory=check_mandatory)

    def validate(self):
        for cfg_point in self.config_points:
            cfg_point.validate(self)

    def to_pod(self):
        pod = {}
        for cfg_point in self.config_points:
            value = getattr(self, cfg_point.name, None)
            pod[cfg_point.name] = _to_pod(cfg_point, value)
        return pod
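
# Illustrative sketch (hypothetical subclass): each subclass re-binds
# ``configuration`` so that set() can look up its own config points by name.
#
#     class ExampleConfiguration(Configuration):
#         name = 'Example Configuration'
#         config_points = [
#             ConfigurationPoint('greeting', kind=str, default='hello'),
#         ]
#         configuration = {cp.name: cp for cp in config_points}
#
#     cfg = ExampleConfiguration()
#     cfg.set('greeting', 'hi')
#     assert cfg.to_pod()['greeting'] == 'hi'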


# This is the configuration for the core WA framework.
class MetaConfiguration(Configuration):

    name = "Meta Configuration"

    core_plugin_packages = [
        'wa.commands',
        'wa.framework.getters',
        'wa.framework.target.descriptor',
        'wa.instruments',
        'wa.output_processors',
        'wa.workloads',
    ]

    config_points = [
        ConfigurationPoint(
            'user_directory',
            description="""
            Path to the user directory. This is the location WA will look for
            user configuration, additional plugins and plugin dependencies.
            """,
            kind=str,
            default=os.path.join(os.path.expanduser('~'), '.workload_automation'),
        ),
        ConfigurationPoint(
            'assets_repository',
            description="""
            The local mount point for the filer hosting WA assets.
            """,
        ),
        ConfigurationPoint(
            'logging',
            kind=LoggingConfig,
            default=LoggingConfig.defaults,
            description="""
            WA logging configuration. This should be a dict with a subset
            of the following keys::

                :regular_format: Logging format used for regular console output
                :verbose_format: Logging format used for verbose console output
                :file_format: Logging format used for run.log
                :color: If ``True`` (the default), console logging output will
                        contain bash color escape codes. Set this to ``False`` if
                        console output will be piped somewhere that does not know
                        how to handle those.
            """,
        ),
        ConfigurationPoint(
            'verbosity',
            kind=int,
            default=0,
            description="""
            Verbosity of console output.
            """,
        ),
        ConfigurationPoint(  # TODO: Needs some format for dates etc/ comes from cfg
            'default_output_directory',
            default="wa_output",
            description="""
            The default output directory that will be created if not
            specified when invoking a run.
            """,
        ),
    ]
    configuration = {cp.name: cp for cp in config_points}

    @property
    def dependencies_directory(self):
        return os.path.join(self.user_directory, 'dependencies')

    @property
    def plugins_directory(self):
        return os.path.join(self.user_directory, 'plugins')

    @property
    def user_config_file(self):
        return os.path.join(self.user_directory, 'config.yaml')

    @property
    def additional_packages_file(self):
        return os.path.join(self.user_directory, 'packages')

    def __init__(self, environ=os.environ):
        super(MetaConfiguration, self).__init__()
        user_directory = environ.pop('WA_USER_DIRECTORY', '')
        if user_directory:
            self.set('user_directory', user_directory)

        self.plugin_packages = copy(self.core_plugin_packages)
        if os.path.isfile(self.additional_packages_file):
            with open(self.additional_packages_file) as fh:
                extra_packages = [p.strip() for p in fh.read().split('\n') if p.strip()]
            self.plugin_packages.extend(extra_packages)
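
# Illustrative sketch (POSIX paths assumed): the user directory can be
# overridden through the environment before the module-level ``settings``
# at the bottom of this file is constructed.
#
#     meta = MetaConfiguration({'WA_USER_DIRECTORY': '/tmp/wa_user'})
#     assert meta.user_directory == '/tmp/wa_user'
#     assert meta.plugins_directory == '/tmp/wa_user/plugins'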


# This is generic top-level configuration for WA runs.
class RunConfiguration(Configuration):

    name = "Run Configuration"

    # Metadata is separated out because it is not loaded into the auto
    # generated config file
    meta_data = [
        ConfigurationPoint(
            'run_name',
            kind=str,
            description='''
            A string that labels the WA run that is being performed. This would
            typically be set in the ``config`` section of an agenda (see
            :ref:`configuration in an agenda <configuration_in_agenda>`) rather
            than in the config file.
            ''',
        ),
        ConfigurationPoint(
            'project',
            kind=str,
            description='''
            A string naming the project for which data is being collected. This
            may be useful, e.g. when uploading data to a shared database that
            is populated from multiple projects.
            ''',
        ),
        ConfigurationPoint(
            'project_stage',
            kind=dict,
            description='''
            A dict or a string that allows adding an additional identifier. This
            may be useful for long-running projects.
            ''',
        ),
    ]
    config_points = [
        ConfigurationPoint(
            'execution_order',
            kind=str,
            default='by_iteration',
            allowed_values=['by_iteration', 'by_spec', 'by_section', 'random'],
            description='''
            Defines the order in which the agenda spec will be executed. At the
            moment, the following execution orders are supported:

            ``"by_iteration"``
                The first iteration of each workload spec is executed one after
                the other, so all workloads are executed before proceeding on
                to the second iteration. E.g. A1 B1 C1 A2 C2 A3. This is the
                default if no order is explicitly specified.

                In case of multiple sections, this will spread them out, such
                that specs from the same section are further apart. E.g. given
                sections X and Y, global specs A and B, and two iterations,
                this will run ::

                        X.A1, Y.A1, X.B1, Y.B1, X.A2, Y.A2, X.B2, Y.B2

            ``"by_section"``
                Same as ``"by_iteration"``, however this will group specs from
                the same section together, so given sections X and Y, global
                specs A and B, and two iterations, this will run ::

                        X.A1, X.B1, Y.A1, Y.B1, X.A2, X.B2, Y.A2, Y.B2

            ``"by_spec"``
                All iterations of the first spec are executed before moving on
                to the next spec. E.g. A1 A2 A3 B1 C1 C2. This may also be
                specified as ``"classic"``, as this was the way workloads were
                executed in earlier versions of WA.

            ``"random"``
                Execution order is entirely random.
            ''',
        ),
        ConfigurationPoint(
            'reboot_policy',
            kind=RebootPolicy,
            default='as_needed',
            allowed_values=RebootPolicy.valid_policies,
            description='''
            This defines when during execution of a run the Device will be
            rebooted. The possible values are:

            ``"as_needed"``
                The device will only be rebooted if the need arises (e.g. if it
                becomes unresponsive).

            ``"never"``
                The device will never be rebooted.

            ``"initial"``
                The device will be rebooted when the execution first starts,
                just before executing the first workload spec.

            ``"each_spec"``
                The device will be rebooted before running a new workload spec.

                .. note:: this acts the same as each_iteration when execution order
                          is set to by_iteration

            ``"each_iteration"``
                The device will be rebooted before each new iteration.
            '''),
        ConfigurationPoint(
            'device',
            kind=str,
            default='generic_android',
            description='''
            This setting defines what specific Device subclass will be used to
            interact with the connected device. Obviously, this must match your
            setup.
            ''',
        ),
        ConfigurationPoint(
            'retry_on_status',
            kind=list_of(Status),
            default=['FAILED', 'PARTIAL'],
            allowed_values=Status.levels[Status.RUNNING.value:],
            description='''
            This is a list of statuses on which a job will be considered to have
            failed and will be automatically retried up to ``max_retries``
            times. This defaults to ``["FAILED", "PARTIAL"]`` if not set.
            Possible values are::

            ``"OK"``
                This iteration has completed and no errors have been detected

            ``"PARTIAL"``
                One or more instruments have failed (the iteration may still be running).

            ``"FAILED"``
                The workload itself has failed.

            ``"ABORTED"``
                The user interrupted the workload.
            ''',
        ),
        ConfigurationPoint(
            'max_retries',
            kind=int,
            default=2,
            description='''
            The maximum number of times failed jobs will be retried before
            giving up.

            .. note:: this number does not include the original attempt
            ''',
        ),
        ConfigurationPoint(
            'bail_on_init_failure',
            kind=bool,
            default=True,
            description='''
            When jobs fail during their main setup and run phases, WA will
            continue attempting to run the remaining jobs. However, by default,
            if they fail during their early initialization phase, the entire run
            will end without continuing to run jobs. Setting this to ``False``
            means that WA will instead skip all the jobs from the job spec that
            failed, but continue attempting to run others.
            '''
        ),
        ConfigurationPoint(
            'allow_phone_home',
            kind=bool, default=True,
            description='''
            Setting this to ``False`` prevents running any workloads that are marked
            with 'phones_home', meaning they are at risk of exposing information
            about the device to the outside world. For example, some benchmark
            applications upload device data to a database owned by the
            maintainers.

            This can be used to minimise the risk of accidentally running such
            workloads when testing confidential devices.
            '''),
    ]
    configuration = {cp.name: cp for cp in config_points + meta_data}

    @classmethod
    def from_pod(cls, pod):
        meta_pod = {}
        for cfg_point in cls.meta_data:
            meta_pod[cfg_point.name] = pod.pop(cfg_point.name, None)

        device_config = pod.pop('device_config', None)
        instance = super(RunConfiguration, cls).from_pod(pod)
        instance.device_config = device_config
        for cfg_point in cls.meta_data:
            cfg_point.set_value(instance, meta_pod[cfg_point.name])

        return instance

    def __init__(self):
        super(RunConfiguration, self).__init__()
        for confpoint in self.meta_data:
            confpoint.set_value(self, check_mandatory=False)
        self.device_config = None

    def merge_device_config(self, plugin_cache):
        """
        Merges global device config and validates that it is correct for the
        selected device.
        """
        # pylint: disable=no-member
        if self.device is None:
            msg = 'Attempting to merge device config with unspecified device'
            raise RuntimeError(msg)
        self.device_config = plugin_cache.get_plugin_config(self.device,
                                                            generic_name="device_config")

    def to_pod(self):
        pod = super(RunConfiguration, self).to_pod()
        pod['device_config'] = dict(self.device_config or {})
        return pod
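
# Illustrative sketch: config point values are coerced by their kind, so the
# ``reboot_policy`` default string becomes a RebootPolicy instance.
#
#     run_cfg = RunConfiguration()
#     run_cfg.set('device', 'generic_linux')
#     assert run_cfg.device == 'generic_linux'
#     assert str(run_cfg.reboot_policy) == 'as_needed'   # kind-coerced default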


class JobSpec(Configuration):

    name = "Job Spec"

    config_points = [
        ConfigurationPoint('iterations', kind=int, default=1,
                           description='''
                           How many times to repeat this workload spec
                           '''),
        ConfigurationPoint('workload_name', kind=str, mandatory=True,
                           aliases=["name"],
                           description='''
                           The name of the workload to run.
                           '''),
        ConfigurationPoint('workload_parameters', kind=obj_dict,
                           aliases=["params", "workload_params", "parameters"],
                           description='''
                           Parameters to be passed to the workload
                           '''),
        ConfigurationPoint('runtime_parameters', kind=obj_dict,
                           aliases=["runtime_params"],
                           description='''
                           Runtime parameters to be set prior to running
                           the workload.
                           '''),
        ConfigurationPoint('boot_parameters', kind=obj_dict,
                           aliases=["boot_params"],
                           description='''
                           Parameters to be used when rebooting the target
                           prior to running the workload.
                           '''),
        ConfigurationPoint('label', kind=str,
                           description='''
                           Similar to IDs but do not have the uniqueness restriction.
                           If specified, labels will be used by some output
                           processors instead of (or in addition to) the workload
                           name. For example, the csv output processor will put
                           the label in the "workload" column of the CSV file.
                           '''),
        ConfigurationPoint('augmentations', kind=toggle_set, merge=True,
                           aliases=["instruments", "processors", "instrumentation",
                                    "output_processors", "augment", "result_processors"],
                           description='''
                           The instruments and output processors to enable (or
                           disable, using a ~) during this workload spec. This combines the
                           "instrumentation" and "result_processors" from
                           previous versions of WA (the old entries are now
                           aliases for this).
                           '''),
        ConfigurationPoint('flash', kind=dict, merge=True,
                           description='''

                           '''),
        ConfigurationPoint('classifiers', kind=dict, merge=True,
                           description='''
                           Classifiers allow you to tag metrics from this workload
                           spec to help in post processing them. These are often
                           used to help identify what runtime_parameters were used
                           for results when post processing.
                           '''),
    ]
    configuration = {cp.name: cp for cp in config_points}

    @classmethod
    def from_pod(cls, pod):
        job_id = pod.pop('id')
        instance = super(JobSpec, cls).from_pod(pod)
        instance.id = job_id
        return instance

    @property
    def section_id(self):
        if self.id is not None:
            return self.id.rsplit('-', 1)[0]

    @property
    def workload_id(self):
        if self.id is not None:
            return self.id.rsplit('-', 1)[-1]

    def __init__(self):
        super(JobSpec, self).__init__()
        if self.classifiers is None:
            self.classifiers = OrderedDict()
        self.to_merge = defaultdict(OrderedDict)
        self._sources = []
        self.id = None

    def to_pod(self):
        pod = super(JobSpec, self).to_pod()
        pod['id'] = self.id
        return pod

    def update_config(self, source, check_mandatory=True):
        self._sources.append(source)
        values = source.config
        for k, v in values.iteritems():
            if k == "id":
                continue
            elif k.endswith('_parameters'):
                if v:
                    self.to_merge[k][source] = copy(v)
            else:
                try:
                    self.set(k, v, check_mandatory=check_mandatory)
                except ConfigError as e:
                    msg = 'Error in {}:\n\t{}'
                    raise ConfigError(msg.format(source.name, e.message))

    def merge_workload_parameters(self, plugin_cache):
        # merge global generic and specific config
        workload_params = plugin_cache.get_plugin_config(self.workload_name,
                                                         generic_name="workload_parameters",
                                                         is_final=False)

        cfg_points = plugin_cache.get_plugin_parameters(self.workload_name)
        for source in self._sources:
            config = dict(self.to_merge["workload_parameters"].get(source, {}))
            if not config:
                continue

            for name, cfg_point in cfg_points.iteritems():
                if name in config:
                    value = config.pop(name)
                    cfg_point.set_value(workload_params, value,
                                        check_mandatory=False)
            if config:
                msg = 'Unexpected config "{}" for "{}"'
                raise ConfigError(msg.format(config, self.workload_name))

        self.workload_parameters = workload_params

    def merge_runtime_parameters(self, plugin_cache, target_manager):

        # Order global runtime parameters
        runtime_parameters = OrderedDict()
        try:
            global_runtime_params = plugin_cache.get_plugin_config("runtime_parameters")
        except NotFoundError:
            global_runtime_params = {}
        for source in plugin_cache.sources:
            if source in global_runtime_params:
                runtime_parameters[source] = global_runtime_params[source]

        # Add runtime parameters from JobSpec
        for source, values in self.to_merge['runtime_parameters'].iteritems():
            runtime_parameters[source] = values

        # Merge
        self.runtime_parameters = target_manager.merge_runtime_parameters(runtime_parameters)

    def finalize(self):
        self.id = "-".join([source.config['id']
                            for source in self._sources[1:]])  # ignore first id, "global"
        if self.label is None:
            self.label = self.workload_name
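
# Illustrative sketch: finalize() joins the IDs of the non-global sources
# with hyphens, and the two properties above split on the last hyphen.
#
#     spec = JobSpec()
#     spec.id = 's1-wk1'
#     assert spec.section_id == 's1'
#     assert spec.workload_id == 'wk1'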


# This is used to construct the list of Jobs WA will run
class JobGenerator(object):

    name = "Jobs Configuration"

    @property
    def enabled_instruments(self):
        self._read_enabled_instruments = True
        return self._enabled_instruments.values()

    @property
    def enabled_processors(self):
        self._read_enabled_processors = True
        return self._enabled_processors.values()

    def __init__(self, plugin_cache):
        self.plugin_cache = plugin_cache
        self.ids_to_run = []
        self.sections = []
        self.workloads = []
        self._enabled_instruments = toggle_set()
        self._enabled_processors = toggle_set()
        self._read_enabled_instruments = False
        self._read_enabled_processors = False
        self.disabled_augmentations = set()

        self.job_spec_template = obj_dict(not_in_dict=['name'])
        self.job_spec_template.name = "globally specified job spec configuration"
        self.job_spec_template.id = "global"
        # Load defaults
        for cfg_point in JobSpec.configuration.itervalues():
            cfg_point.set_value(self.job_spec_template, check_mandatory=False)

        self.root_node = SectionNode(self.job_spec_template)

    def set_global_value(self, name, value):
        JobSpec.configuration[name].set_value(self.job_spec_template, value,
                                              check_mandatory=False)
        if name == "augmentations":
            self.update_augmentations(value)

    def add_section(self, section, workloads):
        new_node = self.root_node.add_section(section)
        for workload in workloads:
            new_node.add_workload(workload)

    def add_workload(self, workload):
        self.root_node.add_workload(workload)

    def disable_augmentations(self, augmentations):
        for entry in augmentations:
            if entry.startswith('~'):
                entry = entry[1:]
            try:
                self.plugin_cache.get_plugin_class(entry)
            except NotFoundError:
                raise ConfigError('Error disabling unknown augmentation: "{}"'.format(entry))
        self.disabled_augmentations = self.disabled_augmentations.union(augmentations)

    def update_augmentations(self, value):
        for entry in value:
            entry_name = entry[1:] if entry.startswith('~') else entry
            entry_cls = self.plugin_cache.get_plugin_class(entry_name)
            if entry_cls.kind == 'instrument':
                if self._read_enabled_instruments:
                    msg = "'enabled_instruments' cannot be updated after it has been accessed"
                    raise RuntimeError(msg)
                self._enabled_instruments.add(entry)
            elif entry_cls.kind == 'output_processor':
                if self._read_enabled_processors:
                    msg = "'enabled_processors' cannot be updated after it has been accessed"
                    raise RuntimeError(msg)
                self._enabled_processors.add(entry)
            else:
                msg = 'Unknown augmentation type: {}'
                raise ConfigError(msg.format(entry_cls.kind))
        self._enabled_instruments = self._enabled_instruments.merge_with(self.disabled_augmentations)
        self._enabled_processors = self._enabled_processors.merge_with(self.disabled_augmentations)

    def only_run_ids(self, ids):
        if isinstance(ids, str):
            ids = [ids]
        self.ids_to_run = ids

    def generate_job_specs(self, target_manager):
        specs = []
        for leaf in self.root_node.leaves():
            workload_entries = leaf.workload_entries
            sections = [leaf]
            for ancestor in leaf.ancestors():
                workload_entries = ancestor.workload_entries + workload_entries
                sections.insert(0, ancestor)

            for workload_entry in workload_entries:
                job_spec = create_job_spec(deepcopy(workload_entry), sections,
                                           target_manager, self.plugin_cache,
                                           self.disabled_augmentations)
                if self.ids_to_run:
                    for job_id in self.ids_to_run:
                        if job_id in job_spec.id:
                            break
                    else:
                        continue
                self.update_augmentations(job_spec.augmentations.values())
                specs.append(job_spec)
        return specs
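
# Illustrative sketch: ID filtering matches by substring against the full
# hyphenated job ID, so given jobs 's1-wk1' and 's2-wk1':
#
#     generator.only_run_ids('wk1')     # keeps both (same as ['wk1'])
#     generator.only_run_ids(['s1'])    # keeps only 's1-wk1'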


def create_job_spec(workload_entry, sections, target_manager, plugin_cache,
                    disabled_augmentations):
    job_spec = JobSpec()

    # PHASE 2.1: Merge general job spec configuration
    for section in sections:
        job_spec.update_config(section, check_mandatory=False)
    job_spec.update_config(workload_entry, check_mandatory=False)

    # PHASE 2.2: Merge global, section and workload entry "workload_parameters"
    job_spec.merge_workload_parameters(plugin_cache)

    # TODO: PHASE 2.3: Validate device runtime/boot parameters
    job_spec.merge_runtime_parameters(plugin_cache, target_manager)
    target_manager.validate_runtime_parameters(job_spec.runtime_parameters)

    # PHASE 2.4: Disable globally disabled augmentations
    job_spec.set("augmentations", disabled_augmentations)
    job_spec.finalize()

    return job_spec


def get_config_point_map(params):
    pmap = {}
    for p in params:
        pmap[p.name] = p
        for alias in p.aliases:
            pmap[alias] = p
    return pmap
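
# Illustrative sketch: aliases map onto the same ConfigurationPoint object as
# the canonical name.
#
#     cp = ConfigurationPoint('workload_name', kind=str, aliases=['name'])
#     pmap = get_config_point_map([cp])
#     assert pmap['workload_name'] is pmap['name'] is cp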


settings = MetaConfiguration(os.environ)