mirror of https://github.com/ARM-software/workload-automation.git synced 2025-09-02 11:22:41 +01:00

WA3 Existing Code

This commit is contained in:
Marc Bonnici
2017-02-21 13:37:11 +00:00
parent 067f76adf3
commit 1f1f2b12c6
54 changed files with 9239 additions and 0 deletions

0
wa/framework/__init__.py Normal file
View File

31
wa/framework/actor.py Normal file
View File

@@ -0,0 +1,31 @@
import uuid
import logging
from wa.framework import pluginloader
from wa.framework.plugin import Plugin
class JobActor(Plugin):
kind = 'job_actor'
def initialize(self, context):
pass
def run(self):
pass
def restart(self):
pass
def complete(self):
pass
def finalize(self):
pass
class NullJobActor(JobActor):
name = 'null-job-actor'

246
wa/framework/agenda.py Normal file
View File

@@ -0,0 +1,246 @@
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from copy import copy
from collections import OrderedDict, defaultdict
from wa.framework.exception import ConfigError, SerializerSyntaxError
from wa.utils.serializer import yaml
from wa.utils import counter
def get_aliased_param(d, aliases, default=None, pop=True):
alias_map = [i for i, a in enumerate(aliases) if a in d]
if len(alias_map) > 1:
message = 'Only one of {} may be specified in a single entry'
raise ConfigError(message.format(aliases))
elif alias_map:
if pop:
return d.pop(aliases[alias_map[0]])
else:
return d[aliases[alias_map[0]]]
else:
return default
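# Illustrative usage (not part of the original commit): given an entry that
# may spell the same option several ways, get_aliased_param() returns the
# value of whichever alias is present and, with pop=True (the default),
# removes it from the dict:
#
#     entry = {'workload_params': {'duration': 10}}
#     params = get_aliased_param(entry, ['workload_parameters', 'workload_params', 'params'])
#     assert params == {'duration': 10} and 'workload_params' not in entry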
class AgendaEntry(object):
def to_dict(self):
return copy(self.__dict__)
def __str__(self):
name = self.__class__.__name__.split('.')[-1]
if hasattr(self, 'id'):
return '{}({})'.format(name, self.id)
else:
return name
__repr__ = __str__
class AgendaWorkloadEntry(AgendaEntry):
"""
Specifies execution of a workload, including things like the number of
iterations, device runtime_parameters configuration, etc.
"""
def __init__(self, **kwargs):
super(AgendaWorkloadEntry, self).__init__()
self.id = kwargs.pop('id')
self.workload_name = get_aliased_param(kwargs, ['workload_name', 'name'])
if not self.workload_name:
raise ConfigError('No workload name specified in entry {}'.format(self.id))
self.label = kwargs.pop('label', self.workload_name)
self.number_of_iterations = kwargs.pop('iterations', None)
self.boot_parameters = get_aliased_param(kwargs,
['boot_parameters', 'boot_params'],
default=OrderedDict())
self.runtime_parameters = get_aliased_param(kwargs,
['runtime_parameters', 'runtime_params'],
default=OrderedDict())
self.workload_parameters = get_aliased_param(kwargs,
['workload_parameters', 'workload_params', 'params'],
default=OrderedDict())
self.instrumentation = kwargs.pop('instrumentation', [])
self.flash = kwargs.pop('flash', OrderedDict())
self.classifiers = kwargs.pop('classifiers', OrderedDict())
if kwargs:
raise ConfigError('Invalid entry(ies) in workload {}: {}'.format(self.id, ', '.join(kwargs.keys())))
class AgendaSectionEntry(AgendaEntry):
"""
Specifies execution of a workload, including things like the number of
iterations, device runtime_parameters configuration, etc.
"""
def __init__(self, agenda, **kwargs):
super(AgendaSectionEntry, self).__init__()
self.id = kwargs.pop('id')
self.number_of_iterations = kwargs.pop('iterations', None)
self.boot_parameters = get_aliased_param(kwargs,
['boot_parameters', 'boot_params'],
default=OrderedDict())
self.runtime_parameters = get_aliased_param(kwargs,
['runtime_parameters', 'runtime_params', 'params'],
default=OrderedDict())
self.workload_parameters = get_aliased_param(kwargs,
['workload_parameters', 'workload_params'],
default=OrderedDict())
self.instrumentation = kwargs.pop('instrumentation', [])
self.flash = kwargs.pop('flash', OrderedDict())
self.classifiers = kwargs.pop('classifiers', OrderedDict())
self.workloads = []
for w in kwargs.pop('workloads', []):
self.workloads.append(agenda.get_workload_entry(w))
if kwargs:
raise ConfigError('Invalid entry(ies) in section {}: {}'.format(self.id, ', '.join(kwargs.keys())))
def to_dict(self):
d = copy(self.__dict__)
d['workloads'] = [w.to_dict() for w in self.workloads]
return d
class AgendaGlobalEntry(AgendaEntry):
"""
Workload configuration global to all workloads.
"""
def __init__(self, **kwargs):
super(AgendaGlobalEntry, self).__init__()
self.number_of_iterations = kwargs.pop('iterations', None)
self.boot_parameters = get_aliased_param(kwargs,
['boot_parameters', 'boot_params'],
default=OrderedDict())
self.runtime_parameters = get_aliased_param(kwargs,
['runtime_parameters', 'runtime_params', 'params'],
default=OrderedDict())
self.workload_parameters = get_aliased_param(kwargs,
['workload_parameters', 'workload_params'],
default=OrderedDict())
self.instrumentation = kwargs.pop('instrumentation', [])
self.flash = kwargs.pop('flash', OrderedDict())
self.classifiers = kwargs.pop('classifiers', OrderedDict())
if kwargs:
raise ConfigError('Invalid entries in global section: {}'.format(kwargs))
class Agenda(object):
def __init__(self, source=None):
self.filepath = None
self.config = None
self.global_ = None
self.sections = []
self.workloads = []
self._seen_ids = defaultdict(set)
if source:
try:
counter.reset('section')
counter.reset('workload')
self._load(source)
except (ConfigError, SerializerSyntaxError, SyntaxError) as e:
raise ConfigError(str(e))
def add_workload_entry(self, w):
entry = self.get_workload_entry(w)
self.workloads.append(entry)
def get_workload_entry(self, w):
if isinstance(w, basestring):
w = {'name': w}
if not isinstance(w, dict):
raise ConfigError('Invalid workload entry: "{}" in {}'.format(w, self.filepath))
self._assign_id_if_needed(w, 'workload')
return AgendaWorkloadEntry(**w)
def expand(self, target):
# TODO: currently a no-op, this method is here to support future features, such
# as section cross products and sweeps.
pass
def _load(self, source): # pylint: disable=too-many-branches
try:
raw = self._load_raw_from_source(source)
except SerializerSyntaxError as e:
name = getattr(source, 'name', '')
raise ConfigError('Error parsing agenda {}: {}'.format(name, e))
if not isinstance(raw, dict):
message = '{} does not contain a valid agenda structure; top level must be a dict.'
raise ConfigError(message.format(self.filepath))
for k, v in raw.iteritems():
if k == 'config':
if not isinstance(v, dict):
raise ConfigError('Invalid agenda: "config" entry must be a dict')
self.config = v
elif k == 'global':
self.global_ = AgendaGlobalEntry(**v)
elif k == 'sections':
self._collect_existing_ids(v, 'section')
for s in v:
if not isinstance(s, dict):
raise ConfigError('Invalid section entry: "{}" in {}'.format(s, self.filepath))
self._collect_existing_ids(s.get('workloads', []), 'workload')
for s in v:
self._assign_id_if_needed(s, 'section')
self.sections.append(AgendaSectionEntry(self, **s))
elif k == 'workloads':
self._collect_existing_ids(v, 'workload')
for w in v:
self.workloads.append(self.get_workload_entry(w))
else:
raise ConfigError('Unexpected agenda entry "{}" in {}'.format(k, self.filepath))
def _load_raw_from_source(self, source):
if hasattr(source, 'read') and hasattr(source, 'name'): # file-like object
self.filepath = source.name
raw = yaml.load(source)
elif isinstance(source, basestring):
if os.path.isfile(source):
self.filepath = source
with open(source, 'rb') as fh:
raw = yaml.load(fh)
else: # assume YAML text
raw = yaml.loads(source)
else:
raise ConfigError('Unknown agenda source: {}'.format(source))
return raw
def _collect_existing_ids(self, ds, pool):
# Collection needs to take place first so that auto IDs can be
# correctly assigned, e.g. if someone explicitly specified an ID
# of '1' for one of the workloads.
for d in ds:
if isinstance(d, dict) and 'id' in d:
did = str(d['id'])
if did in self._seen_ids[pool]:
raise ConfigError('Duplicate {} ID: {}'.format(pool, did))
self._seen_ids[pool].add(did)
def _assign_id_if_needed(self, d, pool):
# Also enforces string IDs
if d.get('id') is None:
did = str(counter.next(pool))
while did in self._seen_ids[pool]:
did = str(counter.next(pool))
d['id'] = did
self._seen_ids[pool].add(did)
else:
d['id'] = str(d['id'])
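# Illustrative usage (a sketch, not part of the original commit): an Agenda
# can be constructed from a file path or directly from YAML text:
#
#     agenda = Agenda('workloads:\n  - dhrystone\n')
#     assert [w.workload_name for w in agenda.workloads] == ['dhrystone']
#     assert agenda.workloads[0].id == '1'  # auto-assigned string ID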

68
wa/framework/command.py Normal file
View File

@@ -0,0 +1,68 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import textwrap
from wa.framework.plugin import Plugin
from wa.framework.entrypoint import init_argument_parser
from wa.utils.doc import format_body
class Command(Plugin):
"""
Defines a Workload Automation command. This will be executed from the command line as
``wa <command> [args ...]``. This defines the name to be used when invoking wa, the
code that will actually be executed on invocation, and the argument parser to be used
to parse the rest of the command line arguments.
"""
kind = 'command'
help = None
usage = None
description = None
epilog = None
formatter_class = None
def __init__(self, subparsers, **kwargs):
super(Command, self).__init__(**kwargs)
self.group = subparsers
parser_params = dict(help=(self.help or self.description), usage=self.usage,
description=format_body(textwrap.dedent(self.description), 80),
epilog=self.epilog)
if self.formatter_class:
parser_params['formatter_class'] = self.formatter_class
self.parser = subparsers.add_parser(self.name, **parser_params)
init_argument_parser(self.parser) # propagate top-level options
self.initialize(None)
def initialize(self, context):
"""
Perform command-specific initialisation (e.g. adding command-specific options to the command's
parser). ``context`` is always ``None``.
"""
pass
def execute(self, args):
"""
Execute this command.
:args: An ``argparse.Namespace`` containing command line arguments (as returned by
``argparse.ArgumentParser.parse_args()``). This would usually be the result of
invoking ``self.parser``.
"""
raise NotImplementedError()

2
wa/framework/configuration/__init__.py Normal file
View File

@@ -0,0 +1,2 @@
from wa.framework.configuration.core import settings, ConfigurationPoint, PluginConfiguration
from wa.framework.configuration.core import merge_config_values, WA_CONFIGURATION

639
wa/framework/configuration/core.py Normal file
View File

@@ -0,0 +1,639 @@
import os
import shutil
import logging
from glob import glob
from copy import copy
from itertools import chain
from wa.framework import pluginloader
from wa.framework.exception import ConfigError
from wa.utils.types import integer, boolean, identifier, list_of_strings, list_of
from wa.utils.misc import isiterable, get_article
from wa.utils.serializer import read_pod, yaml
class ConfigurationPoint(object):
"""
This defines a generic configuration point for Workload Automation. This is
used to handle global settings, plugin parameters, etc.
"""
# Mapping for kind conversion; see docs for convert_types below
kind_map = {
int: integer,
bool: boolean,
}
def __init__(self, name,
kind=None,
mandatory=None,
default=None,
override=False,
allowed_values=None,
description=None,
constraint=None,
merge=False,
aliases=None,
convert_types=True):
"""
Create a new Parameter object.
:param name: The name of the parameter. This will become an instance
member of the extension object to which the parameter is
applied, so it must be a valid python identifier. This
is the only mandatory parameter.
:param kind: The type of parameter this is. This must be a callable
that takes an arbitrary object and converts it to the
expected type, or raises ``ValueError`` if such conversion
is not possible. Most Python standard types -- ``str``,
``int``, ``bool``, etc. -- can be used here. This
defaults to ``str`` if not specified.
:param mandatory: If set to ``True``, then a non-``None`` value for
this parameter *must* be provided on extension
object construction, otherwise ``ConfigError``
will be raised.
:param default: The default value for this parameter. If no value
is specified on extension construction, this value
will be used instead. (Note: if this is specified
and is not ``None``, then the ``mandatory`` parameter
will be ignored).
:param override: A ``bool`` that specifies whether a parameter of
the same name further up the hierarchy should
be overridden. If this is ``False`` (the
default), an exception will be raised by the
``AttributeCollection`` instead.
:param allowed_values: This should be the complete list of allowed
values for this parameter. Note: ``None``
value will always be allowed, even if it is
not in this list. If you want to disallow
``None``, set ``mandatory`` to ``True``.
:param constraint: If specified, this must be a callable that takes
the parameter value as an argument and return a
boolean indicating whether the constraint has been
satisfied. Alternatively, can be a two-tuple with
said callable as the first element and a string
describing the constraint as the second.
:param merge: The default behaviour when setting a value on an object
that already has that attribute is to override it with
the new value. If this is set to ``True`` then the two
values will be merged instead. The rules by which the
values are merged will be determined by the types of
the existing and new values -- see
``merge_config_values`` documentation for details.
:param aliases: Alternative names for the same configuration point.
These are largely for backwards compatibility.
:param convert_types: If ``True`` (the default), will automatically
convert ``kind`` values from native Python
types to WA equivalents. This allows more
intuitive interpretation of parameter values,
e.g. the string ``"false"`` being interpreted
as ``False`` when specified as the value for
a boolean Parameter.
"""
self.name = identifier(name)
if kind is not None and not callable(kind):
raise ValueError('Kind must be callable.')
if convert_types and kind in self.kind_map:
kind = self.kind_map[kind]
self.kind = kind
self.mandatory = mandatory
self.default = default
self.override = override
self.allowed_values = allowed_values
self.description = description
if self.kind is None and not self.override:
self.kind = str
if constraint is not None and not callable(constraint) and not isinstance(constraint, tuple):
raise ValueError('Constraint must be callable or a (callable, str) tuple.')
self.constraint = constraint
self.merge = merge
self.aliases = aliases or []
def match(self, name):
if name == self.name:
return True
elif name in self.aliases:
return True
return False
def set_value(self, obj, value=None):
if value is None:
if self.default is not None:
value = self.default
elif self.mandatory:
msg = 'No value specified for mandatory parameter {} in {}'
raise ConfigError(msg.format(self.name, obj.name))
else:
try:
value = self.kind(value)
except (ValueError, TypeError):
typename = self.get_type_name()
msg = 'Bad value "{}" for {}; must be {} {}'
article = get_article(typename)
raise ConfigError(msg.format(value, self.name, article, typename))
if self.merge and hasattr(obj, self.name):
value = merge_config_values(getattr(obj, self.name), value)
setattr(obj, self.name, value)
def validate(self, obj):
value = getattr(obj, self.name, None)
self.validate_value(obj, value)
def validate_value(self, obj, value):
if value is not None:
if self.allowed_values:
self._validate_allowed_values(obj, value)
if self.constraint:
self._validate_constraint(obj, value)
else:
if self.mandatory:
msg = 'No value specified for mandatory parameter {} in {}.'
raise ConfigError(msg.format(self.name, obj.name))
def get_type_name(self):
typename = str(self.kind)
if '\'' in typename:
typename = typename.split('\'')[1]
elif typename.startswith('<function'):
typename = typename.split()[1]
return typename
def _validate_allowed_values(self, obj, value):
if 'list' in str(self.kind):
for v in value:
if v not in self.allowed_values:
msg = 'Invalid value {} for {} in {}; must be in {}'
raise ConfigError(msg.format(v, self.name, obj.name, self.allowed_values))
else:
if value not in self.allowed_values:
msg = 'Invalid value {} for {} in {}; must be in {}'
raise ConfigError(msg.format(value, self.name, obj.name, self.allowed_values))
def _validate_constraint(self, obj, value):
msg_vals = {'value': value, 'param': self.name, 'extension': obj.name}
if isinstance(self.constraint, tuple) and len(self.constraint) == 2:
constraint, msg = self.constraint # pylint: disable=unpacking-non-sequence
elif callable(self.constraint):
constraint = self.constraint
msg = '"{value}" failed constraint validation for {param} in {extension}.'
else:
raise ValueError('Invalid constraint for {}: must be callable or a 2-tuple'.format(self.name))
if not constraint(value):
raise ConfigError(msg.format(**msg_vals))
def __repr__(self):
d = copy(self.__dict__)
del d['description']
return 'ConfPoint({})'.format(d)
__str__ = __repr__
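# Illustrative usage (a sketch, not part of the original commit): a
# ConfigurationPoint converts and sets a value as an attribute on a target
# object (here a minimal stand-in class with just a name):
#
#     class _Target(object):
#         name = 'example'
#     cp = ConfigurationPoint('threads', kind=int, default=1)
#     target = _Target()
#     cp.set_value(target)        # no value given -> the default is applied
#     cp.set_value(target, '4')   # '4' is converted via kind (int -> integer)
#     assert target.threads == 4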
class ConfigurationPointCollection(object):
def __init__(self):
self._configs = []
self._config_map = {}
def get(self, name, default=None):
return self._config_map.get(name, default)
def add(self, point):
if not isinstance(point, ConfigurationPoint):
raise ValueError('Must be a ConfigurationPoint, got {}'.format(point.__class__))
existing = self.get(point.name)
if existing:
if point.override:
new_point = copy(existing)
for a, v in point.__dict__.iteritems():
if v is not None:
setattr(new_point, a, v)
self.remove(existing)
point = new_point
else:
raise ValueError('Duplicate ConfigurationPoint "{}"'.format(point.name))
self._add(point)
def remove(self, point):
self._configs.remove(point)
del self._config_map[point.name]
for alias in point.aliases:
del self._config_map[alias]
append = add
def _add(self, point):
self._configs.append(point)
self._config_map[point.name] = point
for alias in point.aliases:
if alias in self._config_map:
message = 'Clashing alias "{}" between "{}" and "{}"'
raise ValueError(message.format(alias, point.name,
self._config_map[alias].name))
def __str__(self):
return str(self._configs)
__repr__ = __str__
def __iadd__(self, other):
for p in other:
self.add(p)
return self
def __iter__(self):
return iter(self._configs)
def __contains__(self, p):
if isinstance(p, basestring):
return p in self._config_map
return p.name in self._config_map
def __getitem__(self, i):
if isinstance(i, int):
return self._configs[i]
return self._config_map[i]
def __len__(self):
return len(self._configs)
class LoggingConfig(dict):
defaults = {
'file_format': '%(asctime)s %(levelname)-8s %(name)s: %(message)s',
'verbose_format': '%(asctime)s %(levelname)-8s %(name)s: %(message)s',
'regular_format': '%(levelname)-8s %(message)s',
'color': True,
}
def __init__(self, config=None):
if isinstance(config, dict):
config = {identifier(k.lower()): v for k, v in config.iteritems()}
self['regular_format'] = config.pop('regular_format', self.defaults['regular_format'])
self['verbose_format'] = config.pop('verbose_format', self.defaults['verbose_format'])
self['file_format'] = config.pop('file_format', self.defaults['file_format'])
self['color'] = config.pop('colour_enabled', self.defaults['color']) # legacy
self['color'] = config.pop('color', self.defaults['color'])
if config:
message = 'Unexpected logging configuration parameters: {}'
raise ValueError(message.format(', '.join(config.keys())))
elif config is None:
for k, v in self.defaults.iteritems():
self[k] = v
else:
raise ValueError(config)
__WA_CONFIGURATION = [
ConfigurationPoint(
'user_directory',
description="""
Path to the user directory. This is the location WA will look for
user configuration, additional plugins and plugin dependencies.
""",
kind=str,
default=os.path.join(os.path.expanduser('~'), '.workload_automation'),
),
ConfigurationPoint(
'plugin_packages',
kind=list_of_strings,
default=[
'wa.commands',
'wa.workloads',
# 'wa.instruments',
# 'wa.processors',
# 'wa.targets',
'wa.framework.actor',
'wa.framework.target',
'wa.framework.resource',
'wa.framework.execution',
],
description="""
List of packages that will be scanned for WA plugins.
""",
),
ConfigurationPoint(
'plugin_paths',
kind=list_of_strings,
default=[
'workloads',
'instruments',
'targets',
'processors',
# Legacy
'devices',
'result_processors',
],
description="""
List of paths that will be scanned for WA plugins.
""",
),
ConfigurationPoint(
'plugin_ignore_paths',
kind=list_of_strings,
default=[],
description="""
List of (sub)paths that will be ignored when scanning
``plugin_paths`` for WA plugins.
""",
),
ConfigurationPoint(
'filer_mount_point',
description="""
The local mount point for the filer hosting WA assets.
""",
),
ConfigurationPoint(
'logging',
kind=LoggingConfig,
description="""
WA logging configuration. This should be a dict with a subset
of the following keys::
:normal_format: Logging format used for console output
:verbose_format: Logging format used for verbose console output
:file_format: Logging format used for run.log
:color: If ``True`` (the default), console logging output will
contain bash color escape codes. Set this to ``False`` if
console output will be piped somewhere that does not know
how to handle those.
""",
),
ConfigurationPoint(
'verbosity',
kind=int,
default=0,
description="""
Verbosity of console output.
""",
),
]
WA_CONFIGURATION = {cp.name: cp for cp in __WA_CONFIGURATION}
ENVIRONMENT_VARIABLES = {
'WA_USER_DIRECTORY': WA_CONFIGURATION['user_directory'],
'WA_PLUGIN_PATHS': WA_CONFIGURATION['plugin_paths'],
'WA_EXTENSION_PATHS': WA_CONFIGURATION['plugin_paths'], # extension_paths (legacy)
}
class WAConfiguration(object):
"""
This is configuration for Workload Automation framework as a whole. This
does not track configuration for WA runs. Rather, this tracks "meta"
configuration, such as various locations WA looks for things, logging
configuration etc.
"""
basename = 'config'
def __init__(self):
self.user_directory = ''
self.dependencies_directory = 'dependencies'
self.plugin_packages = []
self.plugin_paths = []
self.plugin_ignore_paths = []
self.logging = {}
self._logger = logging.getLogger('settings')
for confpoint in WA_CONFIGURATION.itervalues():
confpoint.set_value(self)
def load_environment(self):
for name, confpoint in ENVIRONMENT_VARIABLES.iteritems():
value = os.getenv(name)
if value:
confpoint.set_value(self, value)
self._expand_paths()
def load_config_file(self, path):
self.load(read_pod(path))
def load_user_config(self):
globpath = os.path.join(self.user_directory, '{}.*'.format(self.basename))
for path in glob(globpath):
ext = os.path.splitext(path)[1].lower()
if ext in ['.pyc', '.pyo']:
continue
self.load_config_file(path)
def load(self, config):
for name, value in config.iteritems():
if name in WA_CONFIGURATION:
confpoint = WA_CONFIGURATION[name]
confpoint.set_value(self, value)
self._expand_paths()
def set(self, name, value):
if name not in WA_CONFIGURATION:
raise ConfigError('Unknown WA configuration "{}"'.format(name))
WA_CONFIGURATION[name].set_value(self, value)
def initialize_user_directory(self, overwrite=False):
"""
Initialize a fresh user environment, creating the Workload Automation user directory.
"""
if os.path.exists(self.user_directory):
if not overwrite:
raise ConfigError('Environment {} already exists.'.format(self.user_directory))
shutil.rmtree(self.user_directory)
self._expand_paths()
os.makedirs(self.dependencies_directory)
for path in self.plugin_paths:
os.makedirs(path)
with open(os.path.join(self.user_directory, 'config.yaml'), 'w') as wfh:
yaml.dump(self.to_pod(), wfh)
if os.getenv('USER') == 'root':
# If running with sudo on POSIX, change the ownership to the real user.
real_user = os.getenv('SUDO_USER')
if real_user:
import pwd # done here as module won't import on win32
user_entry = pwd.getpwnam(real_user)
uid, gid = user_entry.pw_uid, user_entry.pw_gid
os.chown(self.user_directory, uid, gid)
# why, oh why isn't there a recursive=True option for os.chown?
for root, dirs, files in os.walk(self.user_directory):
for d in dirs:
os.chown(os.path.join(root, d), uid, gid)
for f in files:
os.chown(os.path.join(root, f), uid, gid)
@staticmethod
def from_pod(pod):
instance = WAConfiguration()
instance.load(pod)
return instance
def to_pod(self):
return dict(
user_directory=self.user_directory,
plugin_packages=self.plugin_packages,
plugin_paths=self.plugin_paths,
plugin_ignore_paths=self.plugin_ignore_paths,
logging=self.logging,
)
def _expand_paths(self):
self.dependencies_directory = os.path.join(self.user_directory,
self.dependencies_directory)
expanded = []
for path in self.plugin_paths:
path = os.path.expanduser(path)
path = os.path.expandvars(path)
expanded.append(os.path.join(self.user_directory, path))
self.plugin_paths = expanded
expanded = []
for path in self.plugin_ignore_paths:
path = os.path.expanduser(path)
path = os.path.expandvars(path)
expanded.append(os.path.join(self.user_directory, path))
self.plugin_ignore_paths = expanded
class PluginConfiguration(object):
""" Maintains a mapping of plugin_name --> plugin_config. """
def __init__(self, loader=pluginloader):
self.loader = loader
self.config = {}
def update(self, name, config):
if not hasattr(config, 'get'):
raise ValueError('config must be a dict-like object got: {}'.format(config))
name, alias_config = self.loader.resolve_alias(name)
existing_config = self.config.get(name)
if existing_config is None:
existing_config = alias_config
new_config = config or {}
plugin_cls = self.loader.get_plugin_class(name)
def merge_config_values(base, other):
"""
This is used to merge two objects, typically when setting the value of a
``ConfigurationPoint``. First, both objects are categorized into one of the following:
c: A scalar value. Basically, most objects. These values
are treated as atomic, and not mergeable.
s: A sequence. Anything iterable that is not a dict or
a string (strings are considered scalars).
m: A key-value mapping. ``dict`` and its derivatives.
n: ``None``.
o: A mergeable object; this is an object that implements both
``merge_with`` and ``merge_into`` methods.
The merge rules based on the two categories are then as follows:
(c1, c2) --> c2
(s1, s2) --> s1 . s2
(m1, m2) --> m1 . m2
(c, s) --> [c] . s
(s, c) --> s . [c]
(s, m) --> s . [m]
(m, s) --> [m] . s
(m, c) --> ERROR
(c, m) --> ERROR
(o, X) --> o.merge_with(X)
(X, o) --> o.merge_into(X)
(X, n) --> X
(n, X) --> X
where:
'.' means concatenation (for maps, concatenation of (k, v) streams
then converted back into a map). If the types of the two objects
differ, the type of ``other`` is used for the result.
'X' means "any category"
'[]' used to indicate a literal sequence (not necessarily a ``list``).
when this is concatenated with an actual sequence, that sequence's
type is used.
notes:
- When a mapping is combined with a sequence, that mapping is
treated as a scalar value.
- When combining two mergeable objects, they're combined using
``o1.merge_with(o2)`` (_not_ using o2.merge_into(o1)).
- Combining anything with ``None`` yields that value, irrespective
of the order. So a ``None`` value is equivalent to the corresponding
item being omitted.
- When both values are scalars, merging is equivalent to overwriting.
- There is no recursion (e.g. if map values are lists, they will not
be merged; ``other`` will overwrite ``base`` values). If complicated
merging semantics (such as recursion) are required, they should be
implemented within custom mergeable types (i.e. those that implement
``merge_with`` and ``merge_into``).
While this can be used as a generic "combine any two arbitrary objects"
function, the semantics have been selected specifically for merging
configuration point values.
"""
cat_base = categorize(base)
cat_other = categorize(other)
if cat_base == 'n':
return other
elif cat_other == 'n':
return base
if cat_base == 'o':
return base.merge_with(other)
elif cat_other == 'o':
return other.merge_into(base)
if cat_base == 'm':
if cat_other == 's':
return merge_sequencies([base], other)
elif cat_other == 'm':
return merge_maps(base, other)
else:
message = 'merge error ({}, {}): "{}" and "{}"'
raise ValueError(message.format(cat_base, cat_other, base, other))
elif cat_base == 's':
if cat_other == 's':
return merge_sequencies(base, other)
else:
return merge_sequencies(base, [other])
else: # cat_base == 'c'
if cat_other == 's':
return merge_sequencies([base], other)
elif cat_other == 'm':
message = 'merge error ({}, {}): "{}" and "{}"'
raise ValueError(message.format(cat_base, cat_other, base, other))
else:
return other
def merge_sequencies(s1, s2):
return type(s2)(chain(s1, s2))
def merge_maps(m1, m2):
return type(m2)(chain(m1.iteritems(), m2.iteritems()))
def categorize(v):
if hasattr(v, 'merge_with') and hasattr(v, 'merge_into'):
return 'o'
elif hasattr(v, 'iteritems'):
return 'm'
elif isiterable(v):
return 's'
elif v is None:
return 'n'
else:
return 'c'
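# Illustrative examples of the merge rules above (a sketch, not part of the
# original commit):
#
#     merge_config_values(1, 2)                # (c, c) -> 2
#     merge_config_values([1, 2], [3])         # (s, s) -> [1, 2, 3]
#     merge_config_values({'a': 1}, {'b': 2})  # (m, m) -> {'a': 1, 'b': 2}
#     merge_config_values('x', ['y'])          # (c, s) -> ['x', 'y']
#     merge_config_values(None, [1])           # (n, X) -> [1]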
settings = WAConfiguration()

67
wa/framework/configuration/execution.py Normal file
View File

@@ -0,0 +1,67 @@
from copy import copy
from collections import OrderedDict
from wa.framework import pluginloader
from wa.framework.exception import ConfigError
from wa.framework.configuration.core import ConfigurationPoint
from wa.utils.types import TreeNode, list_of, identifier
class ExecConfig(object):
static_config_points = [
ConfigurationPoint(
'components',
kind=list_of(identifier),
description="""
Components to be activated.
""",
),
ConfigurationPoint(
'runtime_parameters',
kind=list_of(identifier),
aliases=['runtime_params'],
description="""
Runtime parameters to be set for the execution.
""",
),
ConfigurationPoint(
'classifiers',
kind=list_of(str),
description="""
Classifiers to be used. Classifiers are arbitrary key-value
pairs associated with this config. They may be used during output
processing and should be used to provide additional context for
collected results.
""",
),
]
config_points = None
@classmethod
def _load(cls, load_global=False, loader=pluginloader):
if cls.config_points is None:
cls.config_points = {c.name: c for c in cls.static_config_points}
for plugin in loader.list_plugins():
cp = ConfigurationPoint(
plugin.name,
kind=OrderedDict,
description="""
Configuration for {} plugin.
""".format(plugin.name)
)
cls._add_config_point(plugin.name, cp)
for alias in plugin.aliases:
cls._add_config_point(alias.name, cp)
@classmethod
def _add_config_point(cls, name, cp):
if name in cls.config_points:
message = 'Config point for "{}" already exists ("{}")'
raise ValueError(message.format(name, cls.config_points[name].name))
cls.config_points[name] = cp
class GlobalExecConfig(ExecConfig):
pass

83
wa/framework/entrypoint.py Normal file
View File

@@ -0,0 +1,83 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import argparse
import logging
import subprocess
from wa.framework import pluginloader, log
from wa.framework.configuration import settings
from wa.framework.exception import WAError
from wa.utils.doc import format_body
from wa.utils.misc import init_argument_parser
import warnings
warnings.filterwarnings(action='ignore', category=UserWarning, module='zope')
logger = logging.getLogger('wa')
def init_settings():
settings.load_environment()
if not os.path.isdir(settings.user_directory):
settings.initialize_user_directory()
settings.load_user_config()
def get_argument_parser():
description = ("Execute automated workloads on a remote device and process "
"the resulting output.\n\nUse \"wa <subcommand> -h\" to see "
"help for individual subcommands.")
parser = argparse.ArgumentParser(description=format_body(description, 80),
prog='wa',
formatter_class=argparse.RawDescriptionHelpFormatter,
)
init_argument_parser(parser)
return parser
def load_commands(subparsers):
commands = {}
for command in pluginloader.list_commands():
commands[command.name] = pluginloader.get_command(command.name, subparsers=subparsers)
return commands
def main():
try:
log.init()
init_settings()
parser = get_argument_parser()
commands = load_commands(parser.add_subparsers(dest='command')) # each command will add its own subparser
args = parser.parse_args()
settings.set('verbosity', args.verbose)
if args.config:
settings.load_config_file(args.config)
log.set_level(settings.verbosity)
command = commands[args.command]
sys.exit(command.execute(args))
except KeyboardInterrupt:
logging.info('Got CTRL-C. Aborting.')
sys.exit(1)
except Exception as e: # pylint: disable=broad-except
log.log_error(e, logger, critical=True)
if isinstance(e, WAError):
sys.exit(2)
else:
sys.exit(3)

139
wa/framework/exception.py Normal file
View File

@@ -0,0 +1,139 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wa.utils.misc import get_traceback, TimeoutError # NOQA pylint: disable=W0611
class WAError(Exception):
"""Base class for all Workload Automation exceptions."""
pass
class NotFoundError(WAError):
"""Raised when the specified item is not found."""
pass
class ValidationError(WAError):
"""Raised on failure to validate an extension."""
pass
class WorkloadError(WAError):
"""General Workload error."""
pass
class HostError(WAError):
"""Problem with the host on which WA is running."""
pass
class JobError(WAError):
"""Job execution error."""
pass
class InstrumentError(WAError):
"""General Instrument error."""
pass
class ResultProcessorError(WAError):
"""General ResultProcessor error."""
pass
class ResourceError(WAError):
"""General Resolver error."""
pass
class CommandError(WAError):
"""Raised by commands when they have encountered an error condition
during execution."""
pass
class ToolError(WAError):
"""Raised by tools when they have encountered an error condition
during execution."""
pass
class ConfigError(WAError):
"""Raised when configuration provided is invalid. This error suggests that
the user should modify their config and try again."""
pass
class SerializerSyntaxError(Exception):
"""
Error loading a serialized structure from/to a file handle.
"""
def __init__(self, message, line=None, column=None):
super(SerializerSyntaxError, self).__init__(message)
self.line = line
self.column = column
def __str__(self):
linestring = ' on line {}'.format(self.line) if self.line else ''
colstring = ' in column {}'.format(self.column) if self.column else ''
message = 'Syntax Error{}: {}'
return message.format(''.join([linestring, colstring]), self.message)
class PluginLoaderError(WAError):
"""Raised when there is an error loading an extension or
an external resource. Apart from the usual message, the __init__
takes an exc_info parameter which should be the result of
sys.exc_info() for the original exception (if any) that
caused the error."""
def __init__(self, message, exc_info=None):
super(PluginLoaderError, self).__init__(message)
self.exc_info = exc_info
def __str__(self):
if self.exc_info:
orig = self.exc_info[1]
orig_name = type(orig).__name__
if isinstance(orig, WAError):
reason = 'because of:\n{}: {}'.format(orig_name, orig)
else:
reason = 'because of:\n{}\n{}: {}'.format(get_traceback(self.exc_info), orig_name, orig)
return '\n'.join([self.message, reason])
else:
return self.message
class WorkerThreadError(WAError):
"""
This should get raised in the main thread if a non-WAError-derived exception occurs on
a worker/background thread. If a WAError-derived exception is raised in the worker, then
that exception should be re-raised on the main thread directly -- the main point of this is
to preserve the backtrace in the output, and a backtrace doesn't get output for WAErrors.
"""
def __init__(self, thread, exc_info):
self.thread = thread
self.exc_info = exc_info
orig = self.exc_info[1]
orig_name = type(orig).__name__
message = 'Exception of type {} occurred on thread {}:\n'.format(orig_name, thread)
message += '{}\n{}: {}'.format(get_traceback(self.exc_info), orig_name, orig)
super(WorkerThreadError, self).__init__(message)

369
wa/framework/execution.py Normal file
View File

@@ -0,0 +1,369 @@
import os
import logging
import shutil
import random
from copy import copy
from collections import OrderedDict, defaultdict
from wa.framework import pluginloader, signal, log
from wa.framework.run import Runner, RunnerJob
from wa.framework.output import RunOutput
from wa.framework.actor import JobActor
from wa.framework.resource import ResourceResolver
from wa.framework.agenda import Agenda
from wa.framework.exception import ConfigError, NotFoundError
from wa.framework.configuration import ConfigurationPoint, PluginConfiguration, WA_CONFIGURATION
from wa.utils.serializer import read_pod
from wa.utils.misc import ensure_directory_exists as _d, Namespace
from wa.utils.types import list_of, identifier, caseless_string
__all__ = [
'Executor',
'ExecutionOutput',
'ExecutionContext',
'ExecuteWorkloadContainerActor',
'ExecuteWorkloadJobActor',
]
class Executor(object):
def __init__(self, output):
self.logger = logging.getLogger('executor')
self.output = output
self.config = ExecutionRunConfiguration()
self.agenda_string = None
self.agenda = None
self.jobs = None
self.container = None
self.target = None
def load_config(self, filepath):
self.config.update(filepath)
def load_agenda(self, agenda_string):
if self.agenda:
raise RuntimeError('Only one agenda may be loaded per run.')
self.agenda_string = agenda_string
if os.path.isfile(agenda_string):
self.logger.debug('Loading agenda from {}'.format(agenda_string))
self.agenda = Agenda(agenda_string)
shutil.copy(agenda_string, self.output.config_directory)
else:
self.logger.debug('"{}" is not a file; assuming workload name.'.format(agenda_string))
self.agenda = Agenda()
self.agenda.add_workload_entry(agenda_string)
def disable_instrument(self, name):
if not self.agenda:
raise RuntimeError('initialize() must be invoked before disable_instrument()')
self.agenda.config['instrumentation'].append('~{}'.format(name))
def initialize(self):
if not self.agenda:
raise RuntimeError('No agenda has been loaded.')
self.config.update(self.agenda.config)
self.config.consolidate()
self._initialize_target()
self._initialize_job_config()
def execute(self, selectors=None):
pass
def finalize(self):
pass
def _initialize_target(self):
pass
def _initialize_job_config(self):
self.agenda.expand(self.target)
for tup in agenda_iterator(self.agenda, self.config.execution_order):
glob, sect, workload, iter_number = tup
def agenda_iterator(agenda, order):
"""
Iterates over all job components in an agenda, yielding tuples in the form ::
(global_entry, section_entry, workload_entry, iteration_number)
which fully define the job to be created. The order in which these tuples are
yielded is determined by the ``order`` parameter which may be one of the following
values:
``"by_iteration"``
The first iteration of each workload spec is executed one after the other,
so all workloads are executed before proceeding on to the second iteration.
E.g. A1 B1 C1 A2 C2 A3. This is the default if no order is explicitly specified.
In case of multiple sections, this will spread them out, such that specs
from the same section are further apart. E.g. given sections X and Y, global
specs A and B, and two iterations, this will run ::
X.A1, Y.A1, X.B1, Y.B1, X.A2, Y.A2, X.B2, Y.B2
``"by_section"``
Same as ``"by_iteration"``, however this will group specs from the same
section together, so given sections X and Y, global specs A and B, and two iterations,
this will run ::
X.A1, X.B1, Y.A1, Y.B1, X.A2, X.B2, Y.A2, Y.B2
``"by_spec"``
All iterations of the first spec are executed before moving on to the next
spec. E.g. A1 A2 A3 B1 C1 C2.
``"random"``
Execution order is entirely random.
"""
# TODO: this would be a place to perform section expansions.
# (e.g. sweeps, cross-products, etc).
global_iterations = agenda.global_.number_of_iterations
all_iterations = [global_iterations]
all_iterations.extend([s.number_of_iterations for s in agenda.sections])
all_iterations.extend([w.number_of_iterations for w in agenda.workloads])
max_iterations = max(all_iterations)
if order == 'by_spec':
if agenda.sections:
for section in agenda.sections:
section_iterations = section.number_of_iterations or global_iterations
for workload in agenda.workloads + section.workloads:
workload_iterations = workload.number_of_iterations or section_iterations
for i in xrange(workload_iterations):
yield agenda.global_, section, workload, i
else: # not sections
for workload in agenda.workloads:
workload_iterations = workload.number_of_iterations or global_iterations
for i in xrange(workload_iterations):
yield agenda.global_, None, workload, i
elif order == 'by_section':
for i in xrange(max_iterations):
if agenda.sections:
for section in agenda.sections:
section_iterations = section.number_of_iterations or global_iterations
for workload in agenda.workloads + section.workloads:
workload_iterations = workload.number_of_iterations or section_iterations
if i < workload_iterations:
yield agenda.global_, section, workload, i
else: # not sections
for workload in agenda.workloads:
workload_iterations = workload.number_of_iterations or global_iterations
if i < workload_iterations:
yield agenda.global_, None, workload, i
elif order == 'by_iteration':
for i in xrange(max_iterations):
if agenda.sections:
for workload in agenda.workloads:
for section in agenda.sections:
section_iterations = section.number_of_iterations or global_iterations
workload_iterations = workload.number_of_iterations or section_iterations or global_iterations
if i < workload_iterations:
yield agenda.global_, section, workload, i
# Now do the section-specific workloads
for section in agenda.sections:
section_iterations = section.number_of_iterations or global_iterations
for workload in section.workloads:
workload_iterations = workload.number_of_iterations or section_iterations or global_iterations
if i < workload_iterations:
yield agenda.global_, section, workload, i
else: # not sections
for workload in agenda.workloads:
workload_iterations = workload.number_of_iterations or global_iterations
if i < workload_iterations:
yield agenda.global_, None, workload, i
elif order == 'random':
tuples = list(agenda_iterator(agenda, order='by_section'))
random.shuffle(tuples)
for t in tuples:
yield t
else:
raise ValueError('Invalid order: "{}"'.format(order))
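# Illustrative ordering (a sketch, not part of the original commit; relies on
# the Agenda import above): two global workloads, two iterations each:
#
#     agenda = Agenda('global:\n  iterations: 2\nworkloads:\n  - A\n  - B\n')
#     [(w.workload_name, i) for _, _, w, i in agenda_iterator(agenda, 'by_spec')]
#     # -> [('A', 0), ('A', 1), ('B', 0), ('B', 1)]
#     [(w.workload_name, i) for _, _, w, i in agenda_iterator(agenda, 'by_iteration')]
#     # -> [('A', 0), ('B', 0), ('A', 1), ('B', 1)]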
class RebootPolicy(object):
"""
Represents the reboot policy for the execution -- at what points the device
should be rebooted. This, in turn, is controlled by the policy value that is
passed in on construction and would typically be read from the user's settings.
Valid policy values are:
:never: The device will never be rebooted.
:as_needed: Only reboot the device if it becomes unresponsive, or needs to be flashed, etc.
:initial: The device will be rebooted when the execution first starts, just before
executing the first workload spec.
:each_spec: The device will be rebooted before running a new workload spec.
:each_iteration: The device will be rebooted before each new iteration.
"""
valid_policies = ['never', 'as_needed', 'initial', 'each_spec', 'each_iteration']
def __init__(self, policy):
policy = policy.strip().lower().replace(' ', '_')
if policy not in self.valid_policies:
message = 'Invalid reboot policy {}; must be one of {}'.format(policy, ', '.join(self.valid_policies))
raise ConfigError(message)
self.policy = policy
@property
def can_reboot(self):
return self.policy != 'never'
@property
def perform_initial_boot(self):
return self.policy not in ['never', 'as_needed']
@property
def reboot_on_each_spec(self):
return self.policy in ['each_spec', 'each_iteration']
@property
def reboot_on_each_iteration(self):
return self.policy == 'each_iteration'
def __str__(self):
return self.policy
__repr__ = __str__
def __cmp__(self, other):
if isinstance(other, RebootPolicy):
return cmp(self.policy, other.policy)
else:
return cmp(self.policy, other)
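# Illustrative usage (a sketch, not part of the original commit):
#
#     policy = RebootPolicy('each spec')  # normalised to 'each_spec'
#     assert policy.can_reboot and policy.reboot_on_each_spec
#     assert not policy.reboot_on_each_iteration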
class RuntimeParameterSetter(object):
"""
Manages runtime parameter state during execution.
"""
@property
def target(self):
return self.target_assistant.target
def __init__(self, target_assistant):
self.target_assistant = target_assistant
self.to_set = defaultdict(list) # name --> list of values
self.last_set = {}
self.to_unset = defaultdict(int) # name --> count
def validate(self, params):
self.target_assistant.validate_runtime_parameters(params)
def mark_set(self, params):
for name, value in params.iteritems():
self.to_set[name].append(value)
def mark_unset(self, params):
for name in params.iterkeys():
self.to_unset[name] += 1
def inact_set(self):
self.target_assistant.clear_parameters()
for name in self.to_set:
self._set_if_necessary(name)
self.target_assistant.set_parameters()
def inact_unset(self):
self.target_assistant.clear_parameters()
for name, count in self.to_unset.iteritems():
while count:
self.to_set[name].pop()
count -= 1
self._set_if_necessary(name)
self.target_assistant.set_parameters()
def _set_if_necessary(self, name):
if not self.to_set[name]:
return
new_value = self.to_set[name][-1]
prev_value = self.last_set.get(name)
if new_value != prev_value:
self.target_assistant.add_parameter(name, new_value)
self.last_set[name] = new_value
class WorkloadExecutionConfig(object):
@staticmethod
def from_pod(pod):
return WorkloadExecutionConfig(**pod)
def __init__(self, workload_name, workload_parameters=None,
runtime_parameters=None, components=None,
assumptions=None):
self.workload_name = workload_name or None
self.workload_parameters = workload_parameters or {}
self.runtime_parameters = runtime_parameters or {}
self.components = components or {}
self.assumptions = assumptions or {}
def to_pod(self):
return copy(self.__dict__)
class WorkloadExecutionActor(JobActor):
def __init__(self, target, config, loader=pluginloader):
self.target = target
self.config = config
self.logger = logging.getLogger('exec')
self.context = None
self.workload = loader.get_workload(config.workload_name, target,
**config.workload_parameters)
def get_config(self):
return self.config.to_pod()
def initialize(self, context):
self.context = context
self.workload.init_resources(self.context)
self.workload.validate()
self.workload.initialize(self.context)
def run(self):
if not self.workload:
self.logger.warning('Failed to initialize workload; skipping execution')
return
self.pre_run()
self.logger.info('Setting up workload')
with signal.wrap('WORKLOAD_SETUP'):
self.workload.setup(self.context)
try:
error = None
self.logger.info('Executing workload')
try:
with signal.wrap('WORKLOAD_EXECUTION'):
self.workload.run(self.context)
except Exception as e:
log.log_error(e, self.logger)
error = e
self.logger.info('Processing execution results')
with signal.wrap('WORKLOAD_RESULT_UPDATE'):
if not error:
self.workload.update_result(self.context)
else:
self.logger.info('Workload execution failed; not extracting workload results.')
raise error
finally:
if self.target.check_responsive():
self.logger.info('Tearing down workload')
with signal.wrap('WORKLOAD_TEARDOWN'):
self.workload.teardown(self.context)
self.post_run()
def finalize(self):
self.workload.finalize(self.context)
def pre_run(self):
# TODO: enable components, etc
pass
def post_run(self):
pass

23
wa/framework/host.py Normal file
View File

@@ -0,0 +1,23 @@
import os
from wa.framework.configuration import settings
from wa.framework.exception import ConfigError
from wa.utils.misc import ensure_directory_exists
class HostRunConfig(object):
"""
Host-side configuration for a run.
"""
def __init__(self, output_directory,
run_info_directory=None,
run_config_directory=None):
self.output_directory = output_directory
self.run_info_directory = run_info_directory or os.path.join(self.output_directory, '_info')
self.run_config_directory = run_config_directory or os.path.join(self.output_directory, '_config')
def initialize(self):
ensure_directory_exists(self.output_directory)
ensure_directory_exists(self.run_info_directory)
ensure_directory_exists(self.run_config_directory)
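# Illustrative usage (a sketch, not part of the original commit):
#
#     config = HostRunConfig('wa_output')
#     config.initialize()  # creates wa_output/, wa_output/_info/ and wa_output/_config/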

306
wa/framework/log.py Normal file
View File

@@ -0,0 +1,306 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=E1101
import logging
import string
import threading
import subprocess
import colorama
from wa.framework import signal
from wa.framework.exception import WAError
from wa.utils.misc import get_traceback
COLOR_MAP = {
logging.DEBUG: colorama.Fore.BLUE,
logging.INFO: colorama.Fore.GREEN,
logging.WARNING: colorama.Fore.YELLOW,
logging.ERROR: colorama.Fore.RED,
logging.CRITICAL: colorama.Style.BRIGHT + colorama.Fore.RED,
}
RESET_COLOR = colorama.Style.RESET_ALL
_indent_level = 0
_indent_width = 4
_console_handler = None
def init(verbosity=logging.INFO, color=True, indent_with=4,
regular_fmt='%(levelname)-8s %(message)s',
verbose_fmt='%(asctime)s %(levelname)-8s %(name)-10.10s: %(message)s',
debug=False):
global _indent_width, _console_handler
_indent_width = indent_with
signal.log_error_func = lambda m: log_error(m, signal.logger)
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
error_handler = ErrorSignalHandler(logging.DEBUG)
root_logger.addHandler(error_handler)
_console_handler = logging.StreamHandler()
if color:
formatter = ColorFormatter
else:
formatter = LineFormatter
if verbosity:
_console_handler.setLevel(logging.DEBUG)
_console_handler.setFormatter(formatter(verbose_fmt))
else:
_console_handler.setLevel(logging.INFO)
_console_handler.setFormatter(formatter(regular_fmt))
root_logger.addHandler(_console_handler)
logging.basicConfig(level=logging.DEBUG)
if not debug:
logging.raiseExceptions = False
def set_level(level):
_console_handler.setLevel(level)
def add_file(filepath, level=logging.DEBUG,
fmt='%(asctime)s %(levelname)-8s %(name)s: %(message)-10.10s'):
root_logger = logging.getLogger()
file_handler = logging.FileHandler(filepath)
file_handler.setLevel(level)
file_handler.setFormatter(LineFormatter(fmt))
root_logger.addHandler(file_handler)
def enable(logs):
if isinstance(logs, list):
for log in logs:
__enable_logger(log)
else:
__enable_logger(logs)
def disable(logs):
if isinstance(logs, list):
for log in logs:
__disable_logger(log)
else:
__disable_logger(logs)
def __enable_logger(logger):
if isinstance(logger, basestring):
logger = logging.getLogger(logger)
logger.propagate = True
def __disable_logger(logger):
if isinstance(logger, basestring):
logger = logging.getLogger(logger)
logger.propagate = False
def indent():
global _indent_level
_indent_level += 1
def dedent():
global _indent_level
_indent_level -= 1
def log_error(e, logger, critical=False):
"""
Log the specified Exception as an error. The Error message will be formatted
differently depending on the nature of the exception.
:e: the error to log. should be an instance of ``Exception``
:logger: logger to be used.
:critical: if ``True``, this error will be logged at ``logging.CRITICAL``
level, otherwise it will be logged as ``logging.ERROR``.
"""
if critical:
log_func = logger.critical
else:
log_func = logger.error
if isinstance(e, KeyboardInterrupt):
log_func('Got CTRL-C. Aborting.')
elif isinstance(e, WAError):
log_func(e)
elif isinstance(e, subprocess.CalledProcessError):
tb = get_traceback()
log_func(tb)
command = e.cmd
if e.args:
command = '{} {}'.format(command, ' '.join(e.args))
message = 'Command \'{}\' returned non-zero exit status {}\nOUTPUT:\n{}\n'
log_func(message.format(command, e.returncode, e.output))
elif isinstance(e, SyntaxError):
tb = get_traceback()
log_func(tb)
message = 'Syntax Error in {}, line {}, offset {}:'
log_func(message.format(e.filename, e.lineno, e.offset))
log_func('\t{}'.format(e.msg))
else:
tb = get_traceback()
log_func(tb)
log_func('{}({})'.format(e.__class__.__name__, e))
class ErrorSignalHandler(logging.Handler):
"""
Emits signals for ERROR and WARNING level traces.
"""
def emit(self, record):
if record.levelno == logging.ERROR:
signal.send(signal.ERROR_LOGGED, self)
elif record.levelno == logging.WARNING:
signal.send(signal.WARNING_LOGGED, self)
class LineFormatter(logging.Formatter):
"""
Logs each line of the message separately.
"""
def format(self, record):
record.message = record.getMessage()
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
indent = _indent_width * _indent_level
d = record.__dict__
parts = []
for line in record.message.split('\n'):
line = ' ' * indent + line
d.update({'message': line.strip('\r')})
parts.append(self._fmt % d)
return '\n'.join(parts)
class ColorFormatter(LineFormatter):
"""
Formats logging records with color and prepends record info
to each line of the message.
BLUE for DEBUG logging level
GREEN for INFO logging level
YELLOW for WARNING logging level
RED for ERROR logging level
BOLD RED for CRITICAL logging level
"""
def __init__(self, fmt=None, datefmt=None):
super(ColorFormatter, self).__init__(fmt, datefmt)
template_text = self._fmt.replace('%(message)s', RESET_COLOR + '%(message)s${color}')
template_text = '${color}' + template_text + RESET_COLOR
self.fmt_template = string.Template(template_text)
def format(self, record):
self._set_color(COLOR_MAP[record.levelno])
return super(ColorFormatter, self).format(record)
def _set_color(self, color):
self._fmt = self.fmt_template.substitute(color=color)
class BaseLogWriter(object):
def __init__(self, name, level=logging.DEBUG):
"""
File-like object class designed to be used for logging from streams.
Each complete line (terminated by a new line character) gets logged
at the specified level (DEBUG by default). Incomplete lines are buffered
until the next new line.
:param name: The name of the logger that will be used.
"""
self.logger = logging.getLogger(name)
self.buffer = ''
if level == logging.DEBUG:
self.do_write = self.logger.debug
elif level == logging.INFO:
self.do_write = self.logger.info
elif level == logging.WARNING:
self.do_write = self.logger.warning
elif level == logging.ERROR:
self.do_write = self.logger.error
else:
raise Exception('Unknown logging level: {}'.format(level))
def flush(self):
# Defined to match the interface expected by pexpect.
return self
def close(self):
if self.buffer:
self.logger.debug(self.buffer)
self.buffer = ''
return self
def __del__(self):
# Ensure we don't lose buffered output
self.close()
class LogWriter(BaseLogWriter):
def write(self, data):
data = data.replace('\r\n', '\n').replace('\r', '\n')
if '\n' in data:
parts = data.split('\n')
parts[0] = self.buffer + parts[0]
for part in parts[:-1]:
self.do_write(part)
self.buffer = parts[-1]
else:
self.buffer += data
return self
class LineLogWriter(BaseLogWriter):
def write(self, data):
self.do_write(data)
class StreamLogger(threading.Thread):
"""
Logs output from a stream in a thread.
"""
def __init__(self, name, stream, level=logging.DEBUG, klass=LogWriter):
super(StreamLogger, self).__init__()
self.writer = klass(name, level)
self.stream = stream
self.daemon = True
def run(self):
line = self.stream.readline()
while line:
self.writer.write(line.rstrip('\n'))
line = self.stream.readline()
self.writer.close()
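# Illustrative usage (a sketch, not part of the original commit): log a child
# process's output line by line from a background thread:
#
#     proc = subprocess.Popen(['uname', '-a'], stdout=subprocess.PIPE)
#     StreamLogger('uname', proc.stdout).start()
#     proc.wait()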

362
wa/framework/output.py Normal file
View File

@@ -0,0 +1,362 @@
import os
import shutil
import logging
import uuid
from copy import copy
from datetime import datetime, timedelta
from wa.framework import signal, log
from wa.framework.configuration.core import merge_config_values
from wa.utils import serializer
from wa.utils.misc import enum_metaclass, ensure_directory_exists as _d
from wa.utils.types import numeric
class Status(object):
__metaclass__ = enum_metaclass('values', return_name=True)
values = [
'NEW',
'PENDING',
'RUNNING',
'COMPLETE',
'OK',
'OKISH',
'NONCRITICAL',
'PARTIAL',
'FAILED',
'ABORTED',
'SKIPPED',
'UNKNOWN',
]
class WAOutput(object):
basename = '.wa-output'
@classmethod
def load(cls, source):
if os.path.isfile(source):
pod = serializer.load(source)
elif os.path.isdir(source):
pod = serializer.load(os.path.join(source, cls.basename))
else:
message = 'Cannot load {} from {}'
raise ValueError(message.format(cls.__name__, source))
return cls.from_pod(pod)
@classmethod
def from_pod(cls, pod):
instance = cls(pod['output_directory'])
instance.status = pod['status']
instance.metrics = [Metric.from_pod(m) for m in pod['metrics']]
instance.artifacts = [Artifact.from_pod(a) for a in pod['artifacts']]
instance.events = [RunEvent.from_pod(e) for e in pod['events']]
instance.classifiers = pod['classifiers']
return instance
def __init__(self, output_directory):
self.logger = logging.getLogger('output')
self.output_directory = output_directory
self.status = Status.UNKNOWN
self.classifiers = {}
self.metrics = []
self.artifacts = []
self.events = []
def initialize(self, overwrite=False):
if os.path.exists(self.output_directory):
if not overwrite:
raise RuntimeError('"{}" already exists.'.format(self.output_directory))
self.logger.info('Removing existing output directory.')
shutil.rmtree(self.output_directory)
self.logger.debug('Creating output directory {}'.format(self.output_directory))
os.makedirs(self.output_directory)
def add_metric(self, name, value, units=None, lower_is_better=False, classifiers=None):
classifiers = merge_config_values(self.classifiers, classifiers or {})
self.metrics.append(Metric(name, value, units, lower_is_better, classifiers))
def add_artifact(self, name, path, kind, *args, **kwargs):
path = _check_artifact_path(path, self.output_directory)
self.artifacts.append(Artifact(name, path, kind, Artifact.RUN, *args, **kwargs))
def get_path(self, subpath):
return os.path.join(self.output_directory, subpath)
def to_pod(self):
return {
'output_directory': self.output_directory,
'status': self.status,
'metrics': [m.to_pod() for m in self.metrics],
'artifacts': [a.to_pod() for a in self.artifacts],
'events': [e.to_pod() for e in self.events],
'classifiers': copy(self.classifiers),
}
def persist(self):
statefile = os.path.join(self.output_directory, self.basename)
with open(statefile, 'wb') as wfh:
serializer.dump(self, wfh)
class RunInfo(object):
default_name_format = 'wa-run-%y%m%d-%H%M%S'
def __init__(self, project=None, project_stage=None, name=None):
self.uuid = uuid.uuid4()
self.project = project
self.project_stage = project_stage
self.name = name or datetime.now().strftime(self.default_name_format)
self.start_time = None
self.end_time = None
self.duration = None
@staticmethod
def from_pod(pod):
instance = RunInfo()
instance.uuid = uuid.UUID(pod['uuid'])
instance.project = pod['project']
instance.project_stage = pod['project_stage']
instance.name = pod['name']
instance.start_time = pod['start_time']
instance.end_time = pod['end_time']
instance.duration = timedelta(seconds=pod['duration'])
return instance
def to_pod(self):
d = copy(self.__dict__)
d['uuid'] = str(self.uuid)
d['duration'] = self.duration.days * 3600 * 24 + self.duration.seconds
return d
class RunOutput(WAOutput):
@property
def info_directory(self):
return _d(os.path.join(self.output_directory, '_info'))
@property
def config_directory(self):
return _d(os.path.join(self.output_directory, '_config'))
@property
def failed_directory(self):
return _d(os.path.join(self.output_directory, '_failed'))
@property
def log_file(self):
return os.path.join(self.output_directory, 'run.log')
@classmethod
def from_pod(cls, pod):
instance = WAOutput.from_pod(pod)
instance.info = RunInfo.from_pod(pod['info'])
instance.jobs = [JobOutput.from_pod(i) for i in pod['jobs']]
instance.failed = [JobOutput.from_pod(i) for i in pod['failed']]
return instance
def __init__(self, output_directory):
super(RunOutput, self).__init__(output_directory)
self.logger = logging.getLogger('output')
self.info = RunInfo()
self.jobs = []
self.failed = []
def initialize(self, overwrite=False):
super(RunOutput, self).initialize(overwrite)
log.add_file(self.log_file)
self.add_artifact('runlog', self.log_file, 'log')
def create_job_output(self, id):
outdir = os.path.join(self.output_directory, id)
job_output = JobOutput(outdir)
self.jobs.append(job_output)
return job_output
def move_failed(self, job_output):
basename = os.path.basename(job_output.output_directory)
i = 1
dest = os.path.join(self.failed_directory, '{}-{}'.format(basename, i))
while os.path.exists(dest):
i += 1
dest = os.path.join(self.failed_directory, '{}-{}'.format(basename, i))
shutil.move(job_output.output_directory, dest)
def to_pod(self):
pod = super(RunOutput, self).to_pod()
pod['info'] = self.info.to_pod()
pod['jobs'] = [i.to_pod() for i in self.jobs]
pod['failed'] = [i.to_pod() for i in self.failed]
return pod
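# Usage sketch (paths and ids are illustrative; assumes the serializer
# round-trips objects via the to_pod()/from_pod() protocol): create a run
# directory, attach a job, then persist the state file so the run can
# later be re-loaded.
#
#     run_output = RunOutput('wa_output')
#     run_output.initialize(overwrite=True)
#     job_output = run_output.create_job_output('job1')
#     run_output.persist()
#     reloaded = RunOutput.load('wa_output')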
class JobOutput(WAOutput):
def add_artifact(self, name, path, kind, *args, **kwargs):
path = _check_artifact_path(path, self.output_directory)
self.artifacts.append(Artifact(name, path, kind, Artifact.ITERATION, *args, **kwargs))
class Artifact(object):
"""
This is an artifact generated during execution/post-processing of a workload.
Unlike metrics, this represents an actual artifact, such as a file, generated.
This may be "result", such as trace, or it could be "meta data" such as logs.
These are distinguished using the ``kind`` attribute, which also helps WA decide
how it should be handled. Currently supported kinds are:
:log: A log file. Not part of the "results" as such, but contains information about the
run/workload execution that may be useful for diagnostics/meta analysis.
:meta: A file containing metadata. This is not part of "results", but contains
information that may be necessary to reproduce the results (contrast with
``log`` artifacts which are *not* necessary).
:data: This file contains new data, not available otherwise and should be considered
part of the "results" generated by WA. Most traces would fall into this category.
:export: Exported version of results or some other artifact. This signifies that
this artifact does not contain any new data that is not available
elsewhere and that it may be safely discarded without losing information.
:raw: Signifies that this is a raw dump/log that is normally processed to extract
useful information and is then discarded. In a sense, it is the opposite of
``export``, but in general may also be discarded.
.. note:: whether a file is marked as ``log``/``data`` or ``raw`` depends on
how important it is to preserve this file, e.g. when archiving, vs
how much space it takes up. Unlike ``export`` artifacts which are
(almost) always ignored by other exporters as that would never result
in data loss, ``raw`` files *may* be processed by exporters if they
decide that the risk of losing potentially (though unlikely) useful
data is greater than the time/space cost of handling the artifact (e.g.
a database uploader may choose to ignore ``raw`` artifacts, whereas a
network filer archiver may choose to archive them).
.. note:: The kind parameter is intended to represent the logical function of a particular
artifact, not its intended means of processing -- this is left entirely up to the
result processors.
"""
RUN = 'run'
ITERATION = 'iteration'
valid_kinds = ['log', 'meta', 'data', 'export', 'raw']
@staticmethod
def from_pod(pod):
return Artifact(**pod)
def __init__(self, name, path, kind, level=RUN, mandatory=False, description=None):
""""
:param name: Name that uniquely identifies this artifact.
:param path: The *relative* path of the artifact. Depending on the ``level``
must be either relative to the run or iteration output directory.
Note: this path *must* be delimited using ``/`` irrespective of the
operating system.
:param kind: The type of the artifact this is (e.g. log file, result, etc.); this
will be used as a hint to result processors. This must be one of ``'log'``,
``'meta'``, ``'data'``, ``'export'``, ``'raw'``.
:param level: The level at which the artifact will be generated. Must be either
``'iteration'`` or ``'run'``.
:param mandatory: Boolean value indicating whether this artifact must be present
at the end of result processing for its level.
:param description: A free-form description of what this artifact is.
"""
if kind not in self.valid_kinds:
raise ValueError('Invalid Artifact kind: {}; must be in {}'.format(kind, self.valid_kinds))
self.name = name
self.path = path.replace('/', os.sep) if path is not None else path
self.kind = kind
self.level = level
self.mandatory = mandatory
self.description = description
def exists(self, context):
"""Returns ``True`` if artifact exists within the specified context, and
``False`` otherwise."""
fullpath = os.path.join(context.output_directory, self.path)
return os.path.exists(fullpath)
def to_pod(self):
return copy(self.__dict__)
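# Usage sketch (name and path are illustrative): declare an
# iteration-level trace file as a "data" artifact.
#
#     trace = Artifact('trace', 'trace/trace.dat', 'data',
#                      level=Artifact.ITERATION, mandatory=True,
#                      description='Raw trace captured during execution.')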
class RunEvent(object):
"""
An event that occurred during a run.
"""
@staticmethod
def from_pod(pod):
instance = RunEvent(pod['message'])
instance.timestamp = pod['timestamp']
return instance
def __init__(self, message):
self.timestamp = datetime.utcnow()
self.message = message
def to_pod(self):
return copy(self.__dict__)
def __str__(self):
return '{} {}'.format(self.timestamp, self.message)
__repr__ = __str__
class Metric(object):
"""
This is a single metric collected from executing a workload.
:param name: the name of the metric. Uniquely identifies the metric
within the results.
:param value: The numerical value of the metric for this execution of
a workload. This can be either an int or a float.
:param units: Units for the collected value. Can be None if the value
has no units (e.g. it's a count or a standardised score).
:param lower_is_better: Boolean flag indicating whether lower values are
better than higher ones. Defaults to False.
:param classifiers: A set of key-value pairs to further classify this metric
beyond current iteration (e.g. this can be used to identify
sub-tests).
"""
@staticmethod
def from_pod(pod):
return Metric(**pod)
def __init__(self, name, value, units=None, lower_is_better=False, classifiers=None):
self.name = name
self.value = numeric(value)
self.units = units
self.lower_is_better = lower_is_better
self.classifiers = classifiers or {}
def to_pod(self):
return copy(self.__dict__)
def __str__(self):
result = '{}: {}'.format(self.name, self.value)
if self.units:
result += ' ' + self.units
result += ' ({})'.format('-' if self.lower_is_better else '+')
return '<{}>'.format(result)
__repr__ = __str__
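# Usage sketch (values are illustrative):
#
#     metric = Metric('execution_time', 12.3, units='seconds',
#                     lower_is_better=True, classifiers={'iteration': 1})
#     str(metric)  # '<execution_time: 12.3 seconds (-)>'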
def _check_artifact_path(path, rootpath):
if path.startswith(rootpath):
return os.path.abspath(path)
rootpath = os.path.abspath(rootpath)
full_path = os.path.join(rootpath, path)
if not os.path.isfile(full_path):
raise ValueError('Cannot add artifact because {} does not exist.'.format(full_path))
return full_path

734
wa/framework/plugin.py Normal file
View File

@@ -0,0 +1,734 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=E1101
import os
import sys
import inspect
import imp
import string
import logging
from copy import copy
from itertools import chain
from collections import OrderedDict, defaultdict
from wa.framework import log
from wa.framework.exception import (ValidationError, ConfigError, NotFoundError,
PluginLoaderError, TargetError)
from wa.framework.configuration.core import ConfigurationPoint, ConfigurationPointCollection
from wa.utils.misc import isiterable, ensure_directory_exists as _d, get_article
from wa.utils.misc import walk_modules, merge_dicts
from wa.utils.types import identifier, integer, boolean, caseless_string
class Parameter(ConfigurationPoint):
is_runtime = False
def __init__(self, name,
kind=None,
mandatory=None,
default=None,
override=False,
allowed_values=None,
description=None,
constraint=None,
convert_types=True,
global_alias=None,
reconfigurable=True):
"""
:param global_alias: This is an alternative alias for this parameter,
unlike the name, this alias will not be
namespaced under the owning extension's name
(hence the global part). This is introduced
primarily for backward compatibility -- so that
old extension settings names still work. This
should not be used for new parameters.
:param reconfigurable: This indicates whether this parameter may be
reconfigured during the run (e.g. between different
iterations). This determines where in the run configuration
this parameter may appear.
For other parameters, see docstring for
``wa.framework.configuration.core.ConfigurationPoint``
"""
super(Parameter, self).__init__(name, kind, mandatory,
default, override, allowed_values,
description, constraint,
convert_types)
self.global_alias = global_alias
self.reconfigurable = reconfigurable
def __repr__(self):
d = copy(self.__dict__)
del d['description']
return 'Param({})'.format(d)
class PluginAliasCollection(object):
"""
Accumulator for extension attribute objects (such as Parameters). This will
replace any class member list accumulating such attributes through the magic of
metaprogramming\ [*]_.
.. [*] which is totally safe and not going to backfire in any way...
"""
@property
def values(self):
return self._attrs.values()
def __init__(self):
self._attrs = OrderedDict()
def add(self, p):
p = self._to_attrcls(p)
if p.name in self._attrs:
if p.override:
newp = copy(self._attrs[p.name])
for a, v in p.__dict__.iteritems():
if v is not None:
setattr(newp, a, v)
self._attrs[p.name] = newp
else:
# Duplicate attribute condition is checked elsewhere.
pass
else:
self._attrs[p.name] = p
append = add
def __str__(self):
return 'AC({})'.format(map(str, self._attrs.values()))
__repr__ = __str__
def _to_attrcls(self, p):
if isinstance(p, tuple) or isinstance(p, list):
# must be in the form (name, {param: value, ...})
p = Alias(p[0], **p[1])
elif not isinstance(p, Alias):
raise ValueError('Invalid parameter value: {}'.format(p))
if p.name in self._attrs:
raise ValueError('Attribute {} has already been defined.'.format(p.name))
return p
def __iadd__(self, other):
for p in other:
self.add(p)
return self
def __iter__(self):
return iter(self.values)
def __contains__(self, p):
return p in self._attrs
def __getitem__(self, i):
return self._attrs[i]
def __len__(self):
return len(self._attrs)
class Alias(object):
"""
This represents a configuration alias for an extension, mapping an alternative name to
a set of parameter values, effectively providing an alternative set of default values.
"""
def __init__(self, name, **kwargs):
self.name = name
self.parameters = kwargs
self.plugin_name = None # gets set by the MetaClass
def validate(self, plugin):
plugin_params = set(p.name for p in plugin.parameters)
for param in self.parameters:
if param not in plugin_params:
# Raising config error because aliases might have come through
# the config.
msg = 'Parameter {} (defined in alias {}) is invalid for {}'
raise ValueError(msg.format(param, self.name, plugin.name))
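# Usage sketch (workload and parameter names are hypothetical): an alias
# maps an alternative name to preset parameter values.
#
#     class MyWorkload(Plugin):
#         name = 'my_workload'
#         kind = 'workload'
#         parameters = [Parameter('resolution', default='720p')]
#         aliases = [Alias('my_workload_1080p', resolution='1080p')]
#
# Resolving 'my_workload_1080p' through the loader would then yield
# ('my_workload', {'resolution': '1080p'}).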
class PluginMeta(type):
"""
This basically adds some magic to plugins to make implementing new plugins,
such as workloads, less complicated.
It ensures that certain class attributes (specified by the ``to_propagate``
attribute of the metaclass) get propagated down the inheritance hierarchy. The assumption
is that the values of the attributes specified in the class are iterable; if that is not met,
Bad Things(tm) will happen.
This also provides "virtual" method implementations. The ``super``'s version of these
methods (specified by the ``virtual_methods`` attribute of the metaclass) will be
automatically invoked.
"""
to_propagate = [
('parameters', ConfigurationPointCollection),
]
#virtual_methods = ['validate', 'initialize', 'finalize']
virtual_methods = []
def __new__(mcs, clsname, bases, attrs):
mcs._propagate_attributes(bases, attrs)
cls = type.__new__(mcs, clsname, bases, attrs)
mcs._setup_aliases(cls)
mcs._implement_virtual(cls, bases)
return cls
@classmethod
def _propagate_attributes(mcs, bases, attrs):
"""
For attributes specified by to_propagate, their values will be a union of
that specified for cls and its bases (cls values overriding those of bases
in case of conflicts).
"""
for prop_attr, attr_collector_cls in mcs.to_propagate:
should_propagate = False
propagated = attr_collector_cls()
for base in bases:
if hasattr(base, prop_attr):
propagated += getattr(base, prop_attr) or []
should_propagate = True
if prop_attr in attrs:
propagated += attrs[prop_attr] or []
should_propagate = True
if should_propagate:
attrs[prop_attr] = propagated
@classmethod
def _setup_aliases(mcs, cls):
if hasattr(cls, 'aliases'):
aliases, cls.aliases = cls.aliases, PluginAliasCollection()
for alias in aliases:
if isinstance(alias, basestring):
alias = Alias(alias)
alias.validate(cls)
alias.plugin_name = cls.name
cls.aliases.add(alias)
@classmethod
def _implement_virtual(mcs, cls, bases):
"""
This implements automatic method propagation to the bases, so
that you don't have to do something like
super(cls, self).vmname()
This also ensures that the methods that have been identified as
"globally virtual" are executed exactly once per WA execution, even if
invoked through instances of different subclasses.
"""
methods = {}
called_globals = set()
for vmname in mcs.virtual_methods:
clsmethod = getattr(cls, vmname, None)
if clsmethod:
basemethods = [getattr(b, vmname) for b in bases if hasattr(b, vmname)]
methods[vmname] = [bm for bm in basemethods if bm != clsmethod]
methods[vmname].append(clsmethod)
def generate_method_wrapper(vname):
# this creates a closure with the method name so that it
# does not need to be passed to the wrapper as an argument,
# leaving the wrapper to accept exactly the same set of
# arguments as the method it is wrapping.
name__ = vname
def wrapper(self, *args, **kwargs):
for dm in methods[name__]:
dm(self, *args, **kwargs)
return wrapper
setattr(cls, vmname, generate_method_wrapper(vmname))
class Plugin(object):
"""
Base class for all WA plugins.
A plugin extends the functionality of WA in some way. Plugins are discovered
and loaded dynamically by the plugin loader upon invocation of WA scripts.
Adding a plugin is a matter of placing a class that implements an appropriate
interface somewhere it would be discovered by the loader. That "somewhere" is
typically one of the plugin subdirectories under ``~/.workload_automation/``.
"""
__metaclass__ = PluginMeta
name = None
kind = None
parameters = []
aliases = []
@classmethod
def get_default_config(cls):
return {p.name: p.default for p in cls.parameters}
@classmethod
def get_parameter(cls, name):
for param in cls.parameters:
if param.name == name or name in param.aliases:
return param
def __init__(self, **kwargs):
self.logger = logging.getLogger(self.name)
self.capabilities = getattr(self.__class__, 'capabilities', [])
self.update_config(**kwargs)
def get_config(self):
"""
Returns current configuration (i.e. parameter values) of this plugin.
"""
config = {}
for param in self.parameters:
config[param.name] = getattr(self, param.name, None)
return config
def update_config(self, **kwargs):
"""
Updates current configuration (i.e. parameter values) of this plugin.
"""
for param in self.parameters:
param.set_value(self, kwargs.get(param.name))
for key in kwargs:
if key not in self.parameters:
message = 'Unexpected parameter "{}" for {}'
raise ConfigError(message.format(key, self.name))
def validate(self):
"""
Perform basic validation to ensure that this extension is capable of running.
This is intended as an early check to ensure the extension has not been mis-configured,
rather than a comprehensive check (that may, e.g., require access to the execution
context).
This method may also be used to enforce (i.e. set as well as check) inter-parameter
constraints for the extension (e.g. if valid values for parameter A depend on the value
of parameter B -- something that is not possible to enforce using ``Parameter``\ 's
``constraint`` attribute).
"""
if self.name is None:
raise ValidationError('name not set for {}'.format(self.__class__.__name__))
if self.kind is None:
raise ValidationError('kind not set for {}'.format(self.name))
for param in self.parameters:
param.validate(self)
def initialize(self, context):
pass
def finalize(self, context):
pass
def has(self, capability):
"""Check if this extension has the specified capability. The alternative method ``can`` is
identical to this. Which to use is up to the caller depending on what makes semantic sense
in the context of the capability, e.g. ``can('hard_reset')`` vs ``has('active_cooling')``."""
return capability in self.capabilities
can = has
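# A minimal plugin sketch (hypothetical names) showing parameter
# declaration and the config helpers above; this assumes
# ConfigurationPoint.set_value() assigns the attribute on the instance.
#
#     class ExamplePlugin(Plugin):
#         name = 'example'
#         kind = 'example_kind'
#         parameters = [
#             Parameter('iterations', kind=integer, default=1,
#                       description='Number of times to repeat.'),
#         ]
#
#     plugin = ExamplePlugin(iterations=3)
#     plugin.get_config()  # {'iterations': 3}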
class TargetedPluginMeta(PluginMeta):
to_propagate = PluginMeta.to_propagate + [
('supported_targets', list),
('supported_platforms', list),
]
virtual_methods = PluginMeta.virtual_methods + [
'validate_on_target',
]
class TargetedPlugin(Plugin):
"""
A plugin that operates on a target device. These kinds of plugins are created
with a ``devlib.Target`` instance and may only support certain kinds of targets.
"""
__metaclass__ = TargetedPluginMeta
supported_targets = []
supported_platforms = []
def __init__(self, target, **kwargs):
super(TargetedPlugin, self).__init__(**kwargs)
if self.supported_targets and target.os not in self.supported_targets:
raise TargetError('Plugin {} does not support target OS {}'.format(self.name, target.os))
if self.supported_platforms and target.platform.name not in self.supported_platforms:
raise TargetError('Plugin {} does not support platform {}'.format(self.name, target.platform.name))
self.target = target
def validate_on_target(self):
"""
This will be invoked once at the beginning of a run after a ``Target``
has been connected and initialized. This is intended for validation
that cannot be performed offline but does not depend on ephemeral
state that is likely to change during the course of a run (validation
against such states should be done during setup of a particular
execution).
"""
pass
class GlobalParameterAlias(object):
"""
Represents a "global alias" for an plugin parameter. A global alias
is specified at the top-level of config rather namespaced under an plugin
name.
Multiple plugins may have parameters with the same global_alias if they are
part of the same inheritance hierarchy and one parameter is an override of the
other. This class keeps track of all such cases in its plugins dict.
"""
def __init__(self, name):
self.name = name
self.plugins = {}
def iteritems(self):
for ext in self.plugins.itervalues():
yield (self.get_param(ext), ext)
def get_param(self, ext):
for param in ext.parameters:
if param.global_alias == self.name:
return param
message = 'Plugin {} does not have a parameter with global alias {}'
raise ValueError(message.format(ext.name, self.name))
def update(self, other_ext):
self._validate_ext(other_ext)
self.plugins[other_ext.name] = other_ext
def _validate_ext(self, other_ext):
other_param = self.get_param(other_ext)
for param, ext in self.iteritems():
if ((not (issubclass(ext, other_ext) or issubclass(other_ext, ext))) and
other_param.kind != param.kind):
message = 'Duplicate global alias {} declared in {} and {} plugins with different types'
raise PluginLoaderError(message.format(self.name, ext.name, other_ext.name))
if not param.name == other_param.name:
message = 'Two params {} in {} and {} in {} both declare global alias {}'
raise PluginLoaderError(message.format(param.name, ext.name,
other_param.name, other_ext.name, self.name))
def __str__(self):
text = 'GlobalAlias({} => {})'
extlist = ', '.join(['{}.{}'.format(e.name, p.name) for p, e in self.iteritems()])
return text.format(self.name, extlist)
MODNAME_TRANS = string.maketrans(':/\\.', '____')
class PluginLoader(object):
"""
Discovers, enumerates and loads available devices, configs, etc.
The loader will attempt to discover things on construction by looking
in a predetermined set of locations defined by default_paths. Optionally,
additional locations may be specified through the ``paths`` parameter, which
must be a list of additional Python module paths (i.e. dot-delimited).
"""
def __init__(self, packages=None, paths=None, ignore_paths=None, keep_going=False):
"""
:param packages: List of packages to load plugins from.
:param paths: List of paths to be searched for Python modules containing
WA plugins.
:param ignore_paths: List of paths to ignore when searching for WA plugins
(these would typically be subdirectories of one or more
locations listed in the ``paths`` parameter).
:param keep_going: Specifies whether to keep going if an error occurs while
loading plugins.
"""
self.logger = logging.getLogger('pluginloader')
self.keep_going = keep_going
self.packages = packages or []
self.paths = paths or []
self.ignore_paths = ignore_paths or []
self.plugins = {}
self.kind_map = defaultdict(dict)
self.aliases = {}
self.global_param_aliases = {}
self._discover_from_packages(self.packages)
self._discover_from_paths(self.paths, self.ignore_paths)
def update(self, packages=None, paths=None, ignore_paths=None):
""" Load plugins from the specified paths/packages
without clearing or reloading existing plugins. """
if packages:
self.packages.extend(packages)
self._discover_from_packages(packages)
if paths:
self.paths.extend(paths)
self.ignore_paths.extend(ignore_paths or [])
self._discover_from_paths(paths, ignore_paths or [])
def clear(self):
""" Clear all discovered items. """
self.plugins = {}
self.kind_map.clear()
def reload(self):
""" Clear all discovered items and re-run the discovery. """
self.clear()
self._discover_from_packages(self.packages)
self._discover_from_paths(self.paths, self.ignore_paths)
def get_plugin_class(self, name, kind=None):
"""
Return the class for the specified plugin if found; raises ``NotFoundError`` or ``ValueError`` otherwise.
"""
name, _ = self.resolve_alias(name)
if kind is None:
try:
return self.plugins[name]
except KeyError:
raise NotFoundError('Plugin {} not found.'.format(name))
if kind not in self.kind_map:
raise ValueError('Unknown plugin type: {}'.format(kind))
store = self.kind_map[kind]
if name not in store:
raise NotFoundError('Plugin {} is not {} {}.'.format(name, get_article(kind), kind))
return store[name]
def get_plugin(self, name, kind=None, *args, **kwargs):
"""
Return plugin of the specified kind with the specified name. Any
additional parameters will be passed to the plugin's __init__.
"""
name, base_kwargs = self.resolve_alias(name)
kwargs = OrderedDict(chain(base_kwargs.iteritems(), kwargs.iteritems()))
cls = self.get_plugin_class(name, kind)
plugin = cls(*args, **kwargs)
return plugin
def get_default_config(self, name):
"""
Returns the default configuration for the specified plugin name. The
name may be an alias, in which case, the returned config will be
augmented with appropriate alias overrides.
"""
real_name, alias_config = self.resolve_alias(name)
base_default_config = self.get_plugin_class(real_name).get_default_config()
return merge_dicts(base_default_config, alias_config, list_duplicates='last', dict_type=OrderedDict)
def list_plugins(self, kind=None):
"""
List discovered plugin classes. Optionally, only list plugins of a
particular type.
"""
if kind is None:
return self.plugins.values()
if kind not in self.kind_map:
raise ValueError('Unknown plugin type: {}'.format(kind))
return self.kind_map[kind].values()
def has_plugin(self, name, kind=None):
"""
Returns ``True`` if a plugin with the specified ``name`` has been
discovered by the loader. If ``kind`` was specified, only returns ``True``
if the plugin has been found, *and* it is of the specified kind.
"""
try:
self.get_plugin_class(name, kind)
return True
except NotFoundError:
return False
def resolve_alias(self, alias_name):
"""
Try to resolve the specified name as a plugin alias. Returns a
two-tuple, the first value of which is the actual plugin name, and the
second is a dict of parameter values for this alias. If the name passed
is already a plugin name, then the result is ``(alias_name, {})``.
"""
alias_name = identifier(alias_name.lower())
if alias_name in self.plugins:
return (alias_name, {})
if alias_name in self.aliases:
alias = self.aliases[alias_name]
return (alias.plugin_name, alias.parameters)
raise NotFoundError('Could not find plugin or alias "{}"'.format(alias_name))
# Internal methods.
def __getattr__(self, name):
"""
This resolves methods for specific plugin types based on corresponding
generic plugin methods. So it's possible to say things like ::
loader.get_device('foo')
instead of ::
loader.get_plugin('foo', kind='device')
"""
if name.startswith('get_'):
name = name.replace('get_', '', 1)
if name in self.kind_map:
def __wrapper(pname, *args, **kwargs):
return self.get_plugin(pname, name, *args, **kwargs)
return __wrapper
if name.startswith('list_'):
name = name.replace('list_', '', 1).rstrip('s')
if name in self.kind_map:
def __wrapper(*args, **kwargs):
return self.list_plugins(name, *args, **kwargs)
return __wrapper
if name.startswith('has_'):
name = name.replace('has_', '', 1)
if name in self.kind_map:
def __wrapper(pname, *args, **kwargs):
return self.has_plugin(pname, name, *args, **kwargs)
return __wrapper
raise AttributeError(name)
def _discover_from_packages(self, packages):
self.logger.debug('Discovering plugins in packages')
try:
for package in packages:
for module in walk_modules(package):
self._discover_in_module(module)
except ImportError as e:
source = getattr(e, 'path', package)
message = 'Problem loading plugins from {}: {}'
raise PluginLoaderError(message.format(source, e.message))
def _discover_from_paths(self, paths, ignore_paths):
paths = paths or []
ignore_paths = ignore_paths or []
self.logger.debug('Discovering plugins in paths')
for path in paths:
self.logger.debug('Checking path %s', path)
if os.path.isfile(path):
self._discover_from_file(path)
continue
for root, _, files in os.walk(path, followlinks=True):
should_skip = False
for igpath in ignore_paths:
if root.startswith(igpath):
should_skip = True
break
if should_skip:
continue
for fname in files:
if not os.path.splitext(fname)[1].lower() == '.py':
continue
filepath = os.path.join(root, fname)
self._discover_from_file(filepath)
def _discover_from_file(self, filepath):
try:
modname = os.path.splitext(filepath[1:])[0].translate(MODNAME_TRANS)
module = imp.load_source(modname, filepath)
self._discover_in_module(module)
except (SystemExit, ImportError) as e:
if self.keep_going:
self.logger.warning('Failed to load {}'.format(filepath))
self.logger.warning('Got: {}'.format(e))
else:
raise PluginLoaderError('Failed to load {}'.format(filepath), sys.exc_info())
except Exception as e:
message = 'Problem loading plugins from {}: {}'
raise PluginLoaderError(message.format(filepath, e))
def _discover_in_module(self, module): # NOQA pylint: disable=too-many-branches
self.logger.debug('Checking module %s', module.__name__)
log.indent()
try:
for obj in vars(module).itervalues():
if inspect.isclass(obj):
if not issubclass(obj, Plugin):
continue
if not obj.kind:
message = 'Skipping plugin {} as it does not define a kind'
self.logger.debug(message.format(obj.__name__))
continue
if not obj.name:
message = 'Skipping {} {} as it does not define a name'
self.logger.debug(message.format(obj.kind, obj.__name__))
continue
try:
self._add_found_plugin(obj)
except PluginLoaderError as e:
if self.keep_going:
self.logger.warning(e)
else:
raise e
finally:
log.dedent()
def _add_found_plugin(self, obj):
"""
:obj: Found plugin class
:ext: matching plugin item.
"""
self.logger.debug('Adding %s %s', obj.kind, obj.name)
key = identifier(obj.name.lower())
if key in self.plugins or key in self.aliases:
raise PluginLoaderError('{} "{}" already exists.'.format(obj.kind, obj.name))
# Plugins are tracked both in a common plugins
# dict and in a per-kind dict (as retrieving
# plugins by kind is a common use case).
self.plugins[key] = obj
self.kind_map[obj.kind][key] = obj
for alias in obj.aliases:
alias_id = identifier(alias.name.lower())
if alias_id in self.plugins or alias_id in self.aliases:
raise PluginLoaderError('Alias "{}" for {} "{}" already exists.'.format(alias.name, obj.kind, obj.name))
self.aliases[alias_id] = alias
# Update global aliases list. If a global alias is already in the list,
# then make sure this plugin is in the same parent/child hierarchy
# as the one already found.
for param in obj.parameters:
if param.global_alias:
if param.global_alias not in self.global_param_aliases:
ga = GlobalParameterAlias(param.global_alias)
ga.update(obj)
self.global_param_aliases[ga.name] = ga
else: # global alias already exists.
self.global_param_aliases[param.global_alias].update(obj)
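# Usage sketch (package and plugin names are illustrative): once a kind
# has been discovered, __getattr__ above generates kind-specific
# accessors on the fly.
#
#     loader = PluginLoader(packages=['wa.workloads'])
#     loader.get_plugin('dhrystone', kind='workload')
#     loader.get_workload('dhrystone')   # equivalent, via __getattr__
#     loader.list_workloads()
#     loader.has_workload('dhrystone')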

69
wa/framework/pluginloader.py Normal file
View File

@@ -0,0 +1,69 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
class __LoaderWrapper(object):
def __init__(self):
self._loader = None
def reset(self):
# These imports cannot be done at top level, because of
# sys.modules manipulation below
from wa.framework.plugin import PluginLoader
from wa.framework.configuration.core import settings
self._loader = PluginLoader(settings.plugin_packages,
settings.plugin_paths,
settings.plugin_ignore_paths)
def update(self, packages=None, paths=None, ignore_paths=None):
if not self._loader: self.reset()
self._loader.update(packages, paths, ignore_paths)
def reload(self):
if not self._loader: self.reset()
self._loader.reload()
def list_plugins(self, kind=None):
if not self._loader: self.reset()
return self._loader.list_plugins(kind)
def has_plugin(self, name, kind=None):
if not self._loader: self.reset()
return self._loader.has_plugin(name, kind)
def get_plugin_class(self, name, kind=None):
if not self._loader: self.reset()
return self._loader.get_plugin_class(name, kind)
def get_plugin(self, name, kind=None, *args, **kwargs):
if not self._loader: self.reset()
return self._loader.get_plugin(name, kind=kind, *args, **kwargs)
def get_default_config(self, name):
if not self._loader: self.reset()
return self._loader.get_default_config(name)
def resolve_alias(self, name):
if not self._loader: self.reset()
return self._loader.resolve_alias(name)
def __getattr__(self, name):
if not self._loader: self.reset()
return getattr(self._loader, name)
sys.modules[__name__] = __LoaderWrapper()
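# Because the module object is replaced with a __LoaderWrapper instance,
# callers treat the module itself as a lazily-initialised loader singleton:
#
#     from wa.framework import pluginloader
#     pluginloader.list_plugins()  # triggers reset() on first use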

711
wa/framework/resource.py Normal file
View File

@@ -0,0 +1,711 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import glob
import shutil
import inspect
import logging
from collections import defaultdict
from wa.framework import pluginloader
from wa.framework.plugin import Plugin, Parameter
from wa.framework.exception import ResourceError
from wa.framework.configuration import settings
from wa.utils.misc import ensure_directory_exists as _d
from wa.utils.types import boolean
from wa.utils.types import prioritylist
class GetterPriority(object):
"""
Enumerates standard ResourceGetter priorities. In general, getters should
register under one of these, rather than specifying other priority values.
:cached: The cached version of the resource. Look here first. This
priority also implies that the resource at this location is
a "cache" and is not the only version of the resource, so it
may be cleared without losing access to the resource.
:preferred: Take this resource in favour of the environment resource.
:environment: Found somewhere under ~/.workload_automation/ or equivalent,
or from environment variables, external configuration
files, etc. These will override resources supplied with
the package.
:external_package: Resource provided by another package.
:package: Resource provided with the package.
:remote: Resource will be downloaded from a remote location
(such as an HTTP server or a samba share). Try this
only if no other getter was successful.
"""
cached = 20
preferred = 10
environment = 0
external_package = -5
package = -10
remote = -20
class Resource(object):
"""
Represents a resource that needs to be resolved. This can be pretty much
anything: a file, environment variable, a Python object, etc. The only
thing a resource *has* to have is an owner (which would normally be the
Workload/Instrument/Device/etc object that needs the resource). In
addition, a resource may have any number of attributes to identify it, but
all of them are resource type specific.
"""
name = None
def __init__(self, owner):
self.owner = owner
def delete(self, instance):
"""
Delete an instance of this resource type. This must be implemented
by the concrete subclasses based on what the resource looks like,
e.g. deleting a file or a directory tree, or removing an entry from
a database.
:note: Implementation should *not* contain any logic for deciding
whether or not a resource should be deleted, only the actual
deletion. The assumption is that if this method is invoked,
then the decision has already been made.
"""
raise NotImplementedError()
def __str__(self):
return '<{}\'s {}>'.format(self.owner, self.name)
class ResourceGetter(Plugin):
"""
Base class for implementing resolvers. Defines resolver
interface. Resolvers are responsible for discovering resources (such as
particular kinds of files) they know about based on the parameters that are
passed to them. Each resolver also has a dict of attributes that describe
its operation, and may be used to determine which getters are invoked. There is
no pre-defined set of attributes and resolvers may define their own.
Class attributes:
:name: Name that uniquely identifies this getter. Must be set by any
concrete subclass.
:resource_type: Identifies resource type(s) that this getter can
handle. This must be either a string (for a single type)
or a list of strings for multiple resource types. This
must be set by any concrete subclass.
:priority: Priority with which this getter will be invoked. This should
be one of the standard priorities specified in
``GetterPriority`` enumeration. If not set, this will default
to ``GetterPriority.environment``.
"""
name = None
kind = 'resource_getter'
resource_type = None
priority = GetterPriority.environment
def __init__(self, resolver, **kwargs):
super(ResourceGetter, self).__init__(**kwargs)
self.resolver = resolver
def register(self):
"""
Registers with a resource resolver. Concrete implementations must
override this to invoke ``self.resolver.register()`` method to register
``self`` for specific resource types.
"""
if self.resource_type is None:
message = 'No resource type specified for {}'
raise ValueError(message.format(self.name))
elif isinstance(self.resource_type, list):
for rt in self.resource_type:
self.resolver.register(self, rt, self.priority)
else:
self.resolver.register(self, self.resource_type, self.priority)
def unregister(self):
"""Unregister from a resource resolver."""
if self.resource_type is None:
message = 'No resource type specified for {}'
raise ValueError(message.format(self.name))
elif isinstance(self.resource_type, list):
for rt in self.resource_type:
self.resolver.unregister(self, rt)
else:
self.resolver.unregister(self, self.resource_type)
def get(self, resource, **kwargs):
"""
This will get invoked by the resolver when attempting to resolve a
resource, passing in the resource to be resolved as the first
parameter. Any additional parameters would be specific to a particular
resource type.
This method will only be invoked for resource types that the getter has
registered for.
:param resource: an instance of :class:`wa.framework.resource.Resource`.
:returns: Implementations of this method must return either the
discovered resource or ``None`` if the resource could not
be discovered.
"""
raise NotImplementedError()
def delete(self, resource, *args, **kwargs):
"""
Delete the resource if it is discovered. All arguments are passed to a
call to ``self.get()``. If that call returns a resource, it is deleted.
:returns: ``True`` if the specified resource has been discovered
and deleted, and ``False`` otherwise.
"""
discovered = self.get(resource, *args, **kwargs)
if discovered:
resource.delete(discovered)
return True
else:
return False
def __str__(self):
return '<ResourceGetter {}>'.format(self.name)
class ResourceResolver(object):
"""
Discovers and registers getters, and then handles requests for
resources using registered getters.
"""
def __init__(self):
self.logger = logging.getLogger('resolver')
self.getters = defaultdict(prioritylist)
def load(self, loader=pluginloader):
"""
Discover getters using the specified loader (the ``pluginloader``
module by default) and register them with this resolver.
"""
for rescls in loader.list_resource_getters():
getter = loader.get_resource_getter(rescls.name, resolver=self)
getter.register()
def get(self, resource, strict=True, *args, **kwargs):
"""
Uses registered getters to attempt to discover a resource of the specified
kind and matching the specified criteria. Returns path to the resource that
has been discovered. If a resource has not been discovered, this will raise
a ``ResourceError`` or, if ``strict`` has been set to ``False``, will return
``None``.
"""
self.logger.debug('Resolving {}'.format(resource))
for getter in self.getters[resource.name]:
self.logger.debug('Trying {}'.format(getter))
result = getter.get(resource, *args, **kwargs)
if result is not None:
self.logger.debug('Resource {} found using {}:'.format(resource, getter))
self.logger.debug('\t{}'.format(result))
return result
if strict:
raise ResourceError('{} could not be found'.format(resource))
self.logger.debug('Resource {} not found.'.format(resource))
return None
def register(self, getter, kind, priority=0):
"""
Register the specified resource getter as being able to discover a resource
of the specified kind with the specified priority.
This method would typically be invoked by a getter from its ``register()`` method.
The idea being that getters register themselves for resources they know
they can discover.
*priorities*
getters that are registered with the highest priority will be invoked first. If
multiple getters are registered under the same priority, they will be invoked
in the order they were registered (i.e. in the order they were discovered). This is
essentially non-deterministic.
Generally getters that are more likely to find a resource, or would find a
"better" version of the resource should register with higher (positive) priorities.
Fall-back getters that should only be invoked if a resource is not found by usual
means should register with lower (negative) priorities.
"""
self.logger.debug('Registering {}'.format(getter.name))
self.getters[kind].add(getter, priority)
def unregister(self, getter, kind):
"""
Unregister a getter that has been registered earlier.
"""
self.logger.debug('Unregistering {}'.format(getter.name))
try:
self.getters[kind].remove(getter)
except ValueError:
raise ValueError('Resource getter {} is not installed.'.format(getter.name))
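# Usage sketch (owner and path are illustrative): load the discovered
# getters, then resolve a File resource, falling back to None when it
# cannot be found.
#
#     resolver = ResourceResolver()
#     resolver.load()
#     path = resolver.get(File(NO_ONE, 'perf.bin'), strict=False)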
class __NullOwner(object):
"""Represents an owner for a resource not owned by anyone."""
name = 'noone'
dependencies_directory = settings.dependencies_directory
def __getattr__(self, name):
return None
def __str__(self):
return 'no-one'
__repr__ = __str__
NO_ONE = __NullOwner()
class FileResource(Resource):
"""
Base class for all resources that are a regular file in the
file system.
"""
def delete(self, instance):
os.remove(instance)
class File(FileResource):
name = 'file'
def __init__(self, owner, path, url=None):
super(File, self).__init__(owner)
self.path = path
self.url = url
def __str__(self):
return '<{}\'s {} {}>'.format(self.owner, self.name, self.path or self.url)
class ExtensionAsset(File):
name = 'extension_asset'
def __init__(self, owner, path):
super(ExtensionAsset, self).__init__(
owner, os.path.join(owner.name, path))
class Executable(FileResource):
name = 'executable'
def __init__(self, owner, platform, filename):
super(Executable, self).__init__(owner)
self.platform = platform
self.filename = filename
def __str__(self):
return '<{}\'s {} {}>'.format(self.owner, self.platform, self.filename)
class ReventFile(FileResource):
name = 'revent'
def __init__(self, owner, stage):
super(ReventFile, self).__init__(owner)
self.stage = stage
class JarFile(FileResource):
name = 'jar'
class ApkFile(FileResource):
name = 'apk'
class PackageFileGetter(ResourceGetter):
name = 'package_file'
description = """
Looks for exactly one file with the specified extension in the owner's
directory. If a version is specified on invocation of get, it will filter
the discovered file based on that version. Versions are treated as
case-insensitive.
"""
extension = None
def register(self):
self.resolver.register(self, self.extension, GetterPriority.package)
def get(self, resource, **kwargs):
resource_dir = os.path.dirname(
sys.modules[resource.owner.__module__].__file__)
version = kwargs.get('version')
return get_from_location_by_extension(resource, resource_dir, self.extension, version)
class EnvironmentFileGetter(ResourceGetter):
name = 'environment_file'
description = """
Looks for exactly one file with the specified extension in the owner's
directory. If a version is specified on invocation of get, it will filter
the discovered file based on that version. Versions are treated as
case-insensitive.
"""
extension = None
def register(self):
self.resolver.register(self, self.extension,
GetterPriority.environment)
def get(self, resource, **kwargs):
resource_dir = resource.owner.dependencies_directory
version = kwargs.get('version')
return get_from_location_by_extension(resource, resource_dir, self.extension, version)
class ReventGetter(ResourceGetter):
"""Implements logic for identifying revent files."""
def get_base_location(self, resource):
raise NotImplementedError()
def register(self):
self.resolver.register(self, 'revent', GetterPriority.package)
def get(self, resource, **kwargs):
filename = '.'.join([resource.owner.device.name,
resource.stage, 'revent']).lower()
location = _d(os.path.join(
self.get_base_location(resource), 'revent_files'))
for candidate in os.listdir(location):
if candidate.lower() == filename.lower():
return os.path.join(location, candidate)
class PackageApkGetter(PackageFileGetter):
name = 'package_apk'
extension = 'apk'
class PackageJarGetter(PackageFileGetter):
name = 'package_jar'
extension = 'jar'
class PackageReventGetter(ReventGetter):
name = 'package_revent'
def get_base_location(self, resource):
return _get_owner_path(resource)
class EnvironmentApkGetter(EnvironmentFileGetter):
name = 'environment_apk'
extension = 'apk'
class EnvironmentJarGetter(EnvironmentFileGetter):
name = 'environment_jar'
extension = 'jar'
class EnvironmentReventGetter(ReventGetter):
name = 'environment_revent'
def get_base_location(self, resource):
return resource.owner.dependencies_directory
class ExecutableGetter(ResourceGetter):
name = 'exe_getter'
resource_type = 'executable'
priority = GetterPriority.environment
def get(self, resource, **kwargs):
if settings.binaries_repository:
path = os.path.join(settings.binaries_repository,
resource.platform, resource.filename)
if os.path.isfile(path):
return path
class PackageExecutableGetter(ExecutableGetter):
name = 'package_exe_getter'
priority = GetterPriority.package
def get(self, resource, **kwargs):
path = os.path.join(_get_owner_path(resource), 'bin',
resource.platform, resource.filename)
if os.path.isfile(path):
return path
class EnvironmentExecutableGetter(ExecutableGetter):
name = 'env_exe_getter'
def get(self, resource, **kwargs):
paths = [
os.path.join(resource.owner.dependencies_directory, 'bin',
resource.platform, resource.filename),
os.path.join(settings.environment_root, 'bin',
resource.platform, resource.filename),
]
for path in paths:
if os.path.isfile(path):
return path
class DependencyFileGetter(ResourceGetter):
name = 'filer'
description = """
Gets resources from the specified mount point. Copies them to the local
dependencies directory, and returns the path to the local copy.
"""
resource_type = 'file'
relative_path = '' # May be overridden by subclasses.
default_mount_point = '/'
priority = GetterPriority.remote
parameters = [
Parameter('mount_point', default='/', global_alias='filer_mount_point',
description='Local mount point for the remote filer.'),
]
def __init__(self, resolver, **kwargs):
super(DependencyFileGetter, self).__init__(resolver, **kwargs)
self.mount_point = settings.filer_mount_point or self.default_mount_point
def get(self, resource, **kwargs):
force = kwargs.get('force')
remote_path = os.path.join(
self.mount_point, self.relative_path, resource.path)
local_path = os.path.join(
resource.owner.dependencies_directory, os.path.basename(resource.path))
if not os.path.isfile(local_path) or force:
if not os.path.isfile(remote_path):
return None
self.logger.debug('Copying {} to {}'.format(
remote_path, local_path))
shutil.copy(remote_path, local_path)
return local_path
class PackageCommonDependencyGetter(ResourceGetter):
name = 'packaged_common_dependency'
resource_type = 'file'
priority = GetterPriority.package - 1 # check after owner-specific locations
def get(self, resource, **kwargs):
path = os.path.join(settings.package_directory,
'common', resource.path)
if os.path.exists(path):
return path
class EnvironmentCommonDependencyGetter(ResourceGetter):
name = 'environment_common_dependency'
resource_type = 'file'
# check after owner-specific locations
priority = GetterPriority.environment - 1
def get(self, resource, **kwargs):
path = os.path.join(settings.dependencies_directory,
os.path.basename(resource.path))
if os.path.exists(path):
return path
class PackageDependencyGetter(ResourceGetter):
name = 'packaged_dependency'
resource_type = 'file'
priority = GetterPriority.package
def get(self, resource, **kwargs):
owner_path = inspect.getfile(resource.owner.__class__)
path = os.path.join(os.path.dirname(owner_path), resource.path)
if os.path.exists(path):
return path
class EnvironmentDependencyGetter(ResourceGetter):
name = 'environment_dependency'
resource_type = 'file'
priority = GetterPriority.environment
def get(self, resource, **kwargs):
path = os.path.join(resource.owner.dependencies_directory,
os.path.basename(resource.path))
if os.path.exists(path):
return path
class ExtensionAssetGetter(DependencyFileGetter):
name = 'extension_asset'
resource_type = 'extension_asset'
relative_path = 'workload_automation/assets'
class RemoteFilerGetter(ResourceGetter):
name = 'filer_assets'
description = """
Finds resources on a (locally mounted) remote filer and caches them locally.
This assumes that the filer is mounted on the local machine (e.g. as a samba share).
"""
priority = GetterPriority.remote
resource_type = ['apk', 'file', 'jar', 'revent']
parameters = [
Parameter('remote_path', global_alias='remote_assets_path', default='',
description="""
Path, on the local system, where the assets are located.
"""),
Parameter('always_fetch', kind=boolean, default=False, global_alias='always_fetch_remote_assets',
description="""
If ``True``, will always attempt to fetch assets from the
remote, even if a local cached copy is available.
"""),
]
def get(self, resource, **kwargs):
version = kwargs.get('version')
if resource.owner:
remote_path = os.path.join(self.remote_path, resource.owner.name)
local_path = os.path.join(
settings.environment_root, resource.owner.dependencies_directory)
return self.try_get_resource(resource, version, remote_path, local_path)
else:
result = None
for entry in os.listdir(self.remote_path):
remote_path = os.path.join(self.remote_path, entry)
local_path = os.path.join(
settings.environment_root, settings.dependencies_directory, entry)
result = self.try_get_resource(
resource, version, remote_path, local_path)
if result:
break
return result
def try_get_resource(self, resource, version, remote_path, local_path):
if not self.always_fetch:
result = self.get_from(resource, version, local_path)
if result:
return result
if remote_path:
# Didn't find it cached locally; now check the remote.
result = self.get_from(resource, version, remote_path)
if not result:
return result
else: # remote path is not set
return None
# Found it remotely, cache locally, then return it
local_full_path = os.path.join(
_d(local_path), os.path.basename(result))
self.logger.debug('cp {} {}'.format(result, local_full_path))
shutil.copy(result, local_full_path)
return local_full_path
def get_from(self, resource, version, location): # pylint: disable=no-self-use
if resource.name in ['apk', 'jar']:
return get_from_location_by_extension(resource, location, resource.name, version)
elif resource.name == 'file':
filepath = os.path.join(location, resource.path)
if os.path.exists(filepath):
return filepath
elif resource.name == 'revent':
filename = '.'.join(
[resource.owner.device.name, resource.stage, 'revent']).lower()
alternate_location = os.path.join(location, 'revent_files')
# There tends to be some confusion as to where revent files should
# be placed. This looks both in the extension's directory, and in
# 'revent_files' subdirectory under it, if it exists.
if os.path.isdir(alternate_location):
for candidate in os.listdir(alternate_location):
if candidate.lower() == filename.lower():
return os.path.join(alternate_location, candidate)
if os.path.isdir(location):
for candidate in os.listdir(location):
if candidate.lower() == filename.lower():
return os.path.join(location, candidate)
else:
message = 'Unexpected resource type: {}'.format(resource.name)
raise ValueError(message)
# Utility functions
def get_from_location_by_extension(resource, location, extension, version=None):
found_files = glob.glob(os.path.join(location, '*.{}'.format(extension)))
if version:
found_files = [ff for ff in found_files
if version.lower() in os.path.basename(ff).lower()]
if len(found_files) == 1:
return found_files[0]
elif not found_files:
return None
else:
raise ResourceError('More than one .{} found in {} for {}.'.format(extension,
location,
resource.owner.name))
def _get_owner_path(resource):
if resource.owner is NO_ONE:
return os.path.join(os.path.dirname(__file__), 'common')
else:
return os.path.dirname(sys.modules[resource.owner.__module__].__file__)

355
wa/framework/run.py Normal file
View File

@@ -0,0 +1,355 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import uuid
import logging
from copy import copy
from datetime import datetime, timedelta
from collections import OrderedDict
from wa.framework import signal, pluginloader, log
from wa.framework.plugin import Plugin
from wa.framework.output import Status
from wa.framework.resource import ResourceResolver
from wa.framework.exception import JobError
from wa.utils import counter
from wa.utils.serializer import json
from wa.utils.misc import ensure_directory_exists as _d
from wa.utils.types import TreeNode, caseless_string
class JobActor(object):
def get_config(self):
return {}
def initialize(self, context):
pass
def run(self):
pass
def finalize(self):
pass
def restart(self):
pass
def complete(self):
pass
class RunnerJob(object):
@property
def status(self):
return self.output.status
@status.setter
def status(self, value):
self.output.status = value
@property
def should_retry(self):
return self.attempt <= self.max_retries
def __init__(self, id, actor, output, max_retries):
self.id = id
self.actor = actor
self.output = output
self.max_retries = max_retries
self.status = Status.NEW
self.attempt = 0
def initialize(self, context):
self.actor.initialize(context)
self.status = Status.PENDING
def run(self):
self.status = Status.RUNNING
self.attempt += 1
self.output.config = self.actor.get_config()
self.output.initialize()
self.actor.run()
self.status = Status.COMPLETE
def finalize(self):
self.actor.finalize()
def restart(self):
self.actor.restart()
def complete(self):
self.actor.complete()
__run_methods = set()
def runmethod(method):
"""
A method decorator that ensures that a method is invoked only once per run.
"""
def _method_wrapper(*args, **kwargs):
if method in __run_methods:
return
__run_methods.add(method)
ret = method(*args, **kwargs)
if ret is not None:
message = 'runmethod-decorated methods must return None; "{}" returned "{}"'
raise RuntimeError(message.format(method, ret))
return _method_wrapper
def reset_runmethods():
global __run_methods
__run_methods = set()
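# A minimal sketch of the once-per-run semantics (the function below is
# hypothetical):
#
#     @runmethod
#     def notify_run_started():
#         print('run started')
#
#     notify_run_started()    # invoked
#     notify_run_started()    # no-op -- already invoked this run
#     reset_runmethods()
#     notify_run_started()    # invoked again after the reset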
class Runner(object):
@property
def info(self):
return self.output.info
@property
def status(self):
return self.output.status
@status.setter
def status(self, value):
self.output.status = value
@property
def jobs_pending(self):
return len(self.job_queue) > 0
@property
def current_job(self):
if self.job_queue:
return self.job_queue[0]
@property
def previous_job(self):
if self.completed_jobs:
return self.completed_jobs[-1]
@property
def next_job(self):
if len(self.job_queue) > 1:
return self.job_queue[1]
def __init__(self, output):
self.logger = logging.getLogger('runner')
self.output = output
self.context = RunContext(self)
self.status = Status.NEW
self.job_queue = []
self.completed_jobs = []
self._known_ids = set([])
def add_job(self, job_id, actor, max_retries=2):
job_id = caseless_string(job_id)
if job_id in self._known_ids:
raise JobError('Job with id "{}" already exists'.format(job_id))
output = self.output.create_job_output(job_id)
self.job_queue.append(RunnerJob(job_id, actor, output, max_retries))
self._known_ids.add(job_id)
def initialize(self):
self.logger.info('Initializing run')
self.start_time = datetime.now()
if not self.info.start_time:
self.info.start_time = self.start_time
self.info.duration = timedelta()
self.context.initialize()
for job in self.job_queue:
job.initialize(self.context)
self.persist_state()
self.logger.info('Run initialized')
def run(self):
self.status = Status.RUNNING
reset_runmethods()
signal.send(signal.RUN_STARTED, self, self.context)
self.initialize()
signal.send(signal.RUN_INITIALIZED, self, self.context)
self.run_jobs()
signal.send(signal.RUN_COMPLETED, self, self.context)
self.finalize()
signal.send(signal.RUN_FINALIZED, self, self.context)
def run_jobs(self):
try:
self.logger.info('Running jobs')
while self.jobs_pending:
self.begin_job()
log.indent()
try:
self.current_job.run()
except KeyboardInterrupt:
self.current_job.status = Status.ABORTED
signal.send(signal.JOB_ABORTED, self, self.current_job)
raise
except Exception as e:
self.current_job.status = Status.FAILED
log.log_error(e, self.logger)
signal.send(signal.JOB_FAILED, self, self.current_job)
else:
self.current_job.status = Status.COMPLETE
finally:
log.dedent()
self.complete_job()
except KeyboardInterrupt:
self.status = Status.ABORTED
while self.job_queue:
job = self.job_queue.pop(0)
job.status = Status.ABORTED
self.completed_jobs.append(job)
signal.send(signal.RUN_ABORTED, self, self)
raise
except Exception as e:
self.status = Status.FAILED
log.log_error(e, self.logger)
signal.send(signal.RUN_FAILED, self, self)
else:
self.status = Status.COMPLETE
def finalize(self):
self.logger.info('Finalizing run')
for job in self.job_queue:
job.finalize()
self.end_time = datetime.now()
self.info.end_time = self.end_time
self.info.duration += self.end_time - self.start_time
self.persist_state()
self.logger.info('Run completed')
def begin_job(self):
self.logger.info('Starting job {}'.format(self.current_job.id))
signal.send(signal.JOB_STARTED, self, self.current_job)
self.persist_state()
def complete_job(self):
if self.current_job.status == Status.FAILED:
self.output.move_failed(self.current_job.output)
if self.current_job.should_retry:
self.logger.info('Restarting job {}'.format(self.current_job.id))
self.persist_state()
self.current_job.restart()
signal.send(signal.JOB_RESTARTED, self, self.current_job)
return
self.logger.info('Completing job {}'.format(self.current_job.id))
self.current_job.complete()
self.persist_state()
signal.send(signal.JOB_COMPLETED, self, self.current_job)
job = self.job_queue.pop(0)
self.completed_jobs.append(job)
def persist_state(self):
self.output.persist()
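# A sketch of driving a Runner; constructing the RunOutput is assumed to
# happen elsewhere in the framework (`output` below is hypothetical):
#
#     runner = Runner(output)
#     runner.add_job('job-1', JobActor(), max_retries=2)
#     runner.run()    # sends RUN_* signals, executes jobs (with retries),
#                     # then finalizes and persists state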
class RunContext(object):
"""
Provides a context for instrumentation. Keeps track of things like
current workload and iteration.
"""
@property
def run_output(self):
return self.runner.output
@property
def current_job(self):
return self.runner.current_job
@property
def run_output_directory(self):
return self.run_output.output_directory
@property
def output_directory(self):
if self.runner.current_job:
return self.runner.current_job.output.output_directory
else:
return self.run_output.output_directory
@property
def info_directory(self):
return self.run_output.info_directory
@property
def config_directory(self):
return self.run_output.config_directory
@property
def failed_directory(self):
return self.run_output.failed_directory
@property
def log_file(self):
return os.path.join(self.output_directory, 'run.log')
def __init__(self, runner):
self.runner = runner
self.job = None
self.iteration = None
self.job_output = None
self.resolver = ResourceResolver()
def initialize(self):
self.resolver.load()
def get_path(self, subpath):
if self.current_job is None:
return self.run_output.get_path(subpath)
else:
return self.current_job.output.get_path(subpath)
def add_metric(self, *args, **kwargs):
if self.current_job is None:
self.run_output.add_metric(*args, **kwargs)
else:
self.current_job.output.add_metric(*args, **kwargs)
def add_artifact(self, name, path, kind, *args, **kwargs):
if self.current_job is None:
self.add_run_artifact(name, path, kind, *args, **kwargs)
else:
self.add_job_artifact(name, path, kind, *args, **kwargs)
def add_run_artifact(self, *args, **kwargs):
self.run_output.add_artifact(*args, **kwargs)
def add_job_artifact(self, *args, **kwargs):
self.current_job.output.add_artifact(*args, **kwargs)
def get_artifact(self, name):
    # Job-level artifacts take precedence over run-level ones.
    if self.current_job:
        for art in self.current_job.output.artifacts:
            if art.name == name:
                return art
    for art in self.run_output.artifacts:
        if art.name == name:
            return art
    return None
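# A sketch of how RunContext dispatches results (the argument shapes below
# are assumptions): while a job is executing, metrics and artifacts attach
# to that job's output; outside of a job they fall through to the run-level
# output.
#
#     context.add_metric('score', 42)
#     context.add_artifact('trace', 'trace.dat', 'data')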

287
wa/framework/signal.py Normal file
View File

@@ -0,0 +1,287 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module wraps louie signalling mechanism. It relies on modified version of loiue
that has prioritization added to handler invocation.
"""
import logging
from contextlib import contextmanager
from louie import dispatcher
from wa.utils.types import prioritylist
logger = logging.getLogger('dispatcher')
class Signal(object):
"""
This class implements the signals used to notify callbacks registered to
respond to different states and stages of the execution of workload
automation.
"""
def __init__(self, name, description='no description', invert_priority=False):
"""
Instantiates a Signal.
:param name: The identifier of the Signal object. Signal instances with
             the same name refer to the same execution stage.
:param invert_priority: Boolean that determines whether multiple
                        callbacks for the same signal should be ordered with
                        ascending or descending priorities. Typically this flag
                        should be set to True if the Signal is triggered AFTER
                        a state/stage has been reached, so that callbacks with
                        high priorities are called right after the event has
                        occurred.
"""
self.name = name
self.description = description
self.invert_priority = invert_priority
def __str__(self):
return self.name
__repr__ = __str__
def __hash__(self):
return hash(self.name)
RUN_STARTED = Signal('run-started', 'sent at the beginning of the run')
RUN_INITIALIZED = Signal('run-initialized', 'sent after the run has been initialized')
RUN_ABORTED = Signal('run-aborted', 'sent when the run has been aborted due to a keyboard interrupt')
RUN_FAILED = Signal('run-failed', 'sent if the run has failed to complete all jobs')
RUN_COMPLETED = Signal('run-completed', 'sent upon completion of the run (regardless of whether or not it has failed)')
RUN_FINALIZED = Signal('run-finalized', 'sent after the run has been finalized')
JOB_STARTED = Signal('job-started', 'sent when a new job has been started')
JOB_ABORTED = Signal('job-aborted',
description='''
sent if a job has been aborted due to a keyboard interrupt.
.. note:: While the status of every job that has not had a chance to run
due to being interrupted will be set to "ABORTED", this signal will
only be sent for the job that was actually running at the time.
''')
JOB_FAILED = Signal('job-failed', description='sent if the job has failed')
JOB_RESTARTED = Signal('job-restarted')
JOB_COMPLETED = Signal('job-completed')
JOB_FINALIZED = Signal('job-finalized')
ERROR_LOGGED = Signal('error-logged')
WARNING_LOGGED = Signal('warning-logged')
# These are paired events -- if the before_event is sent, the after_ signal is
# guaranteed to also be sent. In particular, the after_ signals will be sent
# even if there is an error, so you cannot assume in the handler that the
# device has booted successfully. In most cases, you should instead use the
# non-paired signals below.
BEFORE_FLASHING = Signal('before-flashing', invert_priority=True)
SUCCESSFUL_FLASHING = Signal('successful-flashing')
AFTER_FLASHING = Signal('after-flashing')
BEFORE_BOOT = Signal('before-boot', invert_priority=True)
SUCCESSFUL_BOOT = Signal('successful-boot')
AFTER_BOOT = Signal('after-boot')
BEFORE_TARGET_CONNECT = Signal('before-target-connect', invert_priority=True)
SUCCESSFUL_TARGET_CONNECT = Signal('successful-target-connect')
AFTER_TARGET_CONNECT = Signal('after-target-connect')
BEFORE_TARGET_DISCONNECT = Signal('before-target-disconnect', invert_priority=True)
SUCCESSFUL_TARGET_DISCONNECT = Signal('successful-target-disconnect')
AFTER_TARGET_DISCONNECT = Signal('after-target-disconnect')
BEFORE_WORKLOAD_SETUP = Signal(
'before-workload-setup', invert_priority=True)
SUCCESSFUL_WORKLOAD_SETUP = Signal('successful-workload-setup')
AFTER_WORKLOAD_SETUP = Signal('after-workload-setup')
BEFORE_WORKLOAD_EXECUTION = Signal(
'before-workload-execution', invert_priority=True)
SUCCESSFUL_WORKLOAD_EXECUTION = Signal('successful-workload-execution')
AFTER_WORKLOAD_EXECUTION = Signal('after-workload-execution')
BEFORE_WORKLOAD_RESULT_UPDATE = Signal(
'before-workload-result-update', invert_priority=True)
SUCCESSFUL_WORKLOAD_RESULT_UPDATE = Signal(
'successful-workload-result-update')
AFTER_WORKLOAD_RESULT_UPDATE = Signal('after-workload-result-update')
BEFORE_WORKLOAD_TEARDOWN = Signal(
'before-workload-teardown', invert_priority=True)
SUCCESSFUL_WORKLOAD_TEARDOWN = Signal('successful-workload-teardown')
AFTER_WORKLOAD_TEARDOWN = Signal('after-workload-teardown')
BEFORE_OVERALL_RESULTS_PROCESSING = Signal(
'before-overall-results-process', invert_priority=True)
SUCCESSFUL_OVERALL_RESULTS_PROCESSING = Signal(
'successful-overall-results-process')
AFTER_OVERALL_RESULTS_PROCESSING = Signal(
'after-overall-results-process')
class CallbackPriority(object):
EXTREMELY_HIGH = 30
VERY_HIGH = 20
HIGH = 10
NORMAL = 0
LOW = -10
VERY_LOW = -20
EXTREMELY_LOW = -30
def __init__(self):
raise ValueError('Cannot instantiate')
class _prioritylist_wrapper(prioritylist):
"""
This adds a NOP append() method so that when louie invokes it to add the
handler to receivers, nothing will happen; the handler is actually added inside
the connect() below according to priority, before louie's connect() gets invoked.
"""
def append(self, *args, **kwargs):
pass
def connect(handler, signal, sender=dispatcher.Any, priority=0):
"""
Connects a callback to a signal, so that the callback will be automatically invoked
when that signal is sent.
Parameters:
:handler: This can be any callable that takes the right arguments for
the signal. For most signals this means a single argument that
will be an ``ExecutionContext`` instance. But please see documentation
for individual signals in the :ref:`signals reference <instrumentation_method_map>`.
:signal: The signal to which the handler will be subscribed. Please see
:ref:`signals reference <instrumentation_method_map>` for the list of standard WA
signals.
.. note:: There is nothing that prevents instrumentation from sending its
          own signals that are not part of the standard set. However, the
          signal must always be a :class:`wa.framework.signal.Signal` instance.
:sender: The handler will be invoked only for the signals emitted by this sender. By
default, this is set to :class:`louie.dispatcher.Any`, so the handler will
be invoked for signals from any sender.
:priority: An integer (positive or negative) that specifies the priority of the handler.
Handlers with higher priority will be called before handlers with lower
priority. The call order of handlers with the same priority is not specified.
Defaults to 0.
.. note:: Priorities for some signals are inverted (so highest priority
handlers get executed last). Please see :ref:`signals reference <instrumentation_method_map>`
for details.
"""
if getattr(signal, 'invert_priority', False):
priority = -priority
senderkey = id(sender)
if senderkey in dispatcher.connections:
signals = dispatcher.connections[senderkey]
else:
dispatcher.connections[senderkey] = signals = {}
if signal in signals:
receivers = signals[signal]
else:
receivers = signals[signal] = _prioritylist_wrapper()
receivers.add(handler, priority)
dispatcher.connect(handler, signal, sender)
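# A usage sketch: subscribe a handler to RUN_STARTED with elevated priority
# (the handler below is hypothetical; the modified louie is assumed to
# forward send()'s arguments to handlers):
#
#     def on_run_started(sender, context):
#         print('run started by {}'.format(sender))
#
#     connect(on_run_started, RUN_STARTED, priority=CallbackPriority.HIGH)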
def disconnect(handler, signal, sender=dispatcher.Any):
"""
Disconnect a previously connected handler from the specified signal,
optionally only for the specified sender.
Parameters:
:handler: The callback to be disconnected.
:signal: The signal the handler is to be disconnected from. This must
         be a :class:`wa.framework.signal.Signal` instance.
:sender: If specified, the handler will only be disconnected from the signal
sent by this sender.
"""
dispatcher.disconnect(handler, signal, sender)
def send(signal, sender=dispatcher.Anonymous, *args, **kwargs):
"""
Sends a signal, causing connected handlers to be invoked.
Parameters:
:signal: Signal to be sent. This must be an instance of :class:`wa.framework.signal.Signal`
         or its subclasses.
:sender: The sender of the signal (typically, this would be ``self``). Some handlers may only
         be subscribed to signals from a particular sender.
The rest of the parameters will be passed on as arguments to the handler.
"""
return dispatcher.send(signal, sender, *args, **kwargs)
# This will normally be set to log_error() by init_logging(); see wa.framework/log.py.
# Done this way to prevent a circular import dependency.
log_error_func = logger.error
def safe_send(signal, sender=dispatcher.Anonymous,
propagate=[KeyboardInterrupt], *args, **kwargs):
"""
Same as ``send``, except this will catch and log all exceptions raised
by handlers, except those specified in the ``propagate`` argument (defaults
to just ``[KeyboardInterrupt]``).
"""
try:
send(signal, sender, *args, **kwargs)
except Exception as e:
if any(isinstance(e, p) for p in propagate):
raise e
log_error_func(e)
@contextmanager
def wrap(signal_name, sender=dispatcher.Anonymous, safe=False, *args, **kwargs):
"""Wraps the suite in before/after signals, ensuring
that after signal is always sent."""
signal_name = signal_name.upper().replace('-', '_')
send_func = safe_send if safe else send
try:
before_signal = globals()['BEFORE_' + signal_name]
success_signal = globals()['SUCCESSFUL_' + signal_name]
after_signal = globals()['AFTER_' + signal_name]
except KeyError:
raise ValueError('Invalid wrapped signal name: {}'.format(signal_name))
try:
send_func(before_signal, sender, *args, **kwargs)
yield
send_func(success_signal, sender, *args, **kwargs)
finally:
send_func(after_signal, sender, *args, **kwargs)
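# A usage sketch for wrap(): the before/success/after triplet for a paired
# signal group is emitted around the enclosed block, with the after signal
# guaranteed even if the block raises.
#
#     with wrap('workload-setup', sender=self):
#         workload.setup(context)
#     # emits BEFORE_WORKLOAD_SETUP, then SUCCESSFUL_WORKLOAD_SETUP on
#     # success, and AFTER_WORKLOAD_SETUP in all cases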

27
wa/framework/version.py Normal file
View File

@@ -0,0 +1,27 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import namedtuple
VersionTuple = namedtuple('Version', ['major', 'minor', 'revision'])
version = VersionTuple(3, 0, 0)
def get_wa_version():
version_string = '{}.{}.{}'.format(
version.major, version.minor, version.revision)
return version_string
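# A trivial, runnable check of the helper above:
if __name__ == '__main__':
    print(get_wa_version())  # "3.0.0" for this version tuple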

281
wa/framework/workload.py Normal file
View File

@@ -0,0 +1,281 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import time
import logging
from wa.framework.plugin import TargetedPlugin
from wa.framework.resource import ApkFile, Executable, JarFile, ReventFile, NO_ONE
from wa.framework.exception import WorkloadError
from devlib.utils.android import ApkInfo
DELAY = 5  # seconds to wait after UI automation completes (assumed default)
class Workload(TargetedPlugin):
"""
This is the base class for the workloads executed by the framework.
Each of the methods throwing NotImplementedError *must* be implemented
by the derived classes.
"""
kind = 'workload'
def init_resources(self, context):
"""
This method may be used to perform early resource discovery and initialization. This is invoked
during the initial loading stage and before the device is ready, so cannot be used for any
device-dependent initialization. This method is invoked before the workload instance is
validated.
"""
pass
def initialize(self, context):
"""
This method should be used to perform once-per-run initialization of a
workload instance, i.e., unlike ``setup()`` it will not be invoked on
each iteration.
"""
pass
def setup(self, context):
"""
Perform the setup necessary to run the workload, such as copying the
necessary files to the device, configuring the environments, etc.
This is also the place to perform any on-device checks prior to
attempting to execute the workload.
"""
pass
def run(self, context):
"""Execute the workload. This is the method that performs the actual "work" of the"""
pass
def update_result(self, context):
"""
Update the result within the specified execution context with the
metrics from this workload iteration.
"""
pass
def teardown(self, context):
""" Perform any final clean up for the Workload. """
pass
def finalize(self, context):
pass
def __str__(self):
return '<Workload {}>'.format(self.name)
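# A minimal sketch of a concrete workload (the names, binary, and parser
# below are hypothetical; real workloads also declare parameters and
# metadata):
#
#     class Dhrystone(Workload):
#         name = 'dhrystone'
#
#         def setup(self, context):
#             self.binary = self.target.install(context.resolver.get(
#                 Executable(self, self.target.abi, 'dhrystone')))
#
#         def run(self, context):
#             self.output = self.target.execute('{} 10000000'.format(self.binary))
#
#         def update_result(self, context):
#             context.add_metric('score', parse_score(self.output))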
class UiAutomatorGUI(object):
def __init__(self, target, package='', klass='UiAutomation',
             method='runUiAutomation', run_timeout=4 * 60):
    self.logger = logging.getLogger('uiauto')
    self.target = target
    self.uiauto_package = package
    self.uiauto_class = klass
    self.uiauto_method = method
    self.run_timeout = run_timeout  # seconds; the default is an assumed value
    self.uiauto_file = None
    self.target_uiauto_file = None
    self.command = None
    self.uiauto_params = {}
def init_resources(self, context):
self.uiauto_file = context.resolver.get(JarFile(self))
self.target_uiauto_file = self.target.path.join(self.target.working_directory,
os.path.basename(self.uiauto_file))
if not self.uiauto_package:
self.uiauto_package = os.path.splitext(os.path.basename(self.uiauto_file))[0]
def validate(self):
if not self.uiauto_file:
    raise WorkloadError('No UI automation JAR file found for package "{}".'.format(self.uiauto_package))
if not self.uiauto_package:
    raise WorkloadError('No UI automation package specified.')
def setup(self, context):
method_string = '{}.{}#{}'.format(self.uiauto_package, self.uiauto_class, self.uiauto_method)
params_dict = self.uiauto_params
params_dict['workdir'] = self.target.working_directory
params = ''
for k, v in self.uiauto_params.iteritems():
params += ' -e {} {}'.format(k, v)
self.command = 'uiautomator runtest {}{} -c {}'.format(self.target_uiauto_file, params, method_string)
self.target.push(self.uiauto_file, self.target_uiauto_file)
self.target.killall('uiautomator')
def run(self, context):
result = self.target.execute(self.command, self.run_timeout)
if 'FAILURE' in result:
raise WorkloadError(result)
else:
self.logger.debug(result)
time.sleep(DELAY)
def teardown(self, context):
self.target.remove(self.target_uiauto_file)
class ReventGUI(object):
def __init__(self, workload, target, setup_timeout=5 * 60, run_timeout=10 * 60):
self.workload = workload
self.target = target
self.setup_timeout = setup_timeout
self.run_timeout = run_timeout
self.on_target_revent_binary = self.target.get_workpath('revent')
self.on_target_setup_revent = self.target.get_workpath('{}.setup.revent'.format(self.target.name))
self.on_target_run_revent = self.target.get_workpath('{}.run.revent'.format(self.target.name))
self.logger = logging.getLogger('revent')
self.revent_setup_file = None
self.revent_run_file = None
def init_resources(self, context):
self.revent_setup_file = context.resolver.get(ReventFile(self.workload, 'setup'))
self.revent_run_file = context.resolver.get(ReventFile(self.workload, 'run'))
def setup(self, context):
self._check_revent_files(context)
self.target.killall('revent')
command = '{} replay {}'.format(self.on_target_revent_binary, self.on_target_setup_revent)
self.target.execute(command, timeout=self.setup_timeout)
def run(self, context):
command = '{} replay {}'.format(self.on_target_revent_binary, self.on_target_run_revent)
self.logger.debug('Replaying {}'.format(os.path.basename(self.on_target_run_revent)))
self.target.execute(command, timeout=self.run_timeout)
self.logger.debug('Replay completed.')
def teardown(self, context):
self.target.remove(self.on_target_setup_revent)
self.target.remove(self.on_target_run_revent)
def _check_revent_files(self, context):
# check the revent binary
revent_binary = context.resolver.get(Executable(NO_ONE, self.target.abi, 'revent'))
if not os.path.isfile(revent_binary):
message = '{} does not exist. '.format(revent_binary)
message += 'Please build revent for your system and place it in that location'
raise WorkloadError(message)
if not self.revent_setup_file:
# pylint: disable=too-few-format-args
message = '{0}.setup.revent file does not exist; please provide one for your target, {0}'
raise WorkloadError(message.format(self.target.name))
if not self.revent_run_file:
# pylint: disable=too-few-format-args
message = '{0}.run.revent file does not exist; please provide one for your target, {0}'
raise WorkloadError(message.format(self.target.name))
self.on_target_revent_binary = self.target.install(revent_binary)
self.target.push(self.revent_run_file, self.on_target_run_revent)
self.target.push(self.revent_setup_file, self.on_target_setup_revent)
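# A sketch of wiring ReventGUI into a workload (the workload itself is
# hypothetical): recordings are resolved per target, pushed during setup(),
# and replayed during run().
#
#     class MyGame(Workload):
#         name = 'mygame'
#
#         def init_resources(self, context):
#             self.gui = ReventGUI(self, self.target)
#             self.gui.init_resources(context)
#
#         def setup(self, context):
#             self.gui.setup(context)
#
#         def run(self, context):
#             self.gui.run(context)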
class ApkHandler(object):
    def __init__(self, owner, target, view, install_timeout=300, version=None,
                 strict=True, force_install=False, uninstall=False):
        self.logger = logging.getLogger('apk')
        self.owner = owner
        self.target = target
        self.view = view
        self.install_timeout = install_timeout
        self.version = version
        self.strict = strict
        self.force_install = force_install
        self.uninstall_apk = uninstall
        self.apk_file = None
        self.apk_info = None
        self.apk_version = None
        self.logcat_log = None
def init_resources(self, context):
self.apk_file = context.resolver.get(ApkFile(self.owner),
version=self.version,
strict=self.strict)
self.apk_info = ApkInfo(self.apk_file)
def setup(self, context):
self.initialize_package(context)
self.start_activity()
self.target.execute('am kill-all') # kill all *background* activities
self.target.clear_logcat()
def initialize_package(self, context):
installed_version = self.target.get_package_version(self.apk_info.package)
if self.strict:
self.initialize_with_host_apk(context, installed_version)
else:
if not installed_version:
message = ('{} was not found on the device, and "strict" is set to '
           'False, so the host APK version was not checked.')
raise WorkloadError(message.format(self.apk_info.package))
message = 'Version {} installed on device; skipping host APK check.'
self.logger.debug(message.format(installed_version))
self.reset(context)
self.version = installed_version
def initialize_with_host_apk(self, context, installed_version):
    host_version = self.apk_info.version_name
    if installed_version != host_version:
        if installed_version:
            message = '{} host version: {}, device version: {}; re-installing...'
            self.logger.debug(message.format(os.path.basename(self.apk_file),
                                             host_version, installed_version))
        else:
            message = '{} host version: {}, not found on device; installing...'
            self.logger.debug(message.format(os.path.basename(self.apk_file),
                                             host_version))
        self.force_install = True
    else:
        message = '{} version {} found on both device and host.'
        self.logger.debug(message.format(os.path.basename(self.apk_file),
                                         host_version))
    if self.force_install:
        if installed_version:
            self.target.uninstall(self.apk_info.package)
        self.install_apk(context)
    else:
        self.reset(context)
    self.apk_version = host_version
def start_activity(self):
    command = 'am start -W -n {}/{}'.format(self.apk_info.package,
                                            self.apk_info.activity)
    output = self.target.execute(command)
    if 'Error:' in output:
        # this will dismiss any error dialogs
        self.target.execute('am force-stop {}'.format(self.apk_info.package))
        raise WorkloadError(output)
    self.logger.debug(output)
def reset(self, context):  # pylint: disable=W0613
    self.target.execute('am force-stop {}'.format(self.apk_info.package))
    self.target.execute('pm clear {}'.format(self.apk_info.package))
def install_apk(self, context):
    output = self.target.install(self.apk_file, self.install_timeout)
    if 'Failure' in output:
        if 'ALREADY_EXISTS' in output:
            self.logger.warn('Using already installed APK (did it not uninstall properly?)')
        else:
            raise WorkloadError(output)
    else:
        self.logger.debug(output)
def update_result(self, context):
    self.logcat_log = os.path.join(context.output_directory, 'logcat.log')
    self.target.dump_logcat(self.logcat_log)
    context.add_artifact(name='logcat',
                         path='logcat.log',
                         kind='log',
                         description='Logcat dump for the run.')
def teardown(self, context):
    self.target.execute('am force-stop {}'.format(self.apk_info.package))
    if self.uninstall_apk:
        self.target.uninstall(self.apk_info.package)
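# A sketch of the ApkHandler lifecycle within a workload iteration (the
# wiring below is hypothetical):
#
#     handler = ApkHandler(workload, target, view=None, version='4.2')
#     handler.init_resources(context)    # resolve host APK, parse ApkInfo
#     handler.setup(context)             # install/verify, launch the activity
#     ...                                # the workload exercises the app
#     handler.update_result(context)     # dump logcat as a 'logcat' artifact
#     handler.teardown(context)          # force-stop, optionally uninstall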