
New target description + moving target stuff under "framework"

Changing the way target descriptions work from a static mapping to
something that is dynamically generated and is extensible via plugins.
Also moving core target implementation stuff under "framework".
Sergei Trofimov 2017-03-06 11:10:25 +00:00
parent 18d001fd76
commit 42539bbe0d
43 changed files with 6229 additions and 2586 deletions
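For context, target descriptions are now produced dynamically and queried through the new descriptor module (see wa/framework/target/descriptor usage below), so they can be enumerated at runtime, e.g. (a minimal sketch; the output formatting is illustrative)::

    from wa.framework.target.descriptor import get_target_descriptions

    for td in sorted(get_target_descriptions(), key=lambda t: t.name):
        print '{}: {}'.format(td.name, td.description or '')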


@@ -13,5 +13,5 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wlauto.core.entry_point import main
from wa.framework.entrypoint import main
main()


@@ -24,9 +24,9 @@ except ImportError:
from distutils.core import setup
wlauto_dir = os.path.join(os.path.dirname(__file__), 'wlauto')
wlauto_dir = os.path.join(os.path.dirname(__file__), 'wa')
sys.path.insert(0, os.path.join(wlauto_dir, 'core'))
sys.path.insert(0, os.path.join(wlauto_dir, 'framework'))
from version import get_wa_version
# happens if falling back to distutils


@@ -2,8 +2,6 @@ from wa.framework import pluginloader, log, signal
from wa.framework.configuration import settings
from wa.framework.plugin import Plugin, Parameter
from wa.framework.command import Command
from wa.framework.run import runmethod
from wa.framework.output import RunOutput
from wa.framework.workload import Workload
from wa.framework.exception import WAError, NotFoundError, ValidationError, WorkloadError

wa/commands/list.py (new file)

@@ -0,0 +1,106 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wa import Command, settings
from wa.framework import pluginloader
from wa.framework.plugin import PluginLoader
from wa.framework.target.descriptor import get_target_descriptions
from wa.utils.doc import get_summary
from wa.utils.formatter import DescriptionListFormatter
class ListCommand(Command):
name = 'list'
description = 'List available WA plugins with a short description of each.'
def initialize(self, context):
kinds = get_kinds()
self.parser.add_argument('kind', metavar='KIND',
help=('Specify the kind of plugin to list. Must be '
'one of: {}'.format(', '.join(kinds))),
choices=kinds)
self.parser.add_argument('-n', '--name',
help='Filter results by the name specified')
self.parser.add_argument('-o', '--packaged-only', action='store_true',
help='''
Only list plugins packaged with WA itself. Do
not list plugins installed locally or from
other packages.
''')
self.parser.add_argument('-p', '--platform',
help='''
Only list results that are supported by the
specified platform.
''')
def execute(self, state, args):
filters = {}
if args.name:
filters['name'] = args.name
if args.kind == 'targets':
list_targets()
else:
list_plugins(args, filters)
def get_kinds():
kinds = pluginloader.kinds
if 'target_descriptor' in kinds:
kinds.remove('target_descriptor')
kinds.append('target')
return ['{}s'.format(name) for name in kinds]
def list_targets():
targets = get_target_descriptions()
targets = sorted(targets, key=lambda x: x.name)
output = DescriptionListFormatter()
for target in targets:
output.add_item(target.description or '', target.name)
print output.format_data()
def list_plugins(args, filters):
results = pluginloader.list_plugins(args.kind[:-1])
if filters or args.platform:
filtered_results = []
for result in results:
passed = True
for k, v in filters.iteritems():
if getattr(result, k) != v:
passed = False
break
if passed and args.platform:
passed = check_platform(result, args.platform)
if passed:
filtered_results.append(result)
else: # no filters specified
filtered_results = results
if filtered_results:
output = DescriptionListFormatter()
for result in sorted(filtered_results, key=lambda x: x.name):
output.add_item(get_summary(result), result.name)
print output.format_data()
def check_platform(plugin, platform):
supported_platforms = getattr(plugin, 'supported_platforms', [])
if supported_platforms:
return platform in supported_platforms
return True
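For illustration, get_kinds() above maps the internal plugin kinds onto the user-facing, pluralized names accepted by this command (the input list here is hypothetical)::

    # pluginloader.kinds   e.g. ['workload', 'instrument', 'target_descriptor']
    # get_kinds() returns       ['workloads', 'instruments', 'targets']
    # so "wa list targets" routes to list_targets() rather than to the raw
    # target_descriptor plugins.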


@@ -18,70 +18,117 @@ import os
import sys
import shutil
import wa
from wa import Command, settings
from wa.framework import log
from wa.framework.agenda import Agenda
from wa.framework.output import RunOutput
from wa.framework import pluginloader
from wa.framework.configuration import RunConfiguration
from wa.framework.configuration.parsers import AgendaParser, ConfigParser
from wa.framework.execution import Executor
from wa.framework.output import init_wa_output
from wa.framework.version import get_wa_version
from wa.framework.exception import NotFoundError, ConfigError
from wa.utils import log
from wa.utils.types import toggle_set
class RunCommand(Command):
name = 'run'
description = """
description = '''
Execute automated workloads on a remote device and process the resulting output.
"""
'''
def initialize(self, context):
self.parser.add_argument('agenda', metavar='AGENDA',
help="""
Agenda for this workload automation run. This defines which
workloads will be executed, how many times, with which
tunables, etc. See example agendas in {} for an example of
how this file should be structured.
""".format(os.path.dirname(wlauto.__file__)))
Agenda for this workload automation run. This
defines which workloads will be executed, how
many times, with which tunables, etc. See
example agendas in {} for an example of how
this file should be structured.
""".format(os.path.dirname(wa.__file__)))
self.parser.add_argument('-d', '--output-directory', metavar='DIR', default=None,
help="""
Specify a directory where the output will be generated. If
the directory already exists, the script will abort unless -f
option (see below) is used, in which case the contents of the
directory will be overwritten. If this option is not specified,
then {} will be used instead.
""".format(settings.output_directory))
Specify a directory where the output will be
generated. If the directory already exists,
the script will abort unless -f option (see
below) is used, in which case the contents of
the directory will be overwritten. If this
option is not specified, then {} will be used
instead.
""".format(settings.default_output_directory))
self.parser.add_argument('-f', '--force', action='store_true',
help="""
Overwrite output directory if it exists. By default, the script
will abort in this situation to prevent accidental data loss.
Overwrite output directory if it exists. By
default, the script will abort in this
situation to prevent accidental data loss.
""")
self.parser.add_argument('-i', '--id', action='append', dest='only_run_ids', metavar='ID',
help="""
Specify a workload spec ID from an agenda to run. If this is
specified, only that particular spec will be run, and other
workloads in the agenda will be ignored. This option may be
used to specify multiple IDs.
Specify a workload spec ID from an agenda to
run. If this is specified, only that
particular spec will be run, and other
workloads in the agenda will be ignored. This
option may be used to specify multiple IDs.
""")
self.parser.add_argument('--disable', action='append', dest='instruments_to_disable',
default=[],
metavar='INSTRUMENT', help="""
Specify an instrument to disable from the command line. This
is equivalent to adding "~{metavar}" to the instrumentation list in
the agenda. This can be used to temporarily disable a troublesome
instrument for a particular run without introducing permanent
change to the config (which one might then forget to revert).
This option may be specified multiple times.
Specify an instrument to disable from the
command line. This is equivalent to adding
"~{metavar}" to the instrumentation list in
the agenda. This can be used to temporarily
disable a troublesome instrument for a
particular run without introducing permanent
change to the config (which one might then
forget to revert). This option may be
specified multiple times.
""")
def execute(self, args): # NOQA
def execute(self, config, args):
output = self.set_up_output_directory(config, args)
log.add_file(output.logfile)
self.logger.debug('Version: {}'.format(get_wa_version()))
self.logger.debug('Command Line: {}'.format(' '.join(sys.argv)))
disabled_instruments = toggle_set(["~{}".format(i)
for i in args.instruments_to_disable])
config.jobs_config.disable_instruments(disabled_instruments)
config.jobs_config.only_run_ids(args.only_run_ids)
parser = AgendaParser()
if os.path.isfile(args.agenda):
parser.load_from_path(config, args.agenda)
shutil.copy(args.agenda, output.raw_config_dir)
else:
try:
pluginloader.get_plugin_class(args.agenda, kind='workload')
agenda = {'workloads': [{'name': args.agenda}]}
parser.load(config, agenda, 'CMDLINE_ARGS')
except NotFoundError:
msg = 'Agenda file "{}" does not exist, and there is no '\
'workload with that name.\nYou can get a list of available '\
'workloads by running "wa list workloads".'
raise ConfigError(msg.format(args.agenda))
executor = Executor()
executor.execute(config, output)
def set_up_output_directory(self, config, args):
if args.output_directory:
output_directory = args.output_directory
else:
output_directory = settings.default_output_directory
self.logger.debug('Using output directory: {}'.format(output_directory))
try:
executor = Executor(args.output_directory, args.force)
except RuntimeError:
self.logger.error('Output directory {} exists.'.format(args.output_directory))
self.logger.error('Please specify another location, or use -f option to overwrite.\n')
return 2
for path in settings.get_config_paths():
executor.load_config(path)
executor.load_agenda(args.agenda)
for itd in args.instruments_to_disable:
self.logger.debug('Globally disabling instrument "{}" (from command line option)'.format(itd))
executor.disable_instrument(itd)
executor.initialize()
executor.execute(selectors={'ids': args.only_run_ids})
executor.finalize()
return init_wa_output(output_directory, config, args.force)
except RuntimeError as e:
if 'path exists' in str(e):
msg = 'Output directory "{}" exists.\nPlease specify another '\
'location, or use -f option to overwrite.'
self.logger.critical(msg.format(output_directory))
sys.exit(1)
else:
raise e
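The --disable plumbing above relies on toggle_set semantics: a "~"-prefixed entry marks a plugin as explicitly disabled when the set is merged into the instrumentation list. A minimal sketch (the instrument name is illustrative; config is the ConfigManager passed to execute())::

    from wa.utils.types import toggle_set

    disabled = toggle_set(['~trace-cmd'])          # same effect as --disable trace-cmd
    config.jobs_config.disable_instruments(disabled)  # as in execute() above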


@@ -16,32 +16,42 @@
import textwrap
from wa.framework.plugin import Plugin
from wa.framework.entrypoint import init_argument_parser
from wa.framework.version import get_wa_version
from wa.utils.doc import format_body
def init_argument_parser(parser):
parser.add_argument('-c', '--config', action='append', default=[],
help='specify an additional config.py')
parser.add_argument('-v', '--verbose', action='count',
help='The scripts will produce verbose output.')
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(get_wa_version()))
return parser
class Command(Plugin):
"""
Defines a Workload Automation command. This will be executed from the command line as
``wa <command> [args ...]``. This defines the name to be used when invoking wa, the
code that will actually be executed on invocation and the argument parser to be used
to parse the rest of the command line arguments.
Defines a Workload Automation command. This will be executed from the
command line as ``wa <command> [args ...]``. This defines the name to be
used when invoking wa, the code that will actually be executed on
invocation and the argument parser to be used to parse the rest of the
command line arguments.
"""
kind = 'command'
kind = "command"
help = None
usage = None
description = None
epilog = None
formatter_class = None
def __init__(self, subparsers, **kwargs):
super(Command, self).__init__(**kwargs)
def __init__(self, subparsers):
super(Command, self).__init__()
self.group = subparsers
desc = format_body(textwrap.dedent(self.description), 80)
parser_params = dict(help=(self.help or self.description), usage=self.usage,
description=format_body(textwrap.dedent(self.description), 80),
epilog=self.epilog)
description=desc, epilog=self.epilog)
if self.formatter_class:
parser_params['formatter_class'] = self.formatter_class
self.parser = subparsers.add_parser(self.name, **parser_params)
@@ -50,19 +60,22 @@ class Command(Plugin):
def initialize(self, context):
"""
Perform command-specific initialisation (e.g. adding command-specific options to the command's
parser). ``context`` is always ``None``.
Perform command-specific initialisation (e.g. adding command-specific
options to the command's parser). ``context`` is always ``None``.
"""
pass
def execute(self, args):
def execute(self, state, args):
"""
Execute this command.
:args: An ``argparse.Namespace`` containing command line arguments (as returned by
``argparse.ArgumentParser.parse_args()``). This would usually be the result of
invoking ``self.parser``.
:state: An initialized ``ConfigManager`` that contains the current state of
WA execution up to that point (processed configuration, loaded
plugins, etc).
:args: An ``argparse.Namespace`` containing command line arguments (as
returned by ``argparse.ArgumentParser.parse_args()``). This would
usually be the result of invoking ``self.parser``.
"""
raise NotImplementedError()
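A minimal sketch of a custom command following the contract described above (the command itself is hypothetical)::

    class ShowVersionCommand(Command):

        name = 'show-version'
        description = 'Print the running WA version.'

        def initialize(self, context):
            self.parser.add_argument('--short', action='store_true',
                                     help='Print the bare version string.')

        def execute(self, state, args):
            from wa.framework.version import get_wa_version
            version = get_wa_version()
            print version if args.short else 'WA version: {}'.format(version)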


@@ -1,2 +1,19 @@
from wa.framework.configuration.core import settings, ConfigurationPoint, PluginConfiguration
from wa.framework.configuration.core import merge_config_values, WA_CONFIGURATION
# Copyright 2013-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wlauto.core.configuration.configuration import (settings,
RunConfiguration,
JobGenerator,
ConfigurationPoint)
from wlauto.core.configuration.plugin_cache import PluginCache

File diff suppressed because it is too large.


@@ -0,0 +1,42 @@
from wlauto.core.configuration.configuration import MetaConfiguration, RunConfiguration
from wlauto.core.configuration.plugin_cache import PluginCache
from wlauto.utils.serializer import yaml
from wlauto.utils.doc import strip_inlined_text
DEFAULT_INSTRUMENTS = ['execution_time',
'interrupts',
'cpufreq',
'status',
'standard',
'csv']
def _format_yaml_comment(param, short_description=False):
comment = param.description
comment = strip_inlined_text(comment)
if short_description:
comment = comment.split('\n\n')[0]
comment = comment.replace('\n', '\n# ')
comment = "# {}\n".format(comment)
return comment
def _format_instruments(output):
plugin_cache = PluginCache()
output.write("instrumentation:\n")
for plugin in DEFAULT_INSTRUMENTS:
plugin_cls = plugin_cache.loader.get_plugin_class(plugin)
output.writelines(_format_yaml_comment(plugin_cls, short_description=True))
output.write(" - {}\n".format(plugin))
output.write("\n")
def generate_default_config(path):
with open(path, 'w') as output:
for param in MetaConfiguration.config_points + RunConfiguration.config_points:
entry = {param.name: param.default}
comment = _format_yaml_comment(param)
output.writelines(comment)
yaml.dump(entry, output, default_flow_style=False)
output.write("\n")
_format_instruments(output)
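Usage sketch (the path is illustrative); the generated file interleaves commented descriptions with defaults, roughly in this shape (content illustrative)::

    import os
    generate_default_config(os.path.expanduser('~/.workload_automation/config.yaml'))
    # Produces entries of the form:
    #
    #   # <description of the config point>
    #   <name>: <default>
    #
    #   instrumentation:
    #   # <short description of the instrument>
    #    - execution_time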


@@ -1,67 +1,222 @@
from copy import copy
from collections import OrderedDict
import random
from itertools import izip_longest, groupby, chain
from wa.framework import pluginloader
from wa.framework.exception import ConfigError
from wa.framework.configuration.core import ConfigurationPoint
from wa.framework.utils.types import TreeNode, list_of, identifier
from wa.framework.configuration.core import (MetaConfiguration, RunConfiguration,
JobGenerator, settings)
from wa.framework.configuration.parsers import ConfigParser
from wa.framework.configuration.plugin_cache import PluginCache
class ExecConfig(object):
class CombinedConfig(object):
static_config_points = [
ConfigurationPoint(
'components',
kind=list_of(identifier),
description="""
Components to be activated.
""",
),
ConfigurationPoint(
'runtime_parameters',
kind=list_of(identifier),
aliases=['runtime_params'],
description="""
Runtime parameters to be set.
""",
),
ConfigurationPoint(
'classifiers',
kind=list_of(str),
description="""
Classifiers to be used. Classifiers are arbitrary key-value
pairs associated with this config. They may be used during output
processing and should be used to provide additional context for
collected results.
""",
),
]
@staticmethod
def from_pod(pod):
instance = CombinedConfig()
instance.settings = MetaConfiguration.from_pod(pod.get('settings', {}))
instance.run_config = RunConfiguration.from_pod(pod.get('run_config', {}))
return instance
config_points = None
def __init__(self, settings=None, run_config=None):
self.settings = settings
self.run_config = run_config
@classmethod
def _load(cls, load_global=False, loader=pluginloader):
if cls.config_points is None:
cls.config_points = {c.name: c for c in cls.static_config_points}
for plugin in loader.list_plugins():
cp = ConfigurationPoint(
plugin.name,
kind=OrderedDict,
description="""
Configuration for {} plugin.
""".format(plugin.name)
)
cls._add_config_point(plugin.name, cp)
for alias in plugin.aliases:
cls._add_config_point(alias.name, cp)
@classmethod
def _add_config_point(cls, name, cp):
if name in cls.config_points:
message = 'Config point for "{}" already exists ("{}")'
raise ValueError(message.format(name, cls.config_points[name].name))
def to_pod(self):
return {'settings': self.settings.to_pod(),
'run_config': self.run_config.to_pod()}
class JobStatus:
PENDING = 0
RUNNING = 1
OK = 2
FAILED = 3
PARTIAL = 4
ABORTED = 5
PASSED = 6
class GlobalExecConfig(ExecConfig):
class Job(object):
def __init__(self, spec, iteration, context):
self.spec = spec
self.iteration = iteration
self.context = context
self.status = 'new'
self.workload = None
self.output = None
def load(self, target, loader=pluginloader):
self.workload = loader.get_workload(self.spec.workload_name,
target,
**self.spec.workload_parameters)
self.workload.init_resources(self.context)
self.workload.validate()
class ConfigManager(object):
"""
Represents run-time state of WA. Mostly used as a container for loaded
configuration and discovered plugins.
This exists outside of any command or run and is associated with the running
instance of WA itself.
"""
@property
def enabled_instruments(self):
return self.jobs_config.enabled_instruments
@property
def job_specs(self):
if not self._jobs_generated:
msg = 'Attempting to access job specs before '\
'jobs have been generated'
raise RuntimeError(msg)
return [j.spec for j in self._jobs]
@property
def jobs(self):
if not self._jobs_generated:
msg = 'Attempting to access jobs before '\
'they have been generated'
raise RuntimeError(msg)
return self._jobs
def __init__(self, settings=settings):
self.settings = settings
self.run_config = RunConfiguration()
self.plugin_cache = PluginCache()
self.jobs_config = JobGenerator(self.plugin_cache)
self.loaded_config_sources = []
self._config_parser = ConfigParser()
self._jobs = []
self._jobs_generated = False
self.agenda = None
def load_config_file(self, filepath):
self._config_parser.load_from_path(self, filepath)
self.loaded_config_sources.append(filepath)
def load_config(self, values, source, wrap_exceptions=True):
self._config_parser.load(self, values, source)
self.loaded_config_sources.append(source)
def get_plugin(self, name=None, kind=None, *args, **kwargs):
return self.plugin_cache.get_plugin(name, kind, *args, **kwargs)
def get_instruments(self, target):
instruments = []
for name in self.enabled_instruments:
instruments.append(self.get_plugin(name, kind='instrument',
target=target))
return instruments
def finalize(self):
if not self.agenda:
msg = 'Attempting to finalize config before agenda has been set'
raise RuntimeError(msg)
self.run_config.merge_device_config(self.plugin_cache)
return CombinedConfig(self.settings, self.run_config)
def generate_jobs(self, context):
job_specs = self.jobs_config.generate_job_specs(context.tm)
exec_order = self.run_config.execution_order
for spec, i in permute_iterations(job_specs, exec_order):
job = Job(spec, i, context)
job.load(context.tm.target)
self._jobs.append(job)
self._jobs_generated = True
def permute_by_job(specs):
"""
This is the "classic" implementation that executes all iterations of a
workload spec before proceeding onto the next spec.
"""
for spec in specs:
for i in range(1, spec.iterations + 1):
yield (spec, i)
def permute_by_iteration(specs):
"""
Runs the first iteration for all benchmarks first, before proceeding to the
next iteration, i.e. A1, B1, C1, A2, B2, C2... instead of A1, A2, B1, B2,
C1, C2...
If multiple sections were specified in the agenda, this will run all
sections for the first global spec first, followed by all sections for the
second spec, etc.
e.g. given sections X and Y, and global specs A and B, with 2 iterations,
this will run
X.A1, Y.A1, X.B1, Y.B1, X.A2, Y.A2, X.B2, Y.B2
"""
groups = [list(g) for k, g in groupby(specs, lambda s: s.workload_id)]
all_tuples = []
for spec in chain(*groups):
all_tuples.append([(spec, i + 1)
for i in xrange(spec.iterations)])
for t in chain(*map(list, izip_longest(*all_tuples))):
if t is not None:
yield t
def permute_by_section(specs):
"""
Runs the first iteration for all benchmarks first, before proceeding to the
next iteration, i.e. A1, B1, C1, A2, B2, C2... instead of A1, A2, B1, B2,
C1, C2...
If multiple sections were specified in the agenda, this will run all specs
for the first section followed by all specs for the second section, etc.
e.g. given sections X and Y, and global specs A and B, with 2 iterations,
this will run
X.A1, X.B1, Y.A1, Y.B1, X.A2, X.B2, Y.A2, Y.B2
"""
groups = [list(g) for k, g in groupby(specs, lambda s: s.section_id)]
all_tuples = []
for spec in chain(*groups):
all_tuples.append([(spec, i + 1)
for i in xrange(spec.iterations)])
for t in chain(*map(list, izip_longest(*all_tuples))):
if t is not None:
yield t
def permute_randomly(specs):
"""
This will generate a random permutation of specs/iteration tuples.
"""
result = []
for spec in specs:
for i in xrange(1, spec.iterations + 1):
result.append((spec, i))
random.shuffle(result)
for t in result:
yield t
permute_map = {
'by_iteration': permute_by_iteration,
'by_job': permute_by_job,
'by_section': permute_by_section,
'random': permute_randomly,
}
def permute_iterations(specs, exec_order):
if exec_order not in permute_map:
msg = 'Unknown execution order "{}"; must be in: {}'
raise ValueError(msg.format(exec_order, permute_map.keys()))
return permute_map[exec_order](specs)
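A worked example of the orderings above, using stand-in specs rather than real JobSpec objects::

    from collections import namedtuple

    Spec = namedtuple('Spec', 'workload_id section_id iterations')
    specs = [Spec('A', 'X', 2), Spec('B', 'X', 2)]

    [(s.workload_id, i) for s, i in permute_by_job(specs)]
    # [('A', 1), ('A', 2), ('B', 1), ('B', 2)]

    [(s.workload_id, i) for s, i in permute_by_iteration(specs)]
    # [('A', 1), ('B', 1), ('A', 2), ('B', 2)]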


@@ -0,0 +1,308 @@
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from wlauto.exceptions import ConfigError
from wlauto.utils.serializer import read_pod, SerializerSyntaxError
from wlauto.utils.types import toggle_set, counter
from wlauto.core.configuration.configuration import JobSpec
###############
### Parsers ###
###############
class ConfigParser(object):
def load_from_path(self, state, filepath):
self.load(state, _load_file(filepath, "Config"), filepath)
def load(self, state, raw, source, wrap_exceptions=True): # pylint: disable=too-many-branches
try:
if 'run_name' in raw:
msg = '"run_name" can only be specified in the config '\
'section of an agenda'
raise ConfigError(msg)
if 'id' in raw:
raise ConfigError('"id" cannot be set globally')
merge_result_processors_instruments(raw)
# Get WA core configuration
for cfg_point in state.settings.configuration.itervalues():
value = get_aliased_param(cfg_point, raw)
if value is not None:
state.settings.set(cfg_point.name, value)
# Get run specific configuration
for cfg_point in state.run_config.configuration.itervalues():
value = get_aliased_param(cfg_point, raw)
if value is not None:
state.run_config.set(cfg_point.name, value)
# Get global job spec configuration
for cfg_point in JobSpec.configuration.itervalues():
value = get_aliased_param(cfg_point, raw)
if value is not None:
state.jobs_config.set_global_value(cfg_point.name, value)
for name, values in raw.iteritems():
# Assume that all leftover config is for a plug-in or a global
# alias; it is up to PluginCache to validate this assumption
state.plugin_cache.add_configs(name, values, source)
except ConfigError as e:
if wrap_exceptions:
raise ConfigError('Error in "{}":\n{}'.format(source, str(e)))
else:
raise e
class AgendaParser(object):
def load_from_path(self, state, filepath):
raw = _load_file(filepath, 'Agenda')
self.load(state, raw, filepath)
def load(self, state, raw, source):
try:
if not isinstance(raw, dict):
raise ConfigError('Invalid agenda, top level entry must be a dict')
self._populate_and_validate_config(state, raw, source)
sections = self._pop_sections(raw)
global_workloads = self._pop_workloads(raw)
if raw:
msg = 'Invalid top level agenda entry(ies): "{}"'
raise ConfigError(msg.format('", "'.join(raw.keys())))
sect_ids, wkl_ids = self._collect_ids(sections, global_workloads)
self._process_global_workloads(state, global_workloads, wkl_ids)
self._process_sections(state, sections, sect_ids, wkl_ids)
state.agenda = source
except (ConfigError, SerializerSyntaxError) as e:
raise ConfigError('Error in "{}":\n\t{}'.format(source, str(e)))
def _populate_and_validate_config(self, state, raw, source):
for name in ['config', 'global']:
entry = raw.pop(name, None)
if entry is None:
continue
if not isinstance(entry, dict):
msg = 'Invalid entry "{}" - must be a dict'
raise ConfigError(msg.format(name))
if 'run_name' in entry:
state.run_config.set('run_name', entry.pop('run_name'))
state.load_config(entry, source, wrap_exceptions=False)
def _pop_sections(self, raw):
sections = raw.pop("sections", [])
if not isinstance(sections, list):
raise ConfigError('Invalid entry "sections" - must be a list')
return sections
def _pop_workloads(self, raw):
workloads = raw.pop("workloads", [])
if not isinstance(workloads, list):
raise ConfigError('Invalid entry "workloads" - must be a list')
return workloads
def _collect_ids(self, sections, global_workloads):
seen_section_ids = set()
seen_workload_ids = set()
for workload in global_workloads:
workload = _get_workload_entry(workload)
_collect_valid_id(workload.get("id"), seen_workload_ids, "workload")
for section in sections:
_collect_valid_id(section.get("id"), seen_section_ids, "section")
for workload in section["workloads"] if "workloads" in section else []:
workload = _get_workload_entry(workload)
_collect_valid_id(workload.get("id"), seen_workload_ids,
"workload")
return seen_section_ids, seen_workload_ids
def _process_global_workloads(self, state, global_workloads, seen_wkl_ids):
for workload_entry in global_workloads:
workload = _process_workload_entry(workload_entry, seen_wkl_ids,
state.jobs_config)
state.jobs_config.add_workload(workload)
def _process_sections(self, state, sections, seen_sect_ids, seen_wkl_ids):
for section in sections:
workloads = []
for workload_entry in section.pop("workloads", []):
workload = _process_workload_entry(workload_entry, seen_wkl_ids,
state.jobs_config)
workloads.append(workload)
section = _construct_valid_entry(section, seen_sect_ids,
"s", state.jobs_config)
state.jobs_config.add_section(section, workloads)
########################
### Helper functions ###
########################
def get_aliased_param(cfg_point, d, default=None, pop=True):
"""
Given a ConfigurationPoint and a dict, this function will search the dict for
the ConfigurationPoint's name/aliases. If more than one is found it will raise
a ConfigError. If one (and only one) is found then it will return the value
for the ConfigurationPoint. If neither the name nor any of the aliases are
present in the dict, it will return the "default" parameter of this function.
"""
aliases = [cfg_point.name] + cfg_point.aliases
alias_map = [a for a in aliases if a in d]
if len(alias_map) > 1:
raise ConfigError(DUPLICATE_ENTRY_ERROR.format(aliases))
elif alias_map:
if pop:
return d.pop(alias_map[0])
else:
return d[alias_map[0]]
else:
return default
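Illustrative behaviour (a hypothetical ConfigurationPoint with one alias)::

    cp = ConfigurationPoint('iterations', kind=int, aliases=['iters'])

    get_aliased_param(cp, {'iters': 3})                   # -> 3 (and pops the key)
    get_aliased_param(cp, {}, default=1)                  # -> 1
    get_aliased_param(cp, {'iterations': 3, 'iters': 4})  # raises ConfigError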
def _load_file(filepath, error_name):
if not os.path.isfile(filepath):
raise ValueError("{} does not exist".format(filepath))
try:
raw = read_pod(filepath)
except SerializerSyntaxError as e:
raise ConfigError('Error parsing {} {}: {}'.format(error_name, filepath, e))
if not isinstance(raw, dict):
message = '{} does not contain a valid {} structure; top level must be a dict.'
raise ConfigError(message.format(filepath, error_name))
return raw
def merge_result_processors_instruments(raw):
instr_config = JobSpec.configuration['instrumentation']
instruments = toggle_set(get_aliased_param(instr_config, raw, default=[]))
result_processors = toggle_set(raw.pop('result_processors', []))
if instruments and result_processors:
conflicts = instruments.conflicts_with(result_processors)
if conflicts:
msg = '"instrumentation" and "result_processors" have '\
'conflicting entries: {}'
entries = ', '.join('"{}"'.format(c.strip("~")) for c in conflicts)
raise ConfigError(msg.format(entries))
raw['instrumentation'] = instruments.merge_with(result_processors)
def _pop_aliased(d, names, entry_id):
name_count = sum(1 for n in names if n in d)
if name_count > 1:
names_list = ', '.join(names)
msg = 'Invalid workload entry "{}": at most one of ({}) must be specified.'
raise ConfigError(msg.format(entry_id, names_list))
for name in names:
if name in d:
return d.pop(name)
return None
def _construct_valid_entry(raw, seen_ids, prefix, jobs_config):
workload_entry = {}
# Generate an automatic ID if the entry doesn't already have one
if 'id' not in raw:
while True:
new_id = '{}{}'.format(prefix, counter(name=prefix))
if new_id not in seen_ids:
break
workload_entry['id'] = new_id
seen_ids.add(new_id)
else:
workload_entry['id'] = raw.pop('id')
# Process instrumentation
merge_result_processors_instruments(raw)
# Validate all workload_entry fields
for name, cfg_point in JobSpec.configuration.iteritems():
value = get_aliased_param(cfg_point, raw)
if value is not None:
value = cfg_point.kind(value)
cfg_point.validate_value(name, value)
workload_entry[name] = value
wk_id = workload_entry['id']
param_names = ['workload_params', 'workload_parameters']
if prefix == 'wk':
param_names += ['params', 'parameters']
workload_entry["workload_parameters"] = _pop_aliased(raw, param_names, wk_id)
param_names = ['runtime_parameters', 'runtime_params']
if prefix == 's':
param_names += ['params', 'parameters']
workload_entry["runtime_parameters"] = _pop_aliased(raw, param_names, wk_id)
param_names = ['boot_parameters', 'boot_params']
workload_entry["boot_parameters"] = _pop_aliased(raw, param_names, wk_id)
if "instrumentation" in workload_entry:
jobs_config.update_enabled_instruments(workload_entry["instrumentation"])
# error if there are unknown entries remaining
if raw:
msg = 'Invalid entry(ies) in "{}": "{}"'
raise ConfigError(msg.format(workload_entry['id'], ', '.join(raw.keys())))
return workload_entry
def _collect_valid_id(entry_id, seen_ids, entry_type):
if entry_id is None:
return
if entry_id in seen_ids:
raise ConfigError('Duplicate {} ID "{}".'.format(entry_type, entry_id))
# "-" is reserved for joining section and workload IDs
if "-" in entry_id:
msg = 'Invalid {} ID "{}"; IDs cannot contain a "-"'
raise ConfigError(msg.format(entry_type, entry_id))
if entry_id == "global":
msg = 'Invalid {} ID "global"; is a reserved ID'
raise ConfigError(msg.format(entry_type))
seen_ids.add(entry_id)
def _get_workload_entry(workload):
if isinstance(workload, basestring):
workload = {'name': workload}
elif not isinstance(workload, dict):
raise ConfigError('Invalid workload entry: "{}"'.format(workload))
return workload
def _process_workload_entry(workload, seen_workload_ids, jobs_config):
workload = _get_workload_entry(workload)
workload = _construct_valid_entry(workload, seen_workload_ids,
"wk", jobs_config)
return workload


@@ -0,0 +1,227 @@
# Copyright 2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import copy
from collections import defaultdict
from itertools import chain
from devlib.utils.misc import memoized
from wa.framework import pluginloader
from wa.framework.exception import ConfigError
from wa.framework.target.descriptor import get_target_descriptions
from wa.utils.types import obj_dict
GENERIC_CONFIGS = ["device_config", "workload_parameters",
"boot_parameters", "runtime_parameters"]
class PluginCache(object):
"""
The plugin cache is used to store configuration that cannot be processed at
this stage, whether that's because it is unknown if it's needed
(in the case of disabled plug-ins) or it is not known what it belongs to (in
the case of "device_config" etc.). It also maintains where configuration came
from, and the priority order of said sources.
"""
def __init__(self, loader=pluginloader):
self.loader = loader
self.sources = []
self.plugin_configs = defaultdict(lambda: defaultdict(dict))
self.global_alias_values = defaultdict(dict)
self.targets = {td.name: td for td in get_target_descriptions()}
# Generate a mapping of which plugin each global alias belongs to
self._global_alias_map = defaultdict(dict)
self._list_of_global_aliases = set()
for plugin in self.loader.list_plugins():
for param in plugin.parameters:
if param.global_alias:
self._global_alias_map[plugin.name][param.global_alias] = param
self._list_of_global_aliases.add(param.global_alias)
def add_source(self, source):
if source in self.sources:
raise Exception("Source has already been added.")
self.sources.append(source)
def add_global_alias(self, alias, value, source):
if source not in self.sources:
msg = "Source '{}' has not been added to the plugin cache."
raise RuntimeError(msg.format(source))
if not self.is_global_alias(alias):
msg = "'{} is not a valid global alias'"
raise RuntimeError(msg.format(alias))
self.global_alias_values[alias][source] = value
def add_configs(self, plugin_name, values, source):
if self.is_global_alias(plugin_name):
self.add_global_alias(plugin_name, values, source)
return
for name, value in values.iteritems():
self.add_config(plugin_name, name, value, source)
def add_config(self, plugin_name, name, value, source):
if source not in self.sources:
msg = "Source '{}' has not been added to the plugin cache."
raise RuntimeError(msg.format(source))
if (not self.loader.has_plugin(plugin_name) and
plugin_name not in GENERIC_CONFIGS):
msg = 'configuration provided for unknown plugin "{}"'
raise ConfigError(msg.format(plugin_name))
if (plugin_name not in GENERIC_CONFIGS and
name not in self.get_plugin_parameters(plugin_name)):
msg = "'{}' is not a valid parameter for '{}'"
raise ConfigError(msg.format(name, plugin_name))
self.plugin_configs[plugin_name][source][name] = value
def is_global_alias(self, name):
return name in self._list_of_global_aliases
def get_plugin_config(self, plugin_name, generic_name=None):
config = obj_dict(not_in_dict=['name'])
config.name = plugin_name
if plugin_name not in GENERIC_CONFIGS:
self._set_plugin_defaults(plugin_name, config)
self._set_from_global_aliases(plugin_name, config)
if generic_name is None:
# Perform a simple merge with the order of sources representing
# priority
plugin_config = self.plugin_configs[plugin_name]
cfg_points = self.get_plugin_parameters(plugin_name)
for source in self.sources:
if source not in plugin_config:
continue
for name, value in plugin_config[source].iteritems():
cfg_points[name].set_value(config, value=value)
else:
# A more complicated merge that involves priority of sources and
# specificity
self._merge_using_priority_specificity(plugin_name, generic_name, config)
return config
def get_plugin(self, name, kind=None, *args, **kwargs):
config = self.get_plugin_config(name)
kwargs = dict(config.items() + kwargs.items())
return self.loader.get_plugin(name, kind=kind, *args, **kwargs)
@memoized
def get_plugin_parameters(self, name):
if name in self.targets:
return self._get_target_params(name)
params = self.loader.get_plugin_class(name).parameters
return {param.name: param for param in params}
def _set_plugin_defaults(self, plugin_name, config):
cfg_points = self.get_plugin_parameters(plugin_name)
for cfg_point in cfg_points.itervalues():
cfg_point.set_value(config, check_mandatory=False)
def _set_from_global_aliases(self, plugin_name, config):
for alias, param in self._global_alias_map[plugin_name].iteritems():
if alias in self.global_alias_values:
for source in self.sources:
if source not in self.global_alias_values[alias]:
continue
val = self.global_alias_values[alias][source]
param.set_value(config, value=val)
def _get_target_params(self, name):
td = self.targets[name]
params = {p.name: p for p in chain(td.target_params, td.platform_params)}
#params['connection_settings'] = {p.name: p for p in td.conn_params}
return params
# pylint: disable=too-many-nested-blocks, too-many-branches
def _merge_using_priority_specificity(self, specific_name,
generic_name, final_config):
"""
WA configuration can come from various sources of increasing priority,
as well as being specified in a generic and specific manner (e.g.
``device_config`` and ``nexus10`` respectively). WA has two rules for
the priority of configuration:
- Configuration from higher priority sources overrides
configuration from lower priority sources.
- More specific configuration overrides less specific configuration.
These two rules come into conflict when a generic configuration is given
in a config source of high priority and a specific configuration is given
in a config source of lower priority. In this situation it is not possible
to know the end user's intention, so WA will raise an error.
:param generic_name: The name of the generic configuration
e.g ``device_config``
:param specific_name: The name of the specific configuration used
e.g ``nexus10``
:param cfg_point: A dict of ``ConfigurationPoint``s to be used when
merging configuration. keys=config point name,
values=config point
:return: A fully merged and validated configuration in the form of an
obj_dict.
"""
generic_config = copy(self.plugin_configs[generic_name])
specific_config = copy(self.plugin_configs[specific_name])
cfg_points = self.get_plugin_parameters(specific_name)
sources = self.sources
seen_specific_config = defaultdict(list)
# set_value uses the 'name' attribute of the passed object in its error
# messages; to ensure these messages make sense, the name has to be
# changed several times during this function.
final_config.name = specific_name
# pylint: disable=too-many-nested-blocks
for source in sources:
try:
if source in generic_config:
final_config.name = generic_name
for name, cfg_point in cfg_points.iteritems():
if name in generic_config[source]:
if name in seen_specific_config:
msg = ('"{generic_name}" configuration "{config_name}" has already been '
'specified more specifically for {specific_name} in:\n\t\t{sources}')
msg = msg.format(generic_name=generic_name,
config_name=name,
specific_name=specific_name,
sources=", ".join(seen_specific_config[name]))
raise ConfigError(msg)
value = generic_config[source][name]
cfg_point.set_value(final_config, value, check_mandatory=False)
if source in specific_config:
final_config.name = specific_name
for name, cfg_point in cfg_points.iteritems():
if name in specific_config[source]:
seen_specific_config[name].append(str(source))
value = specific_config[source][name]
cfg_point.set_value(final_config, value, check_mandatory=False)
except ConfigError as e:
raise ConfigError('Error in "{}":\n\t{}'.format(source, str(e)))
# Validate final configuration
final_config.name = specific_name
for cfg_point in cfg_points.itervalues():
cfg_point.validate(final_config)
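An illustration of the conflict case described in the docstring above (the sources, target name, and parameter are hypothetical)::

    cache = PluginCache()
    cache.add_source('config-1')  # lower priority
    cache.add_source('config-2')  # higher priority

    # Specific config in the lower-priority source...
    cache.add_configs('nexus10', {'device': 'Nexus10'}, 'config-1')
    # ...generic config for the same parameter in the higher-priority source.
    cache.add_configs('device_config', {'device': 'generic'}, 'config-2')

    # The user's intent is ambiguous, so this raises ConfigError:
    cache.get_plugin_config('nexus10', generic_name='device_config')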


@@ -0,0 +1,89 @@
# Copyright 2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class JobSpecSource(object):
kind = ""
def __init__(self, config, parent=None):
self.config = config
self.parent = parent
@property
def id(self):
return self.config['id']
@property
def name(self):
raise NotImplementedError()
class WorkloadEntry(JobSpecSource):
kind = "workload"
@property
def name(self):
if self.parent.id == "global":
return 'workload "{}"'.format(self.id)
else:
return 'workload "{}" from section "{}"'.format(self.id, self.parent.id)
class SectionNode(JobSpecSource):
kind = "section"
@property
def name(self):
if self.id == "global":
return "globally specified configuration"
else:
return 'section "{}"'.format(self.id)
@property
def is_leaf(self):
return not bool(self.children)
def __init__(self, config, parent=None):
super(SectionNode, self).__init__(config, parent=parent)
self.workload_entries = []
self.children = []
def add_section(self, section):
new_node = SectionNode(section, parent=self)
self.children.append(new_node)
return new_node
def add_workload(self, workload_config):
self.workload_entries.append(WorkloadEntry(workload_config, self))
def descendants(self):
for child in self.children:
for n in child.descendants():
yield n
yield child
def ancestors(self):
if self.parent is not None:
yield self.parent
for ancestor in self.parent.ancestors():
yield ancestor
def leaves(self):
if self.is_leaf:
yield self
else:
for n in self.descendants():
if n.is_leaf:
yield n
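Sketch: building a small section tree and enumerating it (the config dicts are minimal stand-ins)::

    root = SectionNode({'id': 'global'})
    a = root.add_section({'id': 'a'})
    b = root.add_section({'id': 'b'})
    a1 = a.add_section({'id': 'a1'})

    [n.id for n in root.descendants()]  # ['a1', 'a', 'b']
    [n.id for n in root.leaves()]       # ['a1', 'b']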


@@ -12,72 +12,100 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import argparse
import logging
import os
import subprocess
from wa.framework import pluginloader, log
from wa.framework.configuration import settings
from wa.framework.exception import WAError
from wa.utils.doc import format_body
from wa.utils.misc import init_argument_parser
import warnings
from wa.framework import pluginloader
from wa.framework.command import init_argument_parser
from wa.framework.configuration import settings
from wa.framework.configuration.execution import ConfigManager
from wa.framework.host import init_user_directory
from wa.framework.exception import WAError, DevlibError, ConfigError
from wa.utils import log
from wa.utils.doc import format_body
from wa.utils.misc import get_traceback
warnings.filterwarnings(action='ignore', category=UserWarning, module='zope')
logger = logging.getLogger('wa')
def init_settings():
settings.load_environment()
if not os.path.isdir(settings.user_directory):
settings.initialize_user_directory()
settings.load_user_config()
def get_argument_parser():
description = ("Execute automated workloads on a remote device and process "
"the resulting output.\n\nUse \"wa <subcommand> -h\" to see "
"help for individual subcommands.")
parser = argparse.ArgumentParser(description=format_body(description, 80),
prog='wa',
formatter_class=argparse.RawDescriptionHelpFormatter,
)
init_argument_parser(parser)
return parser
logger = logging.getLogger('command_line')
def load_commands(subparsers):
commands = {}
for command in pluginloader.list_commands():
commands[command.name] = pluginloader.get_command(command.name, subparsers=subparsers)
commands[command.name] = pluginloader.get_command(command.name,
subparsers=subparsers)
return commands
def main():
if not os.path.exists(settings.user_directory):
init_user_directory()
try:
log.init()
init_settings()
parser = get_argument_parser()
commands = load_commands(parser.add_subparsers(dest='command')) # each command will add its own subparser
description = ("Execute automated workloads on a remote device and process "
"the resulting output.\n\nUse \"wa <subcommand> -h\" to see "
"help for individual subcommands.")
parser = argparse.ArgumentParser(description=format_body(description, 80),
prog='wa',
formatter_class=argparse.RawDescriptionHelpFormatter,
)
init_argument_parser(parser)
# load_commands will trigger plugin enumeration, and we want logging
# to be enabled for that, which requires the verbosity setting; however
# full argument parsing cannot be completed until the commands are
# loaded, so parse just the base args for now to get the verbosity.
args, _ = parser.parse_known_args()
settings.set("verbosity", args.verbose)
log.init(settings.verbosity)
# each command will add its own subparser
commands = load_commands(parser.add_subparsers(dest='command'))
args = parser.parse_args()
settings.set('verbosity', args.verbose)
if args.config:
settings.load_config_file(args.config)
log.set_level(settings.verbosity)
config = ConfigManager()
config.load_config_file(settings.user_config_file)
for config_file in args.config:
if not os.path.exists(config_file):
raise ConfigError("Config file {} not found".format(config_file))
config.load_config_file(config_file)
command = commands[args.command]
sys.exit(command.execute(args))
sys.exit(command.execute(config, args))
except KeyboardInterrupt:
logging.info('Got CTRL-C. Aborting.')
sys.exit(3)
except (WAError, DevlibError) as e:
logging.critical(e)
sys.exit(1)
except subprocess.CalledProcessError as e:
tb = get_traceback()
logging.critical(tb)
command = e.cmd
if e.args:
command = '{} {}'.format(command, ' '.join(e.args))
message = 'Command \'{}\' returned non-zero exit status {}\nOUTPUT:\n{}\n'
logging.critical(message.format(command, e.returncode, e.output))
sys.exit(2)
except SyntaxError as e:
tb = get_traceback()
logging.critical(tb)
message = 'Syntax Error in {}, line {}, offset {}:'
logging.critical(message.format(e.filename, e.lineno, e.offset))
logging.critical('\t{}'.format(e.msg))
sys.exit(2)
except Exception as e: # pylint: disable=broad-except
log_error(e, logger, critical=True)
if isinstance(e, WAError):
sys.exit(2)
else:
sys.exit(3)
tb = get_traceback()
logging.critical(tb)
logging.critical('{}({})'.format(e.__class__.__name__, e))
sys.exit(2)
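The two-phase parsing trick used above, in isolation (the flags and argv are illustrative)::

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--verbose', action='count')

    # Pass 1: known args only, so verbosity is available before plugin
    # enumeration registers the subcommand parsers.
    args, _ = parser.parse_known_args(['-vv', 'run', 'agenda.yaml'])
    assert args.verbose == 2  # 'run agenda.yaml' is deferred to pass 2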


@@ -12,7 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wa.utils.misc import get_traceback, TimeoutError # NOQA pylint: disable=W0611
from devlib.exception import (DevlibError, HostError, TimeoutError,
TargetError, TargetNotRespondingError)
from wa.utils.misc import get_traceback
class WAError(Exception):
@@ -35,11 +38,6 @@ class WorkloadError(WAError):
pass
class HostError(WAError):
"""Problem with the host on which WA is running."""
pass
class JobError(WAError):
"""Job execution error."""
pass
@@ -113,7 +111,8 @@ class PluginLoaderError(WAError):
if isinstance(orig, WAError):
reason = 'because of:\n{}: {}'.format(orig_name, orig)
else:
reason = 'because of:\n{}\n{}: {}'.format(get_traceback(self.exc_info), orig_name, orig)
text = 'because of:\n{}\n{}: {}'
reason = text.format(get_traceback(self.exc_info), orig_name, orig)
return '\n'.join([self.message, reason])
else:
return self.message
@@ -121,10 +120,12 @@ class PluginLoaderError(WAError):
class WorkerThreadError(WAError):
"""
This should get raised in the main thread if a non-WAError-derived exception occurs on
a worker/background thread. If a WAError-derived exception is raised in the worker, then
that exception should be re-raised on the main thread directly -- the main point of this is
to preserve the backtrace in the output, and backtrace doesn't get output for WAErrors.
This should get raised in the main thread if a non-WAError-derived
exception occurs on a worker/background thread. If a WAError-derived
exception is raised in the worker, then that exception should be
re-raised on the main thread directly -- the main point of this is to
preserve the backtrace in the output, and backtrace doesn't get output for
WAErrors.
"""
@@ -133,7 +134,8 @@ class WorkerThreadError(WAError):
self.exc_info = exc_info
orig = self.exc_info[1]
orig_name = type(orig).__name__
message = 'Exception of type {} occurred on thread {}:\n'.format(orig_name, thread)
message += '{}\n{}: {}'.format(get_traceback(self.exc_info), orig_name, orig)
text = 'Exception of type {} occurred on thread {}:\n{}\n{}: {}'
message = text.format(orig_name, thread, get_traceback(self.exc_info),
orig_name, orig)
super(WorkerThreadError, self).__init__(message)

File diff suppressed because it is too large.


@@ -1,23 +1,33 @@
import os
from wa.framework.configuration import settings
from wa.framework.exception import ConfigError
from wa.utils.misc import ensure_directory_exists
from wlauto.core.configuration import settings
class HostRunConfig(object):
def init_user_directory(overwrite_existing=False): # pylint: disable=R0914
"""
Host-side configuration for a run.
Initialise a fresh user directory.
"""
if os.path.exists(settings.user_directory):
if not overwrite_existing:
raise RuntimeError('Environment {} already exists.'.format(settings.user_directory))
shutil.rmtree(settings.user_directory)
def __init__(self, output_directory,
run_info_directory=None,
run_config_directory=None):
self.output_directory = output_directory
self.run_info_directory = run_info_directory or os.path.join(self.output_directory, '_info')
self.run_config_directory = run_config_directory or os.path.join(self.output_directory, '_config')
os.makedirs(settings.user_directory)
os.makedirs(settings.dependencies_directory)
os.makedirs(settings.plugins_directory)
def initialize(self):
ensure_directory_exists(self.output_directory)
ensure_directory_exists(self.run_info_directory)
ensure_directory_exists(self.run_config_directory)
# TODO: generate default config.yaml here
if os.getenv('USER') == 'root':
# If running with sudo on POSIX, change the ownership to the real user.
real_user = os.getenv('SUDO_USER')
if real_user:
import pwd # done here as module won't import on win32
user_entry = pwd.getpwnam(real_user)
uid, gid = user_entry.pw_uid, user_entry.pw_gid
os.chown(settings.user_directory, uid, gid)
# why, oh why isn't there a recursive=True option for os.chown?
for root, dirs, files in os.walk(settings.user_directory):
for d in dirs:
os.chown(os.path.join(root, d), uid, gid)
for f in files:
os.chown(os.path.join(root, f), uid, gid)


@@ -0,0 +1,399 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Adding New Instrument
=====================
Any new instrument should be a subclass of Instrument and it must have a name.
When a new instrument is added to Workload Automation, the methods of the new
instrument will be found automatically and hooked up to the supported signals.
Once a signal is broadcast, the corresponding registered method is invoked.
Each method in Instrument must take two arguments, which are self and context.
Supported signals can be found in [... link to signals ...] To make
implementations easier and more uniform, the basic steps to add a new
instrument are similar to the steps to add a new workload.
Hence, implementing the following methods is sufficient to add a new instrument:
- setup: This method is invoked after the workload is set up. All the
necessary setup should go inside this method. Setup includes operations
like pushing files to the target device, installing them, clearing logs,
etc.
- start: It is invoked just before the workload starts execution. This is
where the instrument's measurements start being registered/taken.
- stop: It is invoked just after the workload execution stops. This is
where the measurements should stop being taken/registered.
- update_result: It is invoked after the workload has updated its result.
update_result is where the taken measurements are added to the result so
they can be processed by Workload Automation.
- teardown: It is invoked after the workload is torn down. It is a good
place to clean up any logs generated by the instrument.
For example, to add an instrument which will trace device errors, we subclass
Instrument and override the name variable. ::
BINARY_FILE = os.path.join(os.path.dirname(__file__), 'trace')
class TraceErrorsInstrument(Instrument):
name = 'trace-errors'
def __init__(self, device):
super(TraceErrorsInstrument, self).__init__(device)
self.trace_on_device = os.path.join(self.device.working_directory, 'trace')
We then declare and implement the aforementioned methods. For the setup method,
we want to push the file to the target device and then change the file mode to
755 ::
def setup(self, context):
self.device.push(BINARY_FILE, self.device.working_directory)
self.device.execute('chmod 755 {}'.format(self.trace_on_device))
Then we implement the start method, which will simply run the file to start
tracing. ::
def start(self, context):
self.device.execute('{} start'.format(self.trace_on_device))
Lastly, we need to stop tracing once the workload stops and this happens in the
stop method::
def stop(self, context):
self.device.execute('{} stop'.format(self.trace_on_device))
The generated result can be updated inside update_result, or if it is a trace,
we just pull the file to the host. context has a result variable which
has an add_metric method. It can be used to add the instrument's metrics
to the final result for the workload. The method can be passed 4 params:
the metric key, value, unit and lower_is_better, which is a boolean. ::
def update_result(self, context):
# pull the trace file to the device
result = os.path.join(self.device.working_directory, 'trace.txt')
self.device.pull(result, context.working_directory)
# parse the file if needs to be parsed, or add result to
# context.result
At the end, we might want to delete any files generated by the instrument,
and the code to clean these files up goes in the teardown method. ::
def teardown(self, context):
self.device.remove(os.path.join(self.device.working_directory, 'trace.txt'))
"""
import logging
import inspect
from collections import OrderedDict
import wa.framework.signal as signal
from wa.framework.plugin import Plugin
from wa.framework.exception import WAError, TargetNotRespondingError, TimeoutError
from wa.utils.misc import get_traceback, isiterable
from wa.utils.types import identifier
logger = logging.getLogger('instrumentation')
# Maps method names onto signals they should be registered to.
# Note: the begin/end signals are paired -- if a begin_ signal is sent,
# then the corresponding end_ signal is guaranteed to also be sent.
# Note: using OrderedDict to preserve logical ordering for the table generated
# in the documentation
SIGNAL_MAP = OrderedDict([
# Below are "aliases" for some of the more common signals to allow
# instrumentation to have similar structure to workloads
('initialize', signal.SUCCESSFUL_RUN_INIT),
# ('setup', signal.SUCCESSFUL_WORKLOAD_SETUP),
# ('start', signal.BEFORE_WORKLOAD_EXECUTION),
# ('stop', signal.AFTER_WORKLOAD_EXECUTION),
# ('process_workload_result', signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE),
# ('update_result', signal.AFTER_WORKLOAD_RESULT_UPDATE),
# ('teardown', signal.AFTER_WORKLOAD_TEARDOWN),
# ('finalize', signal.RUN_FIN),
# ('on_run_start', signal.RUN_START),
# ('on_run_end', signal.RUN_END),
# ('on_workload_spec_start', signal.WORKLOAD_SPEC_START),
# ('on_workload_spec_end', signal.WORKLOAD_SPEC_END),
# ('on_iteration_start', signal.ITERATION_START),
# ('on_iteration_end', signal.ITERATION_END),
# ('before_initial_boot', signal.BEFORE_INITIAL_BOOT),
# ('on_successful_initial_boot', signal.SUCCESSFUL_INITIAL_BOOT),
# ('after_initial_boot', signal.AFTER_INITIAL_BOOT),
# ('before_first_iteration_boot', signal.BEFORE_FIRST_ITERATION_BOOT),
# ('on_successful_first_iteration_boot', signal.SUCCESSFUL_FIRST_ITERATION_BOOT),
# ('after_first_iteration_boot', signal.AFTER_FIRST_ITERATION_BOOT),
# ('before_boot', signal.BEFORE_BOOT),
# ('on_successful_boot', signal.SUCCESSFUL_BOOT),
# ('after_boot', signal.AFTER_BOOT),
# ('on_spec_init', signal.SPEC_INIT),
# ('on_run_init', signal.RUN_INIT),
# ('on_iteration_init', signal.ITERATION_INIT),
# ('before_workload_setup', signal.BEFORE_WORKLOAD_SETUP),
# ('on_successful_workload_setup', signal.SUCCESSFUL_WORKLOAD_SETUP),
# ('after_workload_setup', signal.AFTER_WORKLOAD_SETUP),
# ('before_workload_execution', signal.BEFORE_WORKLOAD_EXECUTION),
# ('on_successful_workload_execution', signal.SUCCESSFUL_WORKLOAD_EXECUTION),
# ('after_workload_execution', signal.AFTER_WORKLOAD_EXECUTION),
# ('before_workload_result_update', signal.BEFORE_WORKLOAD_RESULT_UPDATE),
# ('on_successful_workload_result_update', signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE),
# ('after_workload_result_update', signal.AFTER_WORKLOAD_RESULT_UPDATE),
# ('before_workload_teardown', signal.BEFORE_WORKLOAD_TEARDOWN),
# ('on_successful_workload_teardown', signal.SUCCESSFUL_WORKLOAD_TEARDOWN),
# ('after_workload_teardown', signal.AFTER_WORKLOAD_TEARDOWN),
# ('before_overall_results_processing', signal.BEFORE_OVERALL_RESULTS_PROCESSING),
# ('on_successful_overall_results_processing', signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING),
# ('after_overall_results_processing', signal.AFTER_OVERALL_RESULTS_PROCESSING),
# ('on_error', signal.ERROR_LOGGED),
# ('on_warning', signal.WARNING_LOGGED),
])
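# For example, an instrument that defines an initialize(self, context) method
# will have it connected to (and invoked on) SUCCESSFUL_RUN_INIT.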
PRIORITY_MAP = OrderedDict([
('very_fast_', 20),
('fast_', 10),
('normal_', 0),
('slow_', -10),
('very_slow_', -20),
])
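# For example, a method named 'fast_initialize' is stripped of its prefix and
# connected to the 'initialize' signal with priority 10, so it is invoked
# before plain 'initialize' callbacks (priority 0) on other instruments.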
installed = []
def is_installed(instrument):
if isinstance(instrument, Instrument):
if instrument in installed:
return True
if instrument.name in [i.name for i in installed]:
return True
elif isinstance(instrument, type):
if instrument in [i.__class__ for i in installed]:
return True
else: # assume string
if identifier(instrument) in [identifier(i.name) for i in installed]:
return True
return False
def is_enabled(instrument):
if isinstance(instrument, Instrument) or isinstance(instrument, type):
name = instrument.name
else: # assume string
name = instrument
try:
installed_instrument = get_instrument(name)
return installed_instrument.is_enabled
except ValueError:
return False
failures_detected = False
def reset_failures():
global failures_detected # pylint: disable=W0603
failures_detected = False
def check_failures():
result = failures_detected
reset_failures()
return result
class ManagedCallback(object):
"""
This wraps instruments' callbacks to ensure that errors do not interfere
with run execution.
"""
def __init__(self, instrument, callback):
self.instrument = instrument
self.callback = callback
def __call__(self, context):
if self.instrument.is_enabled:
try:
self.callback(context)
except (KeyboardInterrupt, TargetNotRespondingError, TimeoutError): # pylint: disable=W0703
raise
except Exception as e: # pylint: disable=W0703
logger.error('Error in instrument {}'.format(self.instrument.name))
global failures_detected # pylint: disable=W0603
failures_detected = True
if isinstance(e, WAError):
logger.error(e)
else:
tb = get_traceback()
logger.error(tb)
logger.error('{}({})'.format(e.__class__.__name__, e))
if not context.current_iteration:
# Error occurred outside of an iteration (most likely
# during initial setup or teardown). Since this would affect
# the rest of the run, mark the instrument as broken so that
# it doesn't get re-enabled for subsequent iterations.
self.instrument.is_broken = True
disable(self.instrument)
# Need this to keep track of callbacks, because the dispatcher only keeps
# weak references, so if the callbacks aren't referenced elsewhere, they will
# be deallocated before they've had a chance to be invoked.
_callbacks = []
def install(instrument):
"""
This will look for methods (or any callable members) with specific names
in the instrument and hook them up to the corresponding signals.
:param instrument: Instrument instance to install.
"""
logger.debug('Installing instrument %s.', instrument)
if is_installed(instrument):
raise ValueError('Instrument {} is already installed.'.format(instrument.name))
for attr_name in dir(instrument):
priority = 0
stripped_attr_name = attr_name
for key, value in PRIORITY_MAP.iteritems():
if attr_name.startswith(key):
stripped_attr_name = attr_name[len(key):]
priority = value
break
if stripped_attr_name in SIGNAL_MAP:
attr = getattr(instrument, attr_name)
if not callable(attr):
raise ValueError('Attribute {} not callable in {}.'.format(attr_name, instrument))
argspec = inspect.getargspec(attr)
arg_num = len(argspec.args)
# Instrument callbacks will be passed exactly two arguments: self
# (the instrument instance to which the callback is bound) and
# context. However, we also allow callbacks to capture the context
# in variable arguments (declared as "*args" in the definition).
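# e.g. both "def initialize(self, context)" and "def initialize(self, *args)"
# pass the check below.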
if arg_num > 2 or (arg_num < 2 and argspec.varargs is None):
message = '{} must take exactly 2 positional arguments; {} given.'
raise ValueError(message.format(attr_name, arg_num))
logger.debug('\tConnecting %s to %s', attr.__name__, SIGNAL_MAP[stripped_attr_name])
mc = ManagedCallback(instrument, attr)
_callbacks.append(mc)
signal.connect(mc, SIGNAL_MAP[stripped_attr_name], priority=priority)
installed.append(instrument)
def uninstall(instrument):
instrument = get_instrument(instrument)
installed.remove(instrument)
def validate():
for instrument in installed:
instrument.validate()
def get_instrument(inst):
if isinstance(inst, Instrument):
return inst
for installed_inst in installed:
if identifier(installed_inst.name) == identifier(inst):
return installed_inst
raise ValueError('Instrument {} is not installed'.format(inst))
def disable_all():
for instrument in installed:
_disable_instrument(instrument)
def enable_all():
for instrument in installed:
_enable_instrument(instrument)
def enable(to_enable):
if isiterable(to_enable):
for inst in to_enable:
_enable_instrument(inst)
else:
_enable_instrument(to_enable)
def disable(to_disable):
if isiterable(to_disable):
for inst in to_disable:
_disable_instrument(inst)
else:
_disable_instrument(to_disable)
def _enable_instrument(inst):
inst = get_instrument(inst)
if not inst.is_broken:
logger.debug('Enabling instrument {}'.format(inst.name))
inst.is_enabled = True
else:
logger.debug('Not enabling broken instrument {}'.format(inst.name))
def _disable_instrument(inst):
inst = get_instrument(inst)
if inst.is_enabled:
logger.debug('Disabling instrument {}'.format(inst.name))
inst.is_enabled = False
def get_enabled():
return [i for i in installed if i.is_enabled]
def get_disabled():
return [i for i in installed if not i.is_enabled]
class Instrument(Plugin):
"""
Base class for instrumentation implementations.
"""
kind = "instrument"
def __init__(self, target, **kwargs):
super(Instrument, self).__init__(**kwargs)
self.target = target
self.is_enabled = True
self.is_broken = False
def initialize(self, context):
pass
def finalize(self, context):
pass
def __str__(self):
return self.name
def __repr__(self):
return 'Instrument({})'.format(self.name)
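A minimal concrete instrument under this ``target``-based API might look like
the following -- a sketch, with the name and the logged attribute purely
illustrative::

    class ExampleInstrument(Instrument):

        name = 'example'

        def initialize(self, context):
            # connected to SUCCESSFUL_RUN_INIT via SIGNAL_MAP above
            self.logger.info('Target ABI: {}'.format(self.target.abi))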

362
wa/framework/old_output.py Normal file
View File

@ -0,0 +1,362 @@
import os
import shutil
import logging
import uuid
from copy import copy
from datetime import datetime, timedelta
from wa.framework import signal, log
from wa.framework.configuration.core import merge_config_values
from wa.utils import serializer
from wa.utils.misc import enum_metaclass, ensure_directory_exists as _d
from wa.utils.types import numeric
class Status(object):
__metaclass__ = enum_metaclass('values', return_name=True)
values = [
'NEW',
'PENDING',
'RUNNING',
'COMPLETE',
'OK',
'OKISH',
'NONCRITICAL',
'PARTIAL',
'FAILED',
'ABORTED',
'SKIPPED',
'UNKNOWN',
]
class WAOutput(object):
basename = '.wa-output'
@classmethod
def load(cls, source):
if os.path.isfile(source):
pod = serializer.load(source)
elif os.path.isdir(source):
pod = serializer.load(os.path.join(source, cls.basename))
else:
message = 'Cannot load {} from {}'
raise ValueError(message.format(cls.__name__, source))
return cls.from_pod(pod)
@classmethod
def from_pod(cls, pod):
instance = cls(pod['output_directory'])
instance.status = pod['status']
instance.metrics = [Metric.from_pod(m) for m in pod['metrics']]
instance.artifacts = [Artifact.from_pod(a) for a in pod['artifacts']]
instance.events = [RunEvent.from_pod(e) for e in pod['events']]
instance.classifiers = pod['classifiers']
return instance
def __init__(self, output_directory):
self.logger = logging.getLogger('output')
self.output_directory = output_directory
self.status = Status.UNKNOWN
self.classifiers = {}
self.metrics = []
self.artifacts = []
self.events = []
def initialize(self, overwrite=False):
if os.path.exists(self.output_directory):
if not overwrite:
raise RuntimeError('"{}" already exists.'.format(self.output_directory))
self.logger.info('Removing existing output directory.')
shutil.rmtree(self.output_directory)
self.logger.debug('Creating output directory {}'.format(self.output_directory))
os.makedirs(self.output_directory)
def add_metric(self, name, value, units=None, lower_is_better=False, classifiers=None):
classifiers = merge_config_values(self.classifiers, classifiers or {})
self.metrics.append(Metric(name, value, units, lower_is_better, classifiers))
def add_artifact(self, name, path, kind, *args, **kwargs):
path = _check_artifact_path(path, self.output_directory)
self.artifacts.append(Artifact(name, path, kind, Artifact.RUN, *args, **kwargs))
def get_path(self, subpath):
return os.path.join(self.output_directory, subpath)
def to_pod(self):
return {
'output_directory': self.output_directory,
'status': self.status,
'metrics': [m.to_pod() for m in self.metrics],
'artifacts': [a.to_pod() for a in self.artifacts],
'events': [e.to_pod() for e in self.events],
'classifiers': copy(self.classifiers),
}
def persist(self):
statefile = os.path.join(self.output_directory, self.basename)
with open(statefile, 'wb') as wfh:
serializer.dump(self, wfh)
class RunInfo(object):
default_name_format = 'wa-run-%y%m%d-%H%M%S'
def __init__(self, project=None, project_stage=None, name=None):
self.uuid = uuid.uuid4()
self.project = project
self.project_stage = project_stage
self.name = name or datetime.now().strftime(self.default_name_format)
self.start_time = None
self.end_time = None
self.duration = None
@staticmethod
def from_pod(pod):
instance = RunInfo()
instance.uuid = uuid.UUID(pod['uuid'])
instance.project = pod['project']
instance.project_stage = pod['project_stage']
instance.name = pod['name']
instance.start_time = pod['start_time']
instance.end_time = pod['end_time']
instance.duration = timedelta(seconds=pod['duration'])
return instance
def to_pod(self):
d = copy(self.__dict__)
d['uuid'] = str(self.uuid)
d['duration'] = self.duration.days * 3600 * 24 + self.duration.seconds
return d
class RunOutput(WAOutput):
@property
def info_directory(self):
return _d(os.path.join(self.output_directory, '_info'))
@property
def config_directory(self):
return _d(os.path.join(self.output_directory, '_config'))
@property
def failed_directory(self):
return _d(os.path.join(self.output_directory, '_failed'))
@property
def log_file(self):
return os.path.join(self.output_directory, 'run.log')
@classmethod
def from_pod(cls, pod):
instance = WAOutput.from_pod(pod)
instance.info = RunInfo.from_pod(pod['info'])
instance.jobs = [JobOutput.from_pod(i) for i in pod['jobs']]
instance.failed = [JobOutput.from_pod(i) for i in pod['failed']]
return instance
def __init__(self, output_directory):
super(RunOutput, self).__init__(output_directory)
self.logger = logging.getLogger('output')
self.info = RunInfo()
self.jobs = []
self.failed = []
def initialize(self, overwrite=False):
super(RunOutput, self).initialize(overwrite)
log.add_file(self.log_file)
self.add_artifact('runlog', self.log_file, 'log')
def create_job_output(self, id):
outdir = os.path.join(self.output_directory, id)
job_output = JobOutput(outdir)
self.jobs.append(job_output)
return job_output
def move_failed(self, job_output):
basename = os.path.basename(job_output.output_directory)
i = 1
dest = os.path.join(self.failed_directory, '{}-{}'.format(basename, i))
while os.path.exists(dest):
i += 1
# recompute from basename so multi-digit suffixes are handled correctly
dest = os.path.join(self.failed_directory, '{}-{}'.format(basename, i))
shutil.move(job_output.output_directory, dest)
def to_pod(self):
pod = super(RunOutput, self).to_pod()
pod['info'] = self.info.to_pod()
pod['jobs'] = [i.to_pod() for i in self.jobs]
pod['failed'] = [i.to_pod() for i in self.failed]
return pod
class JobOutput(WAOutput):
def add_artifact(self, name, path, kind, *args, **kwargs):
path = _check_artifact_path(path, self.output_directory)
self.artifacts.append(Artifact(name, path, kind, Artifact.ITERATION, *args, **kwargs))
class Artifact(object):
"""
This is an artifact generated during execution/post-processing of a workload.
Unlike metrics, this represents an actual artifact, such as a file, that has been
generated. This may be a "result", such as a trace, or "metadata", such as logs.
These are distinguished using the ``kind`` attribute, which also helps WA decide
how it should be handled. Currently supported kinds are:
:log: A log file. Not part of "results" as such but contains information about the
run/workload execution that may be useful for diagnostics/meta analysis.
:meta: A file containing metadata. This is not part of "results", but contains
information that may be necessary to reproduce the results (contrast with
``log`` artifacts which are *not* necessary).
:data: This file contains new data, not available otherwise and should be considered
part of the "results" generated by WA. Most traces would fall into this category.
:export: Exported version of results or some other artifact. This signifies that
this artifact does not contain any new data that is not available
elsewhere and that it may be safely discarded without losing information.
:raw: Signifies that this is a raw dump/log that is normally processed to extract
useful information and is then discarded. In a sense, it is the opposite of
``export``, but in general may also be discarded.
.. note:: whether a file is marked as ``log``/``data`` or ``raw`` depends on
how important it is to preserve this file, e.g. when archiving, vs
how much space it takes up. Unlike ``export`` artifacts which are
(almost) always ignored by other exporters as that would never result
in data loss, ``raw`` files *may* be processed by exporters if they
decided that the risk of losing potentially (though unlikely) useful
data is greater than the time/space cost of handling the artifact (e.g.
a database uploader may choose to ignore ``raw`` artifacts, whereas a
network filer archiver may choose to archive them).
.. note:: The kind parameter is intended to represent the logical function of a particular
artifact, not its intended means of processing -- this is left entirely up to the
result processors.
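For example, a per-iteration trace file might be declared as follows (the
name and path here are illustrative)::

    Artifact('systrace', 'trace.txt', 'data', level=Artifact.ITERATION)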
"""
RUN = 'run'
ITERATION = 'iteration'
valid_kinds = ['log', 'meta', 'data', 'export', 'raw']
@staticmethod
def from_pod(pod):
return Artifact(**pod)
def __init__(self, name, path, kind, level=RUN, mandatory=False, description=None):
""""
:param name: Name that uniquely identifies this artifact.
:param path: The *relative* path of the artifact. Depending on the ``level``
must be either relative to the run or iteration output directory.
Note: this path *must* be delimited using ``/`` irrespective of the
operating system.
:param kind: The type of the artifact this is (e.g. log file, result, etc.); this
will be used as a hint to result processors. This must be one of ``'log'``,
``'meta'``, ``'data'``, ``'export'``, ``'raw'``.
:param level: The level at which the artifact will be generated. Must be either
``'iteration'`` or ``'run'``.
:param mandatory: Boolean value indicating whether this artifact must be present
at the end of result processing for its level.
:param description: A free-form description of what this artifact is.
"""
if kind not in self.valid_kinds:
raise ValueError('Invalid Artifact kind: {}; must be in {}'.format(kind, self.valid_kinds))
self.name = name
self.path = path.replace('/', os.sep) if path is not None else path
self.kind = kind
self.level = level
self.mandatory = mandatory
self.description = description
def exists(self, context):
"""Returns ``True`` if artifact exists within the specified context, and
``False`` otherwise."""
fullpath = os.path.join(context.output_directory, self.path)
return os.path.exists(fullpath)
def to_pod(self):
return copy(self.__dict__)
class RunEvent(object):
"""
An event that occurred during a run.
"""
@staticmethod
def from_pod(pod):
instance = RunEvent(pod['message'])
instance.timestamp = pod['timestamp']
return instance
def __init__(self, message):
self.timestamp = datetime.utcnow()
self.message = message
def to_pod(self):
return copy(self.__dict__)
def __str__(self):
return '{} {}'.format(self.timestamp, self.message)
__repr__ = __str__
class Metric(object):
"""
This is a single metric collected from executing a workload.
:param name: the name of the metric. Uniquely identifies the metric
within the results.
:param value: The numerical value of the metric for this execution of
a workload. This can be either an int or a float.
:param units: Units for the collected value. Can be None if the value
has no units (e.g. it's a count or a standardised score).
:param lower_is_better: Boolean flag indicating where lower values are
better than higher ones. Defaults to False.
:param classifiers: A set of key-value pairs to further classify this metric
beyond current iteration (e.g. this can be used to identify
sub-tests).
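For example (the values here are illustrative)::

    Metric('frame_count', 1903, units='frames', lower_is_better=False,
           classifiers={'test': 'scrolling'})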
"""
@staticmethod
def from_pod(pod):
return Metric(**pod)
def __init__(self, name, value, units=None, lower_is_better=False, classifiers=None):
self.name = name
self.value = numeric(value)
self.units = units
self.lower_is_better = lower_is_better
self.classifiers = classifiers or {}
def to_pod(self):
return copy(self.__dict__)
def __str__(self):
result = '{}: {}'.format(self.name, self.value)
if self.units:
result += ' ' + self.units
result += ' ({})'.format('-' if self.lower_is_better else '+')
return '<{}>'.format(result)
__repr__ = __str__
def _check_artifact_path(path, rootpath):
if path.startswith(rootpath):
return os.path.abspath(path)
rootpath = os.path.abspath(rootpath)
full_path = os.path.join(rootpath, path)
if not os.path.isfile(full_path):
raise ValueError('Cannot add artifact because {} does not exist.'.format(full_path))
return full_path

View File

@ -1,362 +1,188 @@
import logging
import os
import shutil
import logging
import string
import sys
import uuid
from copy import copy
from datetime import datetime, timedelta
from wa.framework import signal, log
from wa.framework.configuration.core import merge_config_values
from wa.utils import serializer
from wa.utils.misc import enum_metaclass, ensure_directory_exists as _d
from wa.utils.types import numeric
from wlauto.core.configuration.configuration import JobSpec
from wlauto.core.configuration.manager import ConfigManager
from wlauto.core.device_manager import TargetInfo
from wlauto.utils.misc import touch
from wlauto.utils.serializer import write_pod, read_pod
class Status(object):
logger = logging.getLogger('output')
__metaclass__ = enum_metaclass('values', return_name=True)
values = [
'NEW',
'PENDING',
'RUNNING',
'COMPLETE',
'OK',
'OKISH',
'NONCRITICAL',
'PARTIAL',
'FAILED',
'ABORTED',
'SKIPPED',
'UNKNOWN',
]
class WAOutput(object):
basename = '.wa-output'
@classmethod
def load(cls, source):
if os.path.isfile(source):
pod = serializer.load(source)
elif os.path.isdir(source):
pod = serializer.load(os.path.join(source, cls.basename))
else:
message = 'Cannot load {} from {}'
raise ValueError(message.format(cls.__name__, source))
return cls.from_pod(pod)
@classmethod
def from_pod(cls, pod):
instance = cls(pod['output_directory'])
instance.status = pod['status']
instance.metrics = [Metric.from_pod(m) for m in pod['metrics']]
instance.artifacts = [Artifact.from_pod(a) for a in pod['artifacts']]
instance.events = [RunEvent.from_pod(e) for e in pod['events']]
instance.classifiers = pod['classifiers']
return instance
def __init__(self, output_directory):
self.logger = logging.getLogger('output')
self.output_directory = output_directory
self.status = Status.UNKNOWN
self.classifiers = {}
self.metrics = []
self.artifacts = []
self.events = []
def initialize(self, overwrite=False):
if os.path.exists(self.output_directory):
if not overwrite:
raise RuntimeError('"{}" already exists.'.format(self.output_directory))
self.logger.info('Removing existing output directory.')
shutil.rmtree(self.output_directory)
self.logger.debug('Creating output directory {}'.format(self.output_directory))
os.makedirs(self.output_directory)
def add_metric(self, name, value, units=None, lower_is_better=False, classifiers=None):
classifiers = merge_config_values(self.classifiers, classifiers or {})
self.metrics.append(Metric(name, value, units, lower_is_better, classifiers))
def add_artifact(self, name, path, kind, *args, **kwargs):
path = _check_artifact_path(path, self.output_directory)
self.artifacts.append(Artifact(name, path, kind, Artifact.RUN, *args, **kwargs))
def get_path(self, subpath):
return os.path.join(self.output_directory, subpath)
def to_pod(self):
return {
'output_directory': self.output_directory,
'status': self.status,
'metrics': [m.to_pod() for m in self.metrics],
'artifacts': [a.to_pod() for a in self.artifacts],
'events': [e.to_pod() for e in self.events],
'classifiers': copy(self.classifiers),
}
def persist(self):
statefile = os.path.join(self.output_directory, self.basename)
with open(statefile, 'wb') as wfh:
serializer.dump(self, wfh)
class RunInfo(object):
"""
Information about the current run, such as its unique ID, run
time, etc.
default_name_format = 'wa-run-%y%m%d-%H%M%S'
"""
@staticmethod
def from_pod(pod):
uid = pod.pop('uuid')
if uid is not None:
uid = uuid.UUID(uid)
instance = RunInfo(**pod)
instance.uuid = uid
return instance
def __init__(self, project=None, project_stage=None, name=None):
def __init__(self, run_name=None, project=None, project_stage=None,
start_time=None, end_time=None, duration=None):
self.uuid = uuid.uuid4()
self.project = project
self.project_stage = project_stage
self.name = name or datetime.now().strftime(self.default_name_format)
self.run_name = None
self.project = None
self.project_stage = None
self.start_time = None
self.end_time = None
self.duration = None
@staticmethod
def from_pod(pod):
instance = RunInfo()
instance.uuid = uuid.UUID(pod['uuid'])
instance.project = pod['project']
instance.project_stage = pod['project_stage']
instance.name = pod['name']
instance.start_time = pod['start_time']
instance.end_time = pod['end_time']
instance.duration = timedelta(seconds=pod['duration'])
return instance
def to_pod(self):
d = copy(self.__dict__)
d['uuid'] = str(self.uuid)
d['duration'] = self.duration.days * 3600 * 24 + self.duration.seconds
return d
class RunOutput(WAOutput):
@property
def info_directory(self):
return _d(os.path.join(self.output_directory, '_info'))
@property
def config_directory(self):
return _d(os.path.join(self.output_directory, '_config'))
@property
def failed_directory(self):
return _d(os.path.join(self.output_directory, '_failed'))
@property
def log_file(self):
return os.path.join(self.output_directory, 'run.log')
@classmethod
def from_pod(cls, pod):
instance = WAOutput.from_pod(pod)
instance.info = RunInfo.from_pod(pod['info'])
instance.jobs = [JobOutput.from_pod(i) for i in pod['jobs']]
instance.failed = [JobOutput.from_pod(i) for i in pod['failed']]
return instance
def __init__(self, output_directory):
super(RunOutput, self).__init__(output_directory)
self.logger = logging.getLogger('output')
self.info = RunInfo()
self.jobs = []
self.failed = []
def initialize(self, overwrite=False):
super(RunOutput, self).initialize(overwrite)
log.add_file(self.log_file)
self.add_artifact('runlog', self.log_file, 'log')
def create_job_output(self, id):
outdir = os.path.join(self.output_directory, id)
job_output = JobOutput(outdir)
self.jobs.append(job_output)
return job_output
def move_failed(self, job_output):
basename = os.path.basename(job_output.output_directory)
i = 1
dest = os.path.join(self.failed_directory, '{}-{}'.format(basename, i))
while os.path.exists(dest):
i += 1
# recompute from basename so multi-digit suffixes are handled correctly
dest = os.path.join(self.failed_directory, '{}-{}'.format(basename, i))
shutil.move(job_output.output_directory, dest)
def to_pod(self):
pod = super(RunOutput, self).to_pod()
pod['info'] = self.info.to_pod()
pod['jobs'] = [i.to_pod() for i in self.jobs]
pod['failed'] = [i.to_pod() for i in self.failed]
return pod
class JobOutput(WAOutput):
def add_artifact(self, name, path, kind, *args, **kwargs):
path = _check_artifact_path(path, self.output_directory)
self.artifacts.append(Artifact(name, path, kind, Artifact.ITERATION, *args, **kwargs))
class Artifact(object):
class RunState(object):
"""
This is an artifact generated during execution/post-processing of a workload.
Unlike metrics, this represents an actual artifact, such as a file, that has been
generated. This may be a "result", such as a trace, or "metadata", such as logs.
These are distinguished using the ``kind`` attribute, which also helps WA decide
how it should be handled. Currently supported kinds are:
:log: A log file. Not part of "results" as such but contains information about the
run/workload execution that may be useful for diagnostics/meta analysis.
:meta: A file containing metadata. This is not part of "results", but contains
information that may be necessary to reproduce the results (contrast with
``log`` artifacts which are *not* necessary).
:data: This file contains new data, not available otherwise and should be considered
part of the "results" generated by WA. Most traces would fall into this category.
:export: Exported version of results or some other artifact. This signifies that
this artifact does not contain any new data that is not available
elsewhere and that it may be safely discarded without losing information.
:raw: Signifies that this is a raw dump/log that is normally processed to extract
useful information and is then discarded. In a sense, it is the opposite of
``export``, but in general may also be discarded.
.. note:: whether a file is marked as ``log``/``data`` or ``raw`` depends on
how important it is to preserve this file, e.g. when archiving, vs
how much space it takes up. Unlike ``export`` artifacts which are
(almost) always ignored by other exporters as that would never result
in data loss, ``raw`` files *may* be processed by exporters if they
decided that the risk of losing potentially (though unlikely) useful
data is greater than the time/space cost of handling the artifact (e.g.
a database uploader may choose to ignore ``raw`` artifacts, whereas a
network filer archiver may choose to archive them).
.. note:: The kind parameter is intended to represent the logical function of a particular
artifact, not its intended means of processing -- this is left entirely up to the
result processors.
Represents the state of a WA run.
"""
RUN = 'run'
ITERATION = 'iteration'
valid_kinds = ['log', 'meta', 'data', 'export', 'raw']
@staticmethod
def from_pod(pod):
return Artifact(**pod)
return RunState()
def __init__(self, name, path, kind, level=RUN, mandatory=False, description=None):
""""
:param name: Name that uniquely identifies this artifact.
:param path: The *relative* path of the artifact. Depending on the ``level``
must be either relative to the run or iteration output directory.
Note: this path *must* be delimited using ``/`` irrespective of the
operating system.
:param kind: The type of the artifact this is (e.g. log file, result, etc.); this
will be used as a hint to result processors. This must be one of ``'log'``,
``'meta'``, ``'data'``, ``'export'``, ``'raw'``.
:param level: The level at which the artifact will be generated. Must be either
``'iteration'`` or ``'run'``.
:param mandatory: Boolean value indicating whether this artifact must be present
at the end of result processing for its level.
:param description: A free-form description of what this artifact is.
"""
if kind not in self.valid_kinds:
raise ValueError('Invalid Artifact kind: {}; must be in {}'.format(kind, self.valid_kinds))
self.name = name
self.path = path.replace('/', os.sep) if path is not None else path
self.kind = kind
self.level = level
self.mandatory = mandatory
self.description = description
def exists(self, context):
"""Returns ``True`` if artifact exists within the specified context, and
``False`` otherwise."""
fullpath = os.path.join(context.output_directory, self.path)
return os.path.exists(fullpath)
def __init__(self):
pass
def to_pod(self):
return copy(self.__dict__)
return {}
class RunEvent(object):
"""
An event that occurred during a run.
class RunOutput(object):
"""
@property
def logfile(self):
return os.path.join(self.basepath, 'run.log')
@staticmethod
def from_pod(pod):
instance = RunEvent(pod['message'])
instance.timestamp = pod['timestamp']
return instance
@property
def metadir(self):
return os.path.join(self.basepath, '__meta')
def __init__(self, message):
self.timestamp = datetime.utcnow()
self.message = message
@property
def infofile(self):
return os.path.join(self.metadir, 'run_info.json')
def to_pod(self):
return copy(self.__dict__)
@property
def statefile(self):
return os.path.join(self.basepath, '.run_state.json')
def __str__(self):
return '{} {}'.format(self.timestamp, self.message)
@property
def configfile(self):
return os.path.join(self.metadir, 'config.json')
__repr__ = __str__
@property
def targetfile(self):
return os.path.join(self.metadir, 'target_info.json')
@property
def jobsfile(self):
return os.path.join(self.metadir, 'jobs.json')
@property
def raw_config_dir(self):
return os.path.join(self.metadir, 'raw_config')
def __init__(self, path):
self.basepath = path
self.info = None
self.state = None
if (not os.path.isfile(self.statefile) or
not os.path.isfile(self.infofile)):
msg = '"{}" does not exist or is not a valid WA output directory.'
raise ValueError(msg.format(self.basepath))
self.reload()
def reload(self):
self.info = RunInfo.from_pod(read_pod(self.infofile))
self.state = RunState.from_pod(read_pod(self.statefile))
def write_info(self):
write_pod(self.info.to_pod(), self.infofile)
def write_state(self):
write_pod(self.state.to_pod(), self.statefile)
def write_config(self, config):
write_pod(config.to_pod(), self.configfile)
def read_config(self):
if not os.path.isfile(self.configfile):
return None
return ConfigManager.from_pod(read_pod(self.configfile))
def write_target_info(self, ti):
write_pod(ti.to_pod(), self.targetfile)
def read_target_info(self):
if not os.path.isfile(self.targetfile):
return None
return TargetInfo.from_pod(read_pod(self.targetfile))
def write_job_specs(self, job_specs):
job_specs[0].to_pod()
js_pod = {'jobs': [js.to_pod() for js in job_specs]}
write_pod(js_pod, self.jobsfile)
def read_job_specs(self):
if not os.path.isfile(self.jobsfile):
return None
pod = read_pod(self.jobsfile)
return [JobSpec.from_pod(jp) for jp in pod['jobs']]
class Metric(object):
"""
This is a single metric collected from executing a workload.
def init_wa_output(path, wa_state, force=False):
if os.path.exists(path):
if force:
logger.info('Removing existing output directory.')
shutil.rmtree(os.path.abspath(path))
else:
raise RuntimeError('path exists: {}'.format(path))
:param name: the name of the metric. Uniquely identifies the metric
within the results.
:param value: The numerical value of the metric for this execution of
a workload. This can be either an int or a float.
:param units: Units for the collected value. Can be None if the value
has no units (e.g. it's a count or a standardised score).
:param lower_is_better: Boolean flag indicating where lower values are
better than higher ones. Defaults to False.
:param classifiers: A set of key-value pairs to further classify this metric
beyond current iteration (e.g. this can be used to identify
sub-tests).
logger.info('Creating output directory.')
os.makedirs(path)
meta_dir = os.path.join(path, '__meta')
os.makedirs(meta_dir)
_save_raw_config(meta_dir, wa_state)
touch(os.path.join(path, 'run.log'))
"""
info = RunInfo(
run_name=wa_state.run_config.run_name,
project=wa_state.run_config.project,
project_stage=wa_state.run_config.project_stage,
)
write_pod(info.to_pod(), os.path.join(meta_dir, 'run_info.json'))
with open(os.path.join(path, '.run_state.json'), 'w') as wfh:
wfh.write('{}')
@staticmethod
def from_pod(pod):
return Metric(**pod)
def __init__(self, name, value, units=None, lower_is_better=False, classifiers=None):
self.name = name
self.value = numeric(value)
self.units = units
self.lower_is_better = lower_is_better
self.classifiers = classifiers or {}
def to_pod(self):
return copy(self.__dict__)
def __str__(self):
result = '{}: {}'.format(self.name, self.value)
if self.units:
result += ' ' + self.units
result += ' ({})'.format('-' if self.lower_is_better else '+')
return '<{}>'.format(result)
__repr__ = __str__
return RunOutput(path)
def _check_artifact_path(path, rootpath):
if path.startswith(rootpath):
return os.path.abspath(path)
rootpath = os.path.abspath(rootpath)
full_path = os.path.join(rootpath, path)
if not os.path.isfile(full_path):
raise ValueError('Cannot add artifact because {} does not exist.'.format(full_path))
return full_path
def _save_raw_config(meta_dir, state):
raw_config_dir = os.path.join(meta_dir, 'raw_config')
os.makedirs(raw_config_dir)
for i, source in enumerate(state.loaded_config_sources):
if not os.path.isfile(source):
continue
basename = os.path.basename(source)
dest_path = os.path.join(raw_config_dir, 'cfg{}-{}'.format(i, basename))
shutil.copy(source, dest_path)

View File

@ -21,69 +21,28 @@ import inspect
import imp
import string
import logging
from copy import copy
from itertools import chain
from collections import OrderedDict, defaultdict
from itertools import chain
from copy import copy
from wa.framework import log
from wa.framework.exception import ValidationError, ConfigError, NotFoundError, PluginLoaderError
from wa.framework.configuration.core import ConfigurationPoint, ConfigurationPointCollection
from wa.utils.misc import isiterable, ensure_directory_exists as _d, get_article
from wa.utils.misc import walk_modules, get_article
from wa.utils.types import identifier, integer, boolean, caseless_string
from wa.framework.configuration.core import settings, ConfigurationPoint as Parameter
from wa.framework.exception import (NotFoundError, PluginLoaderError, ValidationError,
ConfigError, HostError)
from wa.utils import log
from wa.utils.misc import (ensure_directory_exists as _d, walk_modules, load_class,
merge_dicts_simple, get_article)
from wa.utils.types import identifier, boolean
class Parameter(ConfigurationPoint):
is_runtime = False
def __init__(self, name,
kind=None,
mandatory=None,
default=None,
override=False,
allowed_values=None,
description=None,
constraint=None,
convert_types=True,
global_alias=None,
reconfigurable=True):
"""
:param global_alias: This is an alternative alias for this parameter,
unlike the name, this alias will not be
namespaced under the owning extension's name
(hence the global part). This is introduced
primarily for backward compatibility -- so that
old extension settings names still work. This
should not be used for new parameters.
:param reconfigurable: This indicates whether this parameter may be
reconfigured during the run (e.g. between different
iterations). This determines where in the run configuration
this parameter may appear.
For other parameters, see docstring for
``wa.framework.configuration.core.ConfigurationPoint``
"""
super(Parameter, self).__init__(name, kind, mandatory,
default, override, allowed_values,
description, constraint,
convert_types)
self.global_alias = global_alias
self.reconfigurable = reconfigurable
def __repr__(self):
d = copy(self.__dict__)
del d['description']
return 'Param({})'.format(d)
MODNAME_TRANS = string.maketrans(':/\\.', '____')
class PluginAliasCollection(object):
class AttributeCollection(object):
"""
Accumulator for extension attribute objects (such as Parameters). This will
replace any class member list accumulating such attributes through the magic of
metaprogramming\ [*]_.
Accumulator for plugin attribute objects (such as Parameters or Artifacts).
This will replace any class member list accumulating such attributes
through the magic of metaprogramming\ [*]_.
.. [*] which is totally safe and not going to backfire in any way...
@ -93,7 +52,8 @@ class PluginAliasCollection(object):
def values(self):
return self._attrs.values()
def __init__(self):
def __init__(self, attrcls):
self._attrcls = attrcls
self._attrs = OrderedDict()
def add(self, p):
@ -104,6 +64,8 @@ class PluginAliasCollection(object):
for a, v in p.__dict__.iteritems():
if v is not None:
setattr(newp, a, v)
if not hasattr(newp, "_overridden"):
newp._overridden = p._owner
self._attrs[p.name] = newp
else:
# Duplicate attribute condition is checked elsewhere.
@ -119,13 +81,19 @@ class PluginAliasCollection(object):
__repr__ = __str__
def _to_attrcls(self, p):
if isinstance(p, tuple) or isinstance(p, list):
# must be in the form (name, {param: value, ...})
p = Alias(p[0], **p[1])
elif not isinstance(p, Alias):
old_owner = getattr(p, "_owner", None)
if isinstance(p, basestring):
p = self._attrcls(p)
elif isinstance(p, tuple) or isinstance(p, list):
p = self._attrcls(*p)
elif isinstance(p, dict):
p = self._attrcls(**p)
elif not isinstance(p, self._attrcls):
raise ValueError('Invalid parameter value: {}'.format(p))
if p.name in self._attrs:
if (p.name in self._attrs and not p.override and
p.name != 'modules'): # TODO: HACK due to "diamond dependency" in workloads...
raise ValueError('Attribute {} has already been defined.'.format(p.name))
p._owner = old_owner
return p
def __iadd__(self, other):
@ -146,83 +114,209 @@ class PluginAliasCollection(object):
return len(self._attrs)
class AliasCollection(AttributeCollection):
def __init__(self):
super(AliasCollection, self).__init__(Alias)
def _to_attrcls(self, p):
if isinstance(p, tuple) or isinstance(p, list):
# must be in the form (name, {param: value, ...})
p = self._attrcls(p[0], **p[1])
elif not isinstance(p, self._attrcls):
raise ValueError('Invalid parameter value: {}'.format(p))
if p.name in self._attrs:
raise ValueError('Attribute {} has already been defined.'.format(p.name))
return p
class ListCollection(list):
def __init__(self, attrcls): # pylint: disable=unused-argument
super(ListCollection, self).__init__()
class Artifact(object):
"""
This is an artifact generated during execution/post-processing of a workload.
Unlike metrics, this represents an actual artifact, such as a file, that has been
generated. This may be a "result", such as a trace, or "metadata", such as logs.
These are distinguished using the ``kind`` attribute, which also helps WA decide
how it should be handled. Currently supported kinds are:
:log: A log file. Not part of "results" as such but contains information
about the run/workload execution that may be useful for diagnostics/meta
analysis.
:meta: A file containing metadata. This is not part of "results", but contains
information that may be necessary to reproduce the results
(contrast with ``log`` artifacts which are *not* necessary).
:data: This file contains new data, not available otherwise and should be
considered part of the "results" generated by WA. Most traces
would fall into this category.
:export: Exported version of results or some other artifact. This signifies
that this artifact does not contain any new data that is not
available elsewhere and that it may be safely discarded
without losing information.
:raw: Signifies that this is a raw dump/log that is normally processed to
extract useful information and is then discarded. In a sense, it
is the opposite of ``export``, but in general may also be
discarded.
.. note:: whether a file is marked as ``log``/``data`` or ``raw``
depends on how important it is to preserve this file,
e.g. when archiving, vs how much space it takes up.
Unlike ``export`` artifacts which are (almost) always
ignored by other exporters as that would never result
in data loss, ``raw`` files *may* be processed by
exporters if they decided that the risk of losing
potentially (though unlikely) useful data is greater
than the time/space cost of handling the artifact (e.g.
a database uploader may choose to ignore ``raw``
artifacts, whereas a network filer archiver may choose
to archive them).
.. note:: The kind parameter is intended to represent the logical function of
a particular artifact, not its intended means of processing --
this is left entirely up to the result processors.
"""
RUN = 'run'
ITERATION = 'iteration'
valid_kinds = ['log', 'meta', 'data', 'export', 'raw']
def __init__(self, name, path, kind, level=RUN, mandatory=False, description=None):
""""
:param name: Name that uniquely identifies this artifact.
:param path: The *relative* path of the artifact. Depending on the ``level``
must be either relative to the run or iteration output directory.
.. note:: this path *must* be delimited using ``/``
irrespective of the operating system.
:param kind: The type of the artifact this is (e.g. log file, result, etc.);
this will be used as a hint to result processors. This must be
one of ``'log'``, ``'meta'``, ``'data'``, ``'export'``,
``'raw'``.
:param level: The level at which the artifact will be generated. Must be
either ``'iteration'`` or ``'run'``.
:param mandatory: Boolean value indicating whether this artifact must be
present at the end of result processing for its level.
:param description: A free-form description of what this artifact is.
"""
if kind not in self.valid_kinds:
msg = 'Invalid Artifact kind: {}; must be in {}'
raise ValueError(msg.format(kind, self.valid_kinds))
self.name = name
self.path = path.replace('/', os.sep) if path is not None else path
self.kind = kind
self.level = level
self.mandatory = mandatory
self.description = description
def exists(self, context):
"""
Returns ``True`` if artifact exists within the specified context, and
``False`` otherwise.
"""
fullpath = os.path.join(context.output_directory, self.path)
return os.path.exists(fullpath)
def to_dict(self):
return copy(self.__dict__)
class Alias(object):
"""
This represents a configuration alias for an extension, mapping an alternative name to
a set of parameter values, effectively providing an alternative set of default values.
This represents a configuration alias for a plugin, mapping an alternative
name to a set of parameter values, effectively providing an alternative set
of default values.
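For example, a plugin that defines a ``codec`` parameter could declare (the
names here are illustrative)::

    aliases = [Alias('mp3_playback', codec='mp3')]

``validate`` below checks that every parameter named in an alias actually
exists on the owning plugin.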
"""
def __init__(self, name, **kwargs):
self.name = name
self.parameters = kwargs
self.params = kwargs
self.plugin_name = None # gets set by the MetaClass
def validate(self, plugin):
plugin_params = set(p.name for p in plugin.parameters)
for param in self.parameters:
if param not in plugin_params:
def validate(self, ext):
ext_params = set(p.name for p in ext.parameters)
for param in self.params:
if param not in ext_params:
# Raising config error because aliases might have come through
# the config.
msg = 'Parameter {} (defined in alias {}) is invalid for {}'
raise ValueError(msg.format(param, self.name, plugin.name))
raise ConfigError(msg.format(param, self.name, ext.name))
class PluginMeta(type):
"""
This basically adds some magic to extensions to make implementing new extensions, such as
workloads less complicated.
This basically adds some magic to plugins to make implementing new plugins,
such as workloads, less complicated.
It ensures that certain class attributes (specified by the ``to_propagate``
attribute of the metaclass) get propagated down the inheritance hierarchy. The assumption
is that the values of the attributes specified in the class are iterable; if that is not met,
Bad Things(tm) will happen.
attribute of the metaclass) get propagated down the inheritance hierarchy.
The assumption is that the values of the attributes specified in the class
are iterable; if that is not met, Bad Things (tm) will happen.
This also provides "virtual" method implementations. The ``super``'s version of these
methods (specified by the ``virtual_methods`` attribute of the metaclass) will be
automatically invoked.
This also provides virtual method implementation, similar to those in
C-derived OO languages, and alias specifications.
"""
to_propagate = [
('parameters', ConfigurationPointCollection),
('parameters', Parameter, AttributeCollection),
('artifacts', Artifact, AttributeCollection),
('core_modules', str, ListCollection),
]
#virtual_methods = ['validate', 'initialize', 'finalize']
virtual_methods = []
virtual_methods = ['validate', 'initialize', 'finalize']
global_virtuals = ['initialize', 'finalize']
def __new__(mcs, clsname, bases, attrs):
mcs._propagate_attributes(bases, attrs)
mcs._propagate_attributes(bases, attrs, clsname)
cls = type.__new__(mcs, clsname, bases, attrs)
mcs._setup_aliases(cls)
mcs._implement_virtual(cls, bases)
return cls
@classmethod
def _propagate_attributes(mcs, bases, attrs):
def _propagate_attributes(mcs, bases, attrs, clsname):
"""
For attributes specified by to_propagate, their values will be a union of
that specified for cls and it's bases (cls values overriding those of bases
that specified for cls and its bases (cls values overriding those of bases
in case of conflicts).
"""
for prop_attr, attr_collector_cls in mcs.to_propagate:
for prop_attr, attr_cls, attr_collector_cls in mcs.to_propagate:
should_propagate = False
propagated = attr_collector_cls()
propagated = attr_collector_cls(attr_cls)
for base in bases:
if hasattr(base, prop_attr):
propagated += getattr(base, prop_attr) or []
should_propagate = True
if prop_attr in attrs:
propagated += attrs[prop_attr] or []
pattrs = attrs[prop_attr] or []
for pa in pattrs:
if not isinstance(pa, basestring):
pa._owner = clsname
propagated += pattrs
should_propagate = True
if should_propagate:
for p in propagated:
override = bool(getattr(p, "override", None))
overridden = bool(getattr(p, "_overridden", None))
if override != overridden:
msg = "Overriding non existing parameter '{}' inside '{}'"
raise ValueError(msg.format(p.name, p._owner))
attrs[prop_attr] = propagated
@classmethod
def _setup_aliases(mcs, cls):
if hasattr(cls, 'aliases'):
aliases, cls.aliases = cls.aliases, PluginAliasCollection()
aliases, cls.aliases = cls.aliases, AliasCollection()
for alias in aliases:
if isinstance(alias, basestring):
alias = Alias(alias)
@ -248,7 +342,8 @@ class PluginMeta(type):
for vmname in mcs.virtual_methods:
clsmethod = getattr(cls, vmname, None)
if clsmethod:
basemethods = [getattr(b, vmname) for b in bases if hasattr(b, vmname)]
basemethods = [getattr(b, vmname) for b in bases
if hasattr(b, vmname)]
methods[vmname] = [bm for bm in basemethods if bm != clsmethod]
methods[vmname].append(clsmethod)
@ -261,7 +356,12 @@ class PluginMeta(type):
def wrapper(self, *args, **kwargs):
for dm in methods[name__]:
dm(self, *args, **kwargs)
if name__ in mcs.global_virtuals:
if dm not in called_globals:
dm(self, *args, **kwargs)
called_globals.add(dm)
else:
dm(self, *args, **kwargs)
return wrapper
setattr(cls, vmname, generate_method_wrapper(vmname))
@ -269,35 +369,52 @@ class PluginMeta(type):
class Plugin(object):
"""
Base class for all WA plugins.
A plugin extends the functionality of WA in some way. Plugins are discovered
and loaded dynamically by the plugin loader upon invocation of WA scripts.
Adding an extension is a matter of placing a class that implements an appropriate
interface somewhere it would be discovered by the loader. That "somewhere" is
typically one of the plugin subdirectories under ``~/.workload_automation/``.
Base class for all WA plugins. A plugin is basically a plug-in. It
extends the functionality of WA in some way. Plugins are discovered and
loaded dynamically by the plugin loader upon invocation of WA scripts.
Adding a plugin is a matter of placing a class that implements an
appropriate interface somewhere it would be discovered by the loader. That
"somewhere" is typically one of the plugin subdirectories under
``~/.workload_automation/``.
"""
__metaclass__ = PluginMeta
name = None
kind = None
parameters = []
name = None
parameters = [
Parameter('modules', kind=list,
description="""
Lists the modules to be loaded by this plugin. A module is a
plug-in that further extends the functionality of a plugin.
"""),
]
artifacts = []
aliases = []
core_modules = []
@classmethod
def get_default_config(cls):
return {p.name: p.default for p in cls.parameters}
@classmethod
def get_parameter(cls, name):
for param in cls.parameters:
if param.name == name or name in param.aliases:
return param
@property
def dependencies_directory(self):
return _d(os.path.join(settings.dependencies_directory, self.name))
@property
def _classname(self):
return self.__class__.__name__
def __init__(self, **kwargs):
self.logger = logging.getLogger(self.name)
self.logger = logging.getLogger(self._classname)
self._modules = []
self.capabilities = getattr(self.__class__, 'capabilities', [])
self.update_config(**kwargs)
for param in self.parameters:
param.set_value(self, kwargs.get(param.name))
for key in kwargs:
if key not in self.parameters:
message = 'Unexpected parameter "{}" for {}'
raise ConfigError(message.format(key, self.name))
def get_config(self):
"""
@ -309,35 +426,21 @@ class Plugin(object):
config[param.name] = getattr(self, param.name, None)
return config
def update_config(self, **kwargs):
"""
Updates current configuration (i.e. parameter values) of this plugin.
"""
for param in self.parameters:
param.set_value(self, kwargs.get(param.name))
for key in kwargs:
if key not in self.parameters:
message = 'Unexpected parameter "{}" for {}'
raise ConfigError(message.format(key, self.name))
def validate(self):
"""
Perform basic validation to ensure that this extension is capable of running.
This is intended as an early check to ensure the extension has not been mis-configured,
rather than a comprehensive check (that may, e.g., require access to the execution
context).
Perform basic validation to ensure that this plugin is capable of
running. This is intended as an early check to ensure the plugin has
not been mis-configured, rather than a comprehensive check (that may,
e.g., require access to the execution context).
This method may also be used to enforce (i.e. set as well as check) inter-parameter
constraints for the extension (e.g. if valid values for parameter A depend on the value
of parameter B -- something that is not possible to enforce using ``Parameter``\ 's
``constraint`` attribute.
This method may also be used to enforce (i.e. set as well as check)
inter-parameter constraints for the plugin (e.g. if valid values for
parameter A depend on the value of parameter B -- something that is not
possible to enforce using ``Parameter``\ 's ``constraint`` attribute.
"""
if self.name is None:
raise ValidationError('name not set for {}'.format(self.__class__.__name__))
if self.kind is None:
raise ValidationError('kind not set for {}'.format(self.name))
raise ValidationError('Name not set for {}'.format(self._classname))
for param in self.parameters:
param.validate(self)
@ -347,109 +450,120 @@ class Plugin(object):
def finalize(self, context):
pass
def check_artifacts(self, context, level):
"""
Make sure that all mandatory artifacts have been generated.
"""
for artifact in self.artifacts:
if artifact.level != level or not artifact.mandatory:
continue
fullpath = os.path.join(context.output_directory, artifact.path)
if not os.path.exists(fullpath):
message = 'Mandatory "{}" has not been generated for {}.'
raise ValidationError(message.format(artifact.path, self.name))
def __getattr__(self, name):
if name == '_modules':
raise ValueError('_modules accessed too early!')
for module in self._modules:
if hasattr(module, name):
return getattr(module, name)
raise AttributeError(name)
def load_modules(self, loader):
"""
Load the modules specified by the "modules" Parameter using the
provided loader. A loader can be any object that has an attribute called
"get_module" that implements the following signature::
get_module(name, owner, **kwargs)
and returns an instance of :class:`wlauto.core.plugin.Module`. If the
module with the specified name is not found, the loader must raise an
appropriate exception.
"""
modules = list(reversed(self.core_modules)) +\
list(reversed(self.modules or []))
if not modules:
return
for module_spec in modules:
if not module_spec:
continue
module = self._load_module(loader, module_spec)
self._install_module(module)
def has(self, capability):
"""Check if this extension has the specified capability. The alternative method ``can`` is
identical to this. Which to use is up to the caller depending on what makes semantic sense
in the context of the capability, e.g. ``can('hard_reset')`` vs ``has('active_cooling')``."""
"""
Check if this plugin has the specified capability. The alternative
method ``can`` is identical to this. Which to use is up to the caller
depending on what makes semantic sense in the context of the
capability, e.g. ``can('hard_reset')`` vs ``has('active_cooling')``.
"""
return capability in self.capabilities
can = has
def _load_module(self, loader, module_spec):
if isinstance(module_spec, basestring):
name = module_spec
params = {}
elif isinstance(module_spec, dict):
if len(module_spec) != 1:
msg = 'Invalid module spec: {}; dict must have exactly one key -- '\
'the module name.'
raise ValueError(msg.format(module_spec))
name, params = module_spec.items()[0]
else:
message = 'Invalid module spec: {}; must be a string or a one-key dict.'
raise ValueError(message.format(module_spec))
class TargetedPluginMeta(PluginMeta):
if not isinstance(params, dict):
message = 'Invalid module spec: {}; dict value must also be a dict.'
raise ValueError(message.format(module_spec))
to_propagate = PluginMeta.to_propagate + [
('supported_targets', list),
('supported_platforms', list),
]
virtual_methods = PluginMeta.virtual_methods + [
'validate_on_target',
]
module = loader.get_module(name, owner=self, **params)
module.initialize(None)
return module
def _install_module(self, module):
for capability in module.capabilities:
if capability not in self.capabilities:
self.capabilities.append(capability)
self._modules.append(module)
class TargetedPlugin(Plugin):
"""
A plugin that operates on a target device. These kinds of plugins are created
with a ``devlib.Target`` instance and may only support certain kinds of targets.
A plugin that interacts with a target device.
"""
__metaclass__ = TargetedPluginMeta
supported_targets = []
supported_targets = []
supported_platforms = []
@classmethod
def check_compatible(cls, target):
if cls.supported_targets:
if target.os not in cls.supported_targets:
msg = 'Incompatible target OS "{}" for {}'
raise TargetError(msg.format(target.os, cls.name))
def __init__(self, target, **kwargs):
super(TargetedPlugin, self).__init__(**kwargs)
if self.supported_targets and target.os not in self.supported_targets:
raise TargetError('Plugin {} does not support target {}'.format(self.name, target.name))
if self.supported_platforms and target.platform.name not in self.supported_platforms:
raise TargetError('Plugin {} does not support platform {}'.format(self.name, target.platform))
self.check_compatible(target)
self.target = target
def validate_on_target(self):
"""
This will be invoked once at the beginning of a run after a ``Target``
has been connected and initialized. This is intended for validation
that cannot be performed offline but does not depend on ephemeral
state that is likely to change during the course of a run (validation
against such states should be done during setup of a particular
execution.
"""
pass
class PluginLoaderItem(object):
def __init__(self, ext_tuple):
self.name = ext_tuple.name
self.default_package = ext_tuple.default_package
self.default_path = ext_tuple.default_path
self.cls = load_class(ext_tuple.cls)
class GlobalParameterAlias(object):
"""
Represents a "global alias" for an plugin parameter. A global alias
is specified at the top-level of config rather namespaced under an plugin
name.
Multiple plugins may have parameters with the same global_alias if they are
part of the same inheritance hierarchy and one parameter is an override of the
other. This class keeps track of all such cases in its plugins dict.
"""
def __init__(self, name):
self.name = name
self.plugins = {}
def iteritems(self):
for ext in self.plugins.itervalues():
yield (self.get_param(ext), ext)
def get_param(self, ext):
for param in ext.parameters:
if param.global_alias == self.name:
return param
message = 'Plugin {} does not have a parameter with global alias {}'
raise ValueError(message.format(ext.name, self.name))
def update(self, other_ext):
self._validate_ext(other_ext)
self.plugins[other_ext.name] = other_ext
def _validate_ext(self, other_ext):
other_param = self.get_param(other_ext)
for param, ext in self.iteritems():
if ((not (issubclass(ext, other_ext) or issubclass(other_ext, ext))) and
other_param.kind != param.kind):
message = 'Duplicate global alias {} declared in {} and {} plugins with different types'
raise PluginLoaderError(message.format(self.name, ext.name, other_ext.name))
if not param.name == other_param.name:
message = 'Two params {} in {} and {} in {} both declare global alias {}'
raise PluginLoaderError(message.format(param.name, ext.name,
other_param.name, other_ext.name, self.name))
def __str__(self):
text = 'GlobalAlias({} => {})'
extlist = ', '.join(['{}.{}'.format(e.name, p.name) for p, e in self.iteritems()])
return text.format(self.name, extlist)
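A sketch of the bookkeeping this class performs (the plugin class and alias name are hypothetical):

    ga = GlobalParameterAlias('device_serial')
    ga.update(SomePlugin)  # SomePlugin declares a parameter with this global_alias
    for param, ext in ga.iteritems():
        print('{} -> {}.{}'.format(ga.name, ext.name, param.name))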
MODNAME_TRANS = string.maketrans(':/\\.', '____')
class PluginLoader(object):
"""
@@ -461,19 +575,19 @@ class PluginLoader(object):
"""
def __init__(self, packages=None, paths=None, ignore_paths=None,
keep_going=False):
"""
params::
:packages: List of packages to load plugins from.
:paths: List of paths to be searched for Python modules containing
WA plugins.
:ignore_paths: List of paths to ignore when searching for WA plugins
               (these would typically be subdirectories of one or
               more locations listed in the ``paths`` parameter).
:keep_going: Specifies whether to keep going if an error occurs while
loading plugins.
"""
self.logger = logging.getLogger('pluginloader')
self.keep_going = keep_going
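For example, a loader that searches both bundled packages and a local plugin directory might be constructed like this (the filesystem path is illustrative):

    loader = PluginLoader(packages=['wa.commands', 'wa.workloads'],
                          paths=['/home/user/.workload_automation/plugins'],
                          keep_going=True)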
@@ -490,6 +604,8 @@ class PluginLoader(object):
def update(self, packages=None, paths=None, ignore_paths=None):
""" Load plugins from the specified paths/packages
without clearing or reloading existing plugins. """
msg = 'Updating from: packages={} paths={}'
self.logger.debug(msg.format(packages, paths))
if packages:
self.packages.extend(packages)
self._discover_from_packages(packages)
@@ -505,6 +621,7 @@ class PluginLoader(object):
def reload(self):
""" Clear all discovered items and re-run the discovery. """
self.logger.debug('Reloading')
self.clear()
self._discover_from_packages(self.packages)
self._discover_from_paths(self.paths, self.ignore_paths)
@@ -519,15 +636,16 @@
try:
return self.plugins[name]
except KeyError:
raise NotFoundError('plugins {} not found.'.format(name))
if kind not in self.kind_map:
raise ValueError('Unknown plugin type: {}'.format(kind))
store = self.kind_map[kind]
if name not in store:
msg = 'plugins {} is not {} {}.'
raise NotFoundError(msg.format(name, get_article(kind), kind))
return store[name]
def get_plugin(self, name=None, kind=None, *args, **kwargs):
"""
Return plugin of the specified kind with the specified name. Any
additional parameters will be passed to the plugin's __init__.
@@ -548,7 +666,7 @@
"""
real_name, alias_config = self.resolve_alias(name)
base_default_config = self.get_plugin_class(real_name).get_default_config()
return merge_dicts_simple(base_default_config, alias_config)
def list_plugins(self, kind=None):
"""
@@ -588,7 +706,7 @@
return (alias_name, {})
if alias_name in self.aliases:
alias = self.aliases[alias_name]
return (alias.plugin_name, alias.params)
raise NotFoundError('Could not find plugin or alias "{}"'.format(alias_name))
# Internal methods.
@@ -605,41 +723,45 @@
loader.get_plugin('foo', kind='device')
"""
error_msg = 'No plugins of type "{}" discovered'
if name.startswith('get_'):
name = name.replace('get_', '', 1)
if name in self.kind_map:
def __wrapper(pname, *args, **kwargs):
return self.get_plugin(pname, name, *args, **kwargs)
return __wrapper
raise NotFoundError(error_msg.format(name))
if name.startswith('list_'):
name = name.replace('list_', '', 1).rstrip('s')
if name in self.kind_map:
def __wrapper(*args, **kwargs): # pylint: disable=E0102
return self.list_plugins(name, *args, **kwargs)
return __wrapper
raise NotFoundError(error_msg.format(name))
if name.startswith('has_'):
name = name.replace('has_', '', 1)
if name in self.kind_map:
def __wrapper(pname, *args, **kwargs): # pylint: disable=E0102
return self.has_plugin(pname, name, *args, **kwargs)
return __wrapper
raise NotFoundError(error_msg.format(name))
raise AttributeError(name)
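So, assuming a registered kind named 'workload', the wrappers generated above behave as follows:

    loader.get_workload('dhrystone')   # -> loader.get_plugin('dhrystone', 'workload')
    loader.list_workloads()            # -> loader.list_plugins('workload')
    loader.has_workload('dhrystone')   # -> loader.has_plugin('dhrystone', 'workload')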
def _discover_from_packages(self, packages):
self.logger.debug('Discovering plugins in packages')
try:
for package in packages:
for module in walk_modules(package):
self._discover_in_module(module)
except HostError as e:
message = 'Problem loading plugins from {}: {}'
raise PluginLoaderError(message.format(e.module, str(e.orig_exc)),
e.exc_info)
def _discover_from_paths(self, paths, ignore_paths):
paths = paths or []
ignore_paths = ignore_paths or []
self.logger.debug('Discovering plugins in paths')
for path in paths:
self.logger.debug('Checking path %s', path)
@@ -654,7 +776,7 @@
if should_skip:
continue
for fname in files:
if os.path.splitext(fname)[1].lower() != '.py':
continue
filepath = os.path.join(root, fname)
self._discover_from_file(filepath)
@@ -669,10 +791,11 @@
self.logger.warning('Failed to load {}'.format(filepath))
self.logger.warning('Got: {}'.format(e))
else:
msg = 'Failed to load {}'
raise LoaderError(msg.format(filepath), sys.exc_info())
except Exception as e:
message = 'Problem loading plugins from {}: {}'
raise LoaderError(message.format(filepath, e))
def _discover_in_module(self, module): # NOQA pylint: disable=too-many-branches
self.logger.debug('Checking module %s', module.__name__)
@@ -699,6 +822,7 @@
raise e
finally:
log.dedent()
pass
def _add_found_plugin(self, obj):
"""
@@ -708,8 +832,9 @@
self.logger.debug('Adding %s %s', obj.kind, obj.name)
key = identifier(obj.name.lower())
if key in self.plugins or key in self.aliases:
raise PluginLoaderError('{} "{}" already exists.'.format(obj.kind, obj.name))
# Plugins are tracked both, in a common plugins
msg = '{} "{}" already exists.'
raise PluginLoaderError(msg.format(obj.kind, obj.name))
# Plugins are tracked both in a common plugins dict and in
# per-kind dicts (as retrieving plugins by kind is a common
# use case).
self.plugins[key] = obj
@@ -718,17 +843,6 @@
for alias in obj.aliases:
alias_id = identifier(alias.name.lower())
if alias_id in self.plugins or alias_id in self.aliases:
raise PluginLoaderError('{} "{}" already exists.'.format(obj.kind, obj.name))
msg = '{} "{}" already exists.'
raise PluginLoaderError(msg.format(obj.kind, obj.name))
self.aliases[alias_id] = alias
# Update global aliases list. If a global alias is already in the list,
# then make sure this plugin is in the same parent/child hierarchy
# as the one already found.
for param in obj.parameters:
if param.global_alias:
if param.global_alias not in self.global_param_aliases:
ga = GlobalParameterAlias(param.global_alias)
ga.update(obj)
self.global_param_aliases[ga.name] = ga
else: # global alias already exists.
self.global_param_aliases[param.global_alias].update(obj)


@@ -17,53 +17,73 @@ import sys
class __LoaderWrapper(object):
@property
def kinds(self):
if not self._loader:
self.reset()
return self._loader.kind_map.keys()
@property
def kind_map(self):
if not self._loader:
self.reset()
return self._loader.kind_map
def __init__(self):
self._loader = None
def reset(self):
# These imports cannot be done at top level, because of
# sys.modules manipulation below
from wa.framework.plugin import PluginLoader
from wa.framework.configuration.core import settings
self._loader = PluginLoader(settings.plugin_packages,
[settings.plugins_directory], [])
def update(self, packages=None, paths=None, ignore_paths=None):
if not self._loader:
self.reset()
self._loader.update(packages, paths, ignore_paths)
def reload(self):
if not self._loader:
self.reset()
self._loader.reload()
def list_plugins(self, kind=None):
if not self._loader:
self.reset()
return self._loader.list_plugins(kind)
def has_plugin(self, name, kind=None):
if not self._loader:
self.reset()
return self._loader.has_plugin(name, kind)
def get_plugin_class(self, name, kind=None):
if not self._loader:
self.reset()
return self._loader.get_plugin_class(name, kind)
def get_plugin(self, name=None, kind=None, *args, **kwargs):
if not self._loader:
self.reset()
return self._loader.get_plugin(name=name, kind=kind, *args, **kwargs)
def get_default_config(self, name):
if not self._loader:
self.reset()
return self._loader.get_default_config(name)
def resolve_alias(self, name):
if not self._loader:
self.reset()
return self._loader.resolve_alias(name)
def __getattr__(self, name):
if not self._loader:
self.reset()
return getattr(self._loader, name)
sys.modules[__name__] = __LoaderWrapper()


@@ -60,6 +60,23 @@ class GetterPriority(object):
remote = -20
class __NullOwner(object):
"""Represents an owner for a resource not owned by anyone."""
name = 'noone'
dependencies_directory = settings.dependencies_directory
def __getattr__(self, name):
return None
def __str__(self):
return 'no-one'
__repr__ = __str__
NO_ONE = __NullOwner()
class Resource(object):
"""
Represents a resource that needs to be resolved. This can be pretty much
@@ -95,6 +112,73 @@ class Resource(object):
return '<{}\'s {}>'.format(self.owner, self.name)
class FileResource(Resource):
"""
Base class for all resources that are a regular file in the
file system.
"""
def delete(self, instance):
os.remove(instance)
class File(FileResource):
name = 'file'
def __init__(self, owner, path, url=None):
super(File, self).__init__(owner)
self.path = path
self.url = url
def __str__(self):
return '<{}\'s {} {}>'.format(self.owner, self.name, self.path or self.url)
class PluginAsset(File):
name = 'plugin_asset'
def __init__(self, owner, path):
super(PluginAsset, self).__init__(owner, os.path.join(owner.name, path))
class Executable(FileResource):
name = 'executable'
def __init__(self, owner, platform, filename):
super(Executable, self).__init__(owner)
self.platform = platform
self.filename = filename
def __str__(self):
return '<{}\'s {} {}>'.format(self.owner, self.platform, self.filename)
class ReventFile(FileResource):
name = 'revent'
def __init__(self, owner, stage):
super(ReventFile, self).__init__(owner)
self.stage = stage
class JarFile(FileResource):
name = 'jar'
class ApkFile(FileResource):
name = 'apk'
def __init__(self, owner, version):
super(ApkFile, self).__init__(owner)
self.version = version
class ResourceGetter(Plugin):
"""
Base class for implementing resolvers. Defines resolver
@@ -201,18 +285,20 @@ class ResourceResolver(object):
"""
def __init__(self, config):
self.logger = logging.getLogger(self.__class__.__name__)
self.getters = defaultdict(prioritylist)
self.config = config
def load(self):
"""
Discover resource getters exposed via the plugin loader and register
them with this resolver.
"""
for rescls in pluginloader.list_resource_getters():
getter = self.config.get_plugin(name=rescls.name, kind="resource_getter", resolver=self)
getter.register()
def get(self, resource, strict=True, *args, **kwargs):
@@ -259,7 +345,7 @@
means should register with lower (negative) priorities.
"""
self.logger.debug('Registering {} for {} resources'.format(getter.name, kind))
self.getters[kind].add(getter, priority)
def unregister(self, getter, kind):
@@ -273,420 +359,6 @@
except ValueError:
raise ValueError('Resource getter {} is not installed.'.format(getter.name))
class __NullOwner(object):
"""Represents an owner for a resource not owned by anyone."""
name = 'noone'
dependencies_directory = settings.dependencies_directory
def __getattr__(self, name):
return None
def __str__(self):
return 'no-one'
__repr__ = __str__
NO_ONE = __NullOwner()
class FileResource(Resource):
"""
Base class for all resources that are a regular file in the
file system.
"""
def delete(self, instance):
os.remove(instance)
class File(FileResource):
name = 'file'
def __init__(self, owner, path, url=None):
super(File, self).__init__(owner)
self.path = path
self.url = url
def __str__(self):
return '<{}\'s {} {}>'.format(self.owner, self.name, self.path or self.url)
class ExtensionAsset(File):
name = 'extension_asset'
def __init__(self, owner, path):
super(ExtensionAsset, self).__init__(
owner, os.path.join(owner.name, path))
class Executable(FileResource):
name = 'executable'
def __init__(self, owner, platform, filename):
super(Executable, self).__init__(owner)
self.platform = platform
self.filename = filename
def __str__(self):
return '<{}\'s {} {}>'.format(self.owner, self.platform, self.filename)
class ReventFile(FileResource):
name = 'revent'
def __init__(self, owner, stage):
super(ReventFile, self).__init__(owner)
self.stage = stage
class JarFile(FileResource):
name = 'jar'
class ApkFile(FileResource):
name = 'apk'
class PackageFileGetter(ResourceGetter):
name = 'package_file'
description = """
Looks for exactly one file with the specified extension in the owner's
directory. If a version is specified on invocation of get, it will filter
the discovered file based on that version. Versions are treated as
case-insensitive.
"""
extension = None
def register(self):
self.resolver.register(self, self.extension, GetterPriority.package)
def get(self, resource, **kwargs):
resource_dir = os.path.dirname(
sys.modules[resource.owner.__module__].__file__)
version = kwargs.get('version')
return get_from_location_by_extension(resource, resource_dir, self.extension, version)
class EnvironmentFileGetter(ResourceGetter):
name = 'environment_file'
description = """
Looks for exactly one file with the specified extension in the owner's
directory. If a version is specified on invocation of get, it will filter
the discovered file based on that version. Versions are treated as
case-insensitive.
"""
extension = None
def register(self):
self.resolver.register(self, self.extension,
GetterPriority.environment)
def get(self, resource, **kwargs):
resource_dir = resource.owner.dependencies_directory
version = kwargs.get('version')
return get_from_location_by_extension(resource, resource_dir, self.extension, version)
class ReventGetter(ResourceGetter):
"""Implements logic for identifying revent files."""
def get_base_location(self, resource):
raise NotImplementedError()
def register(self):
self.resolver.register(self, 'revent', GetterPriority.package)
def get(self, resource, **kwargs):
filename = '.'.join([resource.owner.device.name,
resource.stage, 'revent']).lower()
location = _d(os.path.join(
self.get_base_location(resource), 'revent_files'))
for candidate in os.listdir(location):
if candidate.lower() == filename.lower():
return os.path.join(location, candidate)
class PackageApkGetter(PackageFileGetter):
name = 'package_apk'
extension = 'apk'
class PackageJarGetter(PackageFileGetter):
name = 'package_jar'
extension = 'jar'
class PackageReventGetter(ReventGetter):
name = 'package_revent'
def get_base_location(self, resource):
return _get_owner_path(resource)
class EnvironmentApkGetter(EnvironmentFileGetter):
name = 'environment_apk'
extension = 'apk'
class EnvironmentJarGetter(EnvironmentFileGetter):
name = 'environment_jar'
extension = 'jar'
class EnvironmentReventGetter(ReventGetter):
name = 'enviroment_revent'
def get_base_location(self, resource):
return resource.owner.dependencies_directory
class ExecutableGetter(ResourceGetter):
name = 'exe_getter'
resource_type = 'executable'
priority = GetterPriority.environment
def get(self, resource, **kwargs):
if settings.binaries_repository:
path = os.path.join(settings.binaries_repository,
resource.platform, resource.filename)
if os.path.isfile(path):
return path
class PackageExecutableGetter(ExecutableGetter):
name = 'package_exe_getter'
priority = GetterPriority.package
def get(self, resource, **kwargs):
path = os.path.join(_get_owner_path(resource), 'bin',
resource.platform, resource.filename)
if os.path.isfile(path):
return path
class EnvironmentExecutableGetter(ExecutableGetter):
name = 'env_exe_getter'
def get(self, resource, **kwargs):
paths = [
os.path.join(resource.owner.dependencies_directory, 'bin',
resource.platform, resource.filename),
os.path.join(settings.environment_root, 'bin',
resource.platform, resource.filename),
]
for path in paths:
if os.path.isfile(path):
return path
class DependencyFileGetter(ResourceGetter):
name = 'filer'
description = """
Gets resources from the specified mount point. Copies them to the local dependencies
directory, and returns the path to the local copy.
"""
resource_type = 'file'
relative_path = '' # May be overridden by subclasses.
default_mount_point = '/'
priority = GetterPriority.remote
parameters = [
Parameter('mount_point', default='/', global_alias='filer_mount_point',
description='Local mount point for the remote filer.'),
]
def __init__(self, resolver, **kwargs):
super(DependencyFileGetter, self).__init__(resolver, **kwargs)
self.mount_point = settings.filer_mount_point or self.default_mount_point
def get(self, resource, **kwargs):
force = kwargs.get('force')
remote_path = os.path.join(
self.mount_point, self.relative_path, resource.path)
local_path = os.path.join(
resource.owner.dependencies_directory, os.path.basename(resource.path))
if not os.path.isfile(local_path) or force:
if not os.path.isfile(remote_path):
return None
self.logger.debug('Copying {} to {}'.format(
remote_path, local_path))
shutil.copy(remote_path, local_path)
return local_path
class PackageCommonDependencyGetter(ResourceGetter):
name = 'packaged_common_dependency'
resource_type = 'file'
priority = GetterPriority.package - 1 # check after owner-specific locations
def get(self, resource, **kwargs):
path = os.path.join(settings.package_directory,
'common', resource.path)
if os.path.exists(path):
return path
class EnvironmentCommonDependencyGetter(ResourceGetter):
name = 'environment_common_dependency'
resource_type = 'file'
# check after owner-specific locations
priority = GetterPriority.environment - 1
def get(self, resource, **kwargs):
path = os.path.join(settings.dependencies_directory,
os.path.basename(resource.path))
if os.path.exists(path):
return path
class PackageDependencyGetter(ResourceGetter):
name = 'packaged_dependency'
resource_type = 'file'
priority = GetterPriority.package
def get(self, resource, **kwargs):
owner_path = inspect.getfile(resource.owner.__class__)
path = os.path.join(os.path.dirname(owner_path), resource.path)
if os.path.exists(path):
return path
class EnvironmentDependencyGetter(ResourceGetter):
name = 'environment_dependency'
resource_type = 'file'
priority = GetterPriority.environment
def get(self, resource, **kwargs):
path = os.path.join(resource.owner.dependencies_directory,
os.path.basename(resource.path))
if os.path.exists(path):
return path
class ExtensionAssetGetter(DependencyFileGetter):
name = 'extension_asset'
resource_type = 'extension_asset'
relative_path = 'workload_automation/assets'
class RemoteFilerGetter(ResourceGetter):
name = 'filer_assets'
description = """
Finds resources on a (locally mounted) remote filer and caches them locally.
This assumes that the filer is mounted on the local machine (e.g. as a samba share).
"""
priority = GetterPriority.remote
resource_type = ['apk', 'file', 'jar', 'revent']
parameters = [
Parameter('remote_path', global_alias='remote_assets_path', default='',
description="""
Path, on the local system, where the assets are located.
"""),
Parameter('always_fetch', kind=boolean, default=False, global_alias='always_fetch_remote_assets',
description="""
If ``True``, will always attempt to fetch assets from the
remote, even if a local cached copy is available.
"""),
]
def get(self, resource, **kwargs):
version = kwargs.get('version')
if resource.owner:
remote_path = os.path.join(self.remote_path, resource.owner.name)
local_path = os.path.join(
settings.environment_root, resource.owner.dependencies_directory)
return self.try_get_resource(resource, version, remote_path, local_path)
else:
result = None
for entry in os.listdir(remote_path):
remote_path = os.path.join(self.remote_path, entry)
local_path = os.path.join(
settings.environment_root, settings.dependencies_directory, entry)
result = self.try_get_resource(
resource, version, remote_path, local_path)
if result:
break
return result
def try_get_resource(self, resource, version, remote_path, local_path):
if not self.always_fetch:
result = self.get_from(resource, version, local_path)
if result:
return result
if remote_path:
# Didn't find it cached locally; now check the remote
result = self.get_from(resource, version, remote_path)
if not result:
return result
else: # remote path is not set
return None
# Found it remotely, cache locally, then return it
local_full_path = os.path.join(
_d(local_path), os.path.basename(result))
self.logger.debug('cp {} {}'.format(result, local_full_path))
shutil.copy(result, local_full_path)
return local_full_path
def get_from(self, resource, version, location): # pylint: disable=no-self-use
if resource.name in ['apk', 'jar']:
return get_from_location_by_extension(resource, location, resource.name, version)
elif resource.name == 'file':
filepath = os.path.join(location, resource.path)
if os.path.exists(filepath):
return filepath
elif resource.name == 'revent':
filename = '.'.join(
[resource.owner.device.name, resource.stage, 'revent']).lower()
alternate_location = os.path.join(location, 'revent_files')
# There tends to be some confusion as to where revent files should
# be placed. This looks both in the extension's directory, and in
# 'revent_files' subdirectory under it, if it exists.
if os.path.isdir(alternate_location):
for candidate in os.listdir(alternate_location):
if candidate.lower() == filename.lower():
return os.path.join(alternate_location, candidate)
if os.path.isdir(location):
for candidate in os.listdir(location):
if candidate.lower() == filename.lower():
return os.path.join(location, candidate)
else:
message = 'Unexpected resource type: {}'.format(resource.name)
raise ValueError(message)
# Utility functions
def get_from_location_by_extension(resource, location, extension, version=None):


@@ -0,0 +1,510 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains the standard set of resource getters used by Workload Automation.
"""
import os
import sys
import shutil
import inspect
import httplib
import logging
import json
import requests
from wa import Parameter, settings, __file__ as __base_filepath
from wa.framework.resource import ResourceGetter, GetterPriority, NO_ONE
from wa.framework.exception import ResourceError
from wa.utils.misc import (ensure_directory_exists as _d,
ensure_file_directory_exists as _f, sha256, urljoin)
from wa.utils.types import boolean
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
class PackageFileGetter(ResourceGetter):
name = 'package_file'
description = """
Looks for exactly one file with the specified plugin in the owner's directory. If a version
is specified on invocation of get, it will filter the discovered file based on that version.
Versions are treated as case-insensitive.
"""
plugin = None
def register(self):
self.resolver.register(self, self.plugin, GetterPriority.package)
def get(self, resource, **kwargs):
resource_dir = os.path.dirname(sys.modules[resource.owner.__module__].__file__)
version = kwargs.get('version')
return get_from_location_by_plugin(resource, resource_dir, self.plugin, version)
class EnvironmentFileGetter(ResourceGetter):
name = 'environment_file'
description = """Looks for exactly one file with the specified plugin in the owner's directory. If a version
is specified on invocation of get, it will filter the discovered file based on that version.
Versions are treated as case-insensitive."""
plugin = None
def register(self):
self.resolver.register(self, self.plugin, GetterPriority.environment)
def get(self, resource, **kwargs):
resource_dir = resource.owner.dependencies_directory
version = kwargs.get('version')
return get_from_location_by_plugin(resource, resource_dir, self.plugin, version)
class ReventGetter(ResourceGetter):
"""Implements logic for identifying revent files."""
def get_base_location(self, resource):
raise NotImplementedError()
def register(self):
self.resolver.register(self, 'revent', GetterPriority.package)
def get(self, resource, **kwargs):
filename = '.'.join([resource.owner.device.model, resource.stage, 'revent']).lower()
location = _d(os.path.join(self.get_base_location(resource), 'revent_files'))
for candidate in os.listdir(location):
if candidate.lower() == filename.lower():
return os.path.join(location, candidate)
class PackageApkGetter(PackageFileGetter):
name = 'package_apk'
plugin = 'apk'
class PackageJarGetter(PackageFileGetter):
name = 'package_jar'
plugin = 'jar'
class PackageReventGetter(ReventGetter):
name = 'package_revent'
def get_base_location(self, resource):
return get_owner_path(resource)
class EnvironmentApkGetter(EnvironmentFileGetter):
name = 'environment_apk'
plugin = 'apk'
class EnvironmentJarGetter(EnvironmentFileGetter):
name = 'environment_jar'
plugin = 'jar'
class EnvironmentReventGetter(ReventGetter):
name = 'enviroment_revent'
def get_base_location(self, resource):
return resource.owner.dependencies_directory
class ExecutableGetter(ResourceGetter):
name = 'exe_getter'
resource_type = 'executable'
priority = GetterPriority.environment
def get(self, resource, **kwargs):
if settings.assets_repository:
path = os.path.join(settings.assets_repository, resource.platform, resource.filename)
if os.path.isfile(path):
return path
class PackageExecutableGetter(ExecutableGetter):
name = 'package_exe_getter'
priority = GetterPriority.package
def get(self, resource, **kwargs):
path = os.path.join(get_owner_path(resource), 'bin', resource.platform, resource.filename)
if os.path.isfile(path):
return path
class EnvironmentExecutableGetter(ExecutableGetter):
name = 'env_exe_getter'
def get(self, resource, **kwargs):
paths = [
os.path.join(resource.owner.dependencies_directory, 'bin',
resource.platform, resource.filename),
os.path.join(settings.user_directory, 'bin',
resource.platform, resource.filename),
]
for path in paths:
if os.path.isfile(path):
return path
class DependencyFileGetter(ResourceGetter):
name = 'filer'
description = """
Gets resources from the specified mount point. Copies them to the local dependencies
directory, and returns the path to the local copy.
"""
resource_type = 'file'
relative_path = '' # May be overridden by subclasses.
priority = GetterPriority.remote
parameters = [
Parameter('mount_point', default='/', global_alias='remote_assets_path',
description='Local mount point for the remote filer.'),
]
def __init__(self, resolver, **kwargs):
super(DependencyFileGetter, self).__init__(resolver, **kwargs)
def get(self, resource, **kwargs):
force = kwargs.get('force')
remote_path = os.path.join(self.mount_point, self.relative_path, resource.path)
local_path = os.path.join(resource.owner.dependencies_directory, os.path.basename(resource.path))
if not os.path.isfile(local_path) or force:
if not os.path.isfile(remote_path):
return None
self.logger.debug('Copying {} to {}'.format(remote_path, local_path))
shutil.copy(remote_path, local_path)
return local_path
class PackageCommonDependencyGetter(ResourceGetter):
name = 'packaged_common_dependency'
resource_type = 'file'
priority = GetterPriority.package - 1 # check after owner-specific locations
def get(self, resource, **kwargs):
path = os.path.join(settings.package_directory, 'common', resource.path)
if os.path.exists(path):
return path
class EnvironmentCommonDependencyGetter(ResourceGetter):
name = 'environment_common_dependency'
resource_type = 'file'
priority = GetterPriority.environment - 1 # check after owner-specific locations
def get(self, resource, **kwargs):
path = os.path.join(settings.dependencies_directory,
os.path.basename(resource.path))
if os.path.exists(path):
return path
class PackageDependencyGetter(ResourceGetter):
name = 'packaged_dependency'
resource_type = 'file'
priority = GetterPriority.package
def get(self, resource, **kwargs):
owner_path = inspect.getfile(resource.owner.__class__)
path = os.path.join(os.path.dirname(owner_path), resource.path)
if os.path.exists(path):
return path
class EnvironmentDependencyGetter(ResourceGetter):
name = 'environment_dependency'
resource_type = 'file'
priority = GetterPriority.environment
def get(self, resource, **kwargs):
path = os.path.join(resource.owner.dependencies_directory, os.path.basename(resource.path))
if os.path.exists(path):
return path
class PluginAssetGetter(DependencyFileGetter):
name = 'plugin_asset'
resource_type = 'plugin_asset'
class HttpGetter(ResourceGetter):
name = 'http_assets'
description = """
Downloads resources from a server based on an index fetched from the specified URL.
Given a URL, this will try to fetch ``<URL>/index.json``. The index file maps plugin
names to a list of corresponding asset descriptions. Each asset description contains a path
(relative to the base URL) of the resource and a SHA256 hash, so that this Getter can
verify whether the resource on the remote has changed.
For example, let's assume we want to get the APK file for workload "foo", and that
assets are hosted at ``http://example.com/assets``. This Getter will first try to
download ``http://example.com/assets/index.json``. The index file may contain
something like ::
{
"foo": [
{
"path": "foo-app.apk",
"sha256": "b14530bb47e04ed655ac5e80e69beaa61c2020450e18638f54384332dffebe86"
},
{
"path": "subdir/some-other-asset.file",
"sha256": "48d9050e9802246d820625717b72f1c2ba431904b8484ca39befd68d1dbedfff"
}
]
}
This Getter will look through the list of assets for "foo" (in this case, two) check
the paths until it finds one matching the resource (in this case, "foo-app.apk").
Finally, it will try to dowload that file relative to the base URL and plugin name
(in this case, "http://example.com/assets/foo/foo-app.apk"). The downloaded version
will be cached locally, so that in the future, the getter will check the SHA256 hash
of the local file against the one advertised inside index.json, and provided that hasn't
changed, it won't try to download the file again.
"""
priority = GetterPriority.remote
resource_type = ['apk', 'file', 'jar', 'revent']
parameters = [
Parameter('url', global_alias='remote_assets_url',
description="""URL of the index file for assets on an HTTP server."""),
Parameter('username',
description="""User name for authenticating with assets URL"""),
Parameter('password',
description="""Password for authenticationg with assets URL"""),
Parameter('always_fetch', kind=boolean, default=False, global_alias='always_fetch_remote_assets',
description="""If ``True``, will always attempt to fetch assets from the remote, even if
a local cached copy is available."""),
Parameter('chunk_size', kind=int, default=1024,
description="""Chunk size for streaming large assets."""),
]
def __init__(self, resolver, **kwargs):
super(HttpGetter, self).__init__(resolver, **kwargs)
self.index = None
def get(self, resource, **kwargs):
if not resource.owner:
return # TODO: add support for unowned resources
if not self.index:
self.index = self.fetch_index()
asset = self.resolve_resource(resource)
if not asset:
return
return self.download_asset(asset, resource.owner.name)
def fetch_index(self):
if not self.url:
return {}
index_url = urljoin(self.url, 'index.json')
response = self.geturl(index_url)
if response.status_code != httplib.OK:
message = 'Could not fetch "{}"; received "{} {}"'
self.logger.error(message.format(index_url, response.status_code, response.reason))
return {}
return json.loads(response.content)
def download_asset(self, asset, owner_name):
url = urljoin(self.url, owner_name, asset['path'])
local_path = _f(os.path.join(settings.dependencies_directory, '__remote',
owner_name, asset['path'].replace('/', os.sep)))
if os.path.isfile(local_path) and not self.always_fetch:
local_sha = sha256(local_path)
if local_sha == asset['sha256']:
self.logger.debug('Local SHA256 matches; not re-downloading')
return local_path
self.logger.debug('Downloading {}'.format(url))
response = self.geturl(url, stream=True)
if response.status_code != httplib.OK:
message = 'Could not download asset "{}"; received "{} {}"'
self.logger.warning(message.format(url, response.status_code, response.reason))
return
with open(local_path, 'wb') as wfh:
for chunk in response.iter_content(chunk_size=self.chunk_size):
wfh.write(chunk)
return local_path
def geturl(self, url, stream=False):
if self.username:
auth = (self.username, self.password)
else:
auth = None
return requests.get(url, auth=auth, stream=stream)
def resolve_resource(self, resource):
assets = self.index.get(resource.owner.name, {})
if not assets:
return {}
if resource.name in ['apk', 'jar']:
paths = [a['path'] for a in assets]
version = getattr(resource, 'version', None)
found = get_from_list_by_plugin(resource, paths, resource.name, version)
if found:
for a in assets:
if a['path'] == found:
return a
elif resource.name == 'revent':
filename = '.'.join([resource.owner.device.name, resource.stage, 'revent']).lower()
for asset in assets:
pathname = os.path.basename(asset['path']).lower()
if pathname == filename:
return asset
else: # file
for asset in assets:
if asset['path'].lower() == resource.path.lower():
return asset
class RemoteFilerGetter(ResourceGetter):
name = 'filer_assets'
description = """
Finds resources on a (locally mounted) remote filer and caches them locally.
This assumes that the filer is mounted on the local machine (e.g. as a samba share).
"""
priority = GetterPriority.remote
resource_type = ['apk', 'file', 'jar', 'revent']
parameters = [
Parameter('remote_path', global_alias='remote_assets_path', default='',
description="""Path, on the local system, where the assets are located."""),
Parameter('always_fetch', kind=boolean, default=False, global_alias='always_fetch_remote_assets',
description="""If ``True``, will always attempt to fetch assets from the remote, even if
a local cached copy is available."""),
]
def get(self, resource, **kwargs):
version = kwargs.get('version')
if resource.owner:
remote_path = os.path.join(self.remote_path, resource.owner.name)
local_path = os.path.join(settings.user_directory, '__filer', resource.owner.dependencies_directory)
return self.try_get_resource(resource, version, remote_path, local_path)
else:
result = None
for entry in os.listdir(self.remote_path):
remote_path = os.path.join(self.remote_path, entry)
local_path = os.path.join(settings.user_directory, '__filer', settings.dependencies_directory, entry)
result = self.try_get_resource(resource, version, remote_path, local_path)
if result:
break
return result
def try_get_resource(self, resource, version, remote_path, local_path):
if not self.always_fetch:
result = self.get_from(resource, version, local_path)
if result:
return result
if remote_path:
# Didn't find it cached locally; now check the remote
result = self.get_from(resource, version, remote_path)
if not result:
return result
else: # remote path is not set
return None
# Found it remotely, cache locally, then return it
local_full_path = os.path.join(_d(local_path), os.path.basename(result))
self.logger.debug('cp {} {}'.format(result, local_full_path))
shutil.copy(result, local_full_path)
return local_full_path
def get_from(self, resource, version, location): # pylint: disable=no-self-use
if resource.name in ['apk', 'jar']:
return get_from_location_by_plugin(resource, location, resource.name, version)
elif resource.name == 'file':
filepath = os.path.join(location, resource.path)
if os.path.exists(filepath):
return filepath
elif resource.name == 'revent':
filename = '.'.join([resource.owner.device.model, resource.stage, 'revent']).lower()
alternate_location = os.path.join(location, 'revent_files')
# There tends to be some confusion as to where revent files should
# be placed. This looks both in the plugin's directory, and in
# 'revent_files' subdirectory under it, if it exists.
if os.path.isdir(alternate_location):
for candidate in os.listdir(alternate_location):
if candidate.lower() == filename.lower():
return os.path.join(alternate_location, candidate)
if os.path.isdir(location):
for candidate in os.listdir(location):
if candidate.lower() == filename.lower():
return os.path.join(location, candidate)
else:
raise ValueError('Unexpected resource type: {}'.format(resource.name))
# Utility functions
def get_from_location_by_plugin(resource, location, plugin, version=None):
try:
found_files = [os.path.join(location, f) for f in os.listdir(location)]
except OSError:
return None
try:
return get_from_list_by_plugin(resource, found_files, plugin, version)
except ResourceError:
raise ResourceError('More than one .{} found in {} for {}.'.format(plugin,
location,
resource.owner.name))
def get_from_list_by_plugin(resource, filelist, plugin, version=None):
filelist = [ff for ff in filelist
if os.path.splitext(ff)[1].lower().endswith(plugin)]
if version:
filelist = [ff for ff in filelist if version.lower() in os.path.basename(ff).lower()]
if len(filelist) == 1:
return filelist[0]
elif not filelist:
return None
else:
raise ResourceError('More than one .{} found in {} for {}.'.format(plugin,
filelist,
resource.owner.name))
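For instance (file names illustrative; ``resource`` is only consulted for error reporting):

    candidates = ['/deps/foo/foo-4.2.apk', '/deps/foo/foo-5.0.apk']
    get_from_list_by_plugin(resource, candidates, 'apk', version='5.0')
    # -> '/deps/foo/foo-5.0.apk'; with version=None, both files match and
    #    ResourceError is raised instead.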
def get_owner_path(resource):
if resource.owner is NO_ONE:
return os.path.join(os.path.dirname(__base_filepath), 'common')
else:
return os.path.dirname(sys.modules[resource.owner.__module__].__file__)


@@ -26,7 +26,7 @@ from wa.framework.exception import JobError
from wa.utils import counter
from wa.utils.serializer import json
from wa.utils.misc import ensure_directory_exists as _d
from wa.utils.types import caseless_string


@@ -45,11 +45,14 @@ class Signal(object):
:param name: name is the identifier of the Signal object. Signal instances with
the same name refer to the same execution state/stage.
:param invert_priority: boolean parameter that determines whether multiple
                        callbacks for the same signal should be
                        ordered with ascending or descending
                        priorities. Typically this flag should be
                        set to True if the Signal is triggered
                        AFTER a state/stage has been reached.
                        That way callbacks with high priorities
                        will be called right after the event has
                        occurred.
"""
self.name = name
self.description = description
@@ -94,6 +97,10 @@ WARNING_LOGGED = Signal('warning-logged')
# even if there is an error, so you cannot assume in the handler that the
# device has booted successfully. In most cases, you should instead use the
# non-paired signals below.
BEFORE_RUN_INIT = Signal('before-run-init', invert_priority=True)
SUCCESSFUL_RUN_INIT = Signal('successful-run-init')
AFTER_RUN_INIT = Signal('after-run-init')
BEFORE_FLASHING = Signal('before-flashing', invert_priority=True)
SUCCESSFUL_FLASHING = Signal('successful-flashing')
AFTER_FLASHING = Signal('after-flashing')
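A sketch of subscribing to one of these signals (this assumes the module-level ``connect`` helper provided by this module; the handler and priority are illustrative):

    def before_flash_handler(sender):
        # Runs ahead of lower-priority handlers, per invert_priority above.
        print('about to flash the target')

    signal.connect(before_flash_handler, signal.BEFORE_FLASHING, priority=10)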

80
wa/framework/target.py Normal file

@@ -0,0 +1,80 @@
import string
from copy import copy
from devlib import Platform, AndroidTarget
from devlib.target import KernelConfig, KernelVersion, Cpuinfo
class TargetInfo(object):
@staticmethod
def from_pod(pod):
instance = TargetInfo()
instance.target = pod['target']
instance.abi = pod['abi']
instance.cpuinfo = Cpuinfo(pod['cpuinfo'])
instance.os = pod['os']
instance.os_version = pod['os_version']
instance.abi = pod['abi']
instance.is_rooted = pod['is_rooted']
instance.kernel_version = KernelVersion(pod['kernel_release'],
pod['kernel_version'])
instance.kernel_config = KernelConfig(pod['kernel_config'])
if pod["target"] == "AndroidTarget":
instance.screen_resolution = pod['screen_resolution']
instance.prop = pod['prop']
instance.android_id = pod['android_id']
return instance
def __init__(self, target=None):
if target:
self.target = target.__class__.__name__
self.cpuinfo = target.cpuinfo
self.os = target.os
self.os_version = target.os_version
self.abi = target.abi
self.is_rooted = target.is_rooted
self.kernel_version = target.kernel_version
self.kernel_config = target.config
if isinstance(target, AndroidTarget):
self.screen_resolution = target.screen_resolution
self.prop = target.getprop()
self.android_id = target.android_id
else:
self.target = None
self.cpuinfo = None
self.os = None
self.os_version = None
self.abi = None
self.is_rooted = None
self.kernel_version = None
self.kernel_config = None
self.screen_resolution = None
self.prop = None
self.android_id = None
def to_pod(self):
pod = {}
pod['target'] = self.target
pod['abi'] = self.abi
pod['cpuinfo'] = self.cpuinfo.sections
pod['os'] = self.os
pod['os_version'] = self.os_version
pod['abi'] = self.abi
pod['is_rooted'] = self.is_rooted
pod['kernel_release'] = self.kernel_version.release
pod['kernel_version'] = self.kernel_version.version
pod['kernel_config'] = dict(self.kernel_config.iteritems())
if self.target == "AndroidTarget":
pod['screen_resolution'] = self.screen_resolution
pod['prop'] = self.prop
pod['android_id'] = self.android_id
return pod
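A round-trip sketch (``target`` is assumed to be a connected ``devlib`` target), e.g. for caching target state alongside run output:

    info = TargetInfo(target)
    pod = info.to_pod()                  # plain-old-data, JSON-serializable
    restored = TargetInfo.from_pod(pod)  # reconstruct, e.g. on a later run
    assert restored.abi == info.abi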


@@ -0,0 +1,252 @@
from collections import OrderedDict
from copy import copy
from devlib import (LinuxTarget, AndroidTarget, LocalLinuxTarget,
Platform, Juno, TC2, Gem5SimulationPlatform)
from wa.framework import pluginloader
from wa.framework.exception import PluginLoaderError
from wa.framework.plugin import Plugin, Parameter
from wa.utils.misc import isiterable
from wa.utils.types import list_of_strings, list_of_ints
def get_target_descriptions(loader=pluginloader):
targets = {}
for cls in loader.list_target_descriptors():
descriptor = cls()
for desc in descriptor.get_descriptions():
if desc.name in targets:
msg = 'Duplicate target "{}" returned by {} and {}'
prev_dtor = targets[desc.name].source
raise PluginLoaderError(msg.format(desc.name, prev_dtor.name,
                                   descriptor.name))
targets[desc.name] = desc
return targets.values()
class TargetDescription(object):
def __init__(self, name, source, description=None, target=None, platform=None,
conn=None, target_params=None, platform_params=None,
conn_params=None):
self.name = name
self.source = source
self.description = description
self.target = target
self.platform = platform
self.connection = conn
self._set('target_params', target_params)
self._set('platform_params', platform_params)
self._set('conn_params', conn_params)
def _set(self, attr, vals):
if vals is None:
vals = {}
elif isiterable(vals):
if not hasattr(vals, 'iteritems'):
vals = {v.name: v for v in vals}
else:
msg = '{} must be iterable; got "{}"'
raise ValueError(msg.format(attr, vals))
setattr(self, attr, vals)
class TargetDescriptor(Plugin):
kind = 'target_descriptor'
def get_descriptions(self):
return []
COMMON_TARGET_PARAMS = [
Parameter('working_directory', kind=str,
description='''
On-target working directory that will be used by WA. This
directory must be writable by the user WA logs in as without
the need for privilege elevation.
'''),
Parameter('executables_directory', kind=str,
description='''
On-target directory where WA will install its executable
binaries. This location must allow execution. This location does
*not* need to be writable by unprivileged users or rooted devices
(WA will install with elevated privileges as necessary).
'''),
Parameter('modules', kind=list_of_strings,
description='''
A list of additional modules to be installed for the target.
``devlib`` implements functionality for particular subsystems as
modules. A number of "default" modules (e.g. for cpufreq
subsystem) are loaded automatically, unless explicitly disabled.
If additional modules need to be loaded, they may be specified
using this parameter.
Please see ``devlib`` documentation for information on the available
modules.
'''),
]
COMMON_PLATFORM_PARAMS = [
Parameter('core_names', kind=list_of_strings,
description='''
List of names of CPU cores in the order that they appear to the
kernel. If not specified, it will be inferred from the platform.
'''),
Parameter('core_clusters', kind=list_of_ints,
description='''
Cluster mapping corresponding to the cores in ``core_names``.
Cluster indexing starts at ``0``. If not specified, this will be
inferred from ``core_names`` -- consecutive cores with the same
name will be assumed to share a cluster.
'''),
Parameter('big_core', kind=str,
description='''
The name of the big cores in a big.LITTLE system. If not
specified, this will be inferred, either from the name (if one of
the names in ``core_names`` matches known big cores), or by
assuming that the last cluster is big.
'''),
Parameter('model', kind=str,
description='''
Hardware model of the platform. If not specified, an attempt will
be made to read it from target.
'''),
Parameter('modules', kind=list_of_strings,
description='''
An additional list of modules to be loaded into the target.
'''),
]
VEXPRESS_PLATFORM_PARAMS = [
Parameter('serial_port', kind=str,
description='''
The serial device/port on the host for the initial connection to
the target (used for early boot, flashing, etc).
'''),
Parameter('baudrate', kind=int,
description='''
Baud rate for the serial connection.
'''),
Parameter('vemsd_mount', kind=str,
description='''
VExpress MicroSD card mount location. This is a MicroSD card in
the VExpress device that is mounted on the host via USB. The card
contains configuration files for the platform and firmware and
kernel images to be flashed.
'''),
Parameter('bootloader', kind=str,
allowed_values=['uefi', 'uefi-shell', 'u-boot', 'bootmon'],
description='''
Selects the bootloader mechanism used by the board. Depending on
firmware version, a number of possible boot mechanisms may be used.
Please see ``devlib`` documentation for descriptions.
'''),
Parameter('hard_reset_method', kind=str,
allowed_values=['dtr', 'reboottxt'],
description='''
There are a couple of ways to reset a VersatileExpress board if the
software running on the board becomes unresponsive. Both require
configuration to be enabled (please see ``devlib`` documentation).
``dtr``: toggle the DTR line on the serial connection
``reboottxt``: create ``reboot.txt`` in the root of the VEMSD mount.
'''),
]
GEM5_PLATFORM_PARAMS = [
Parameter('host_output_dir', kind=str, mandatory=True,
description='''
Path on the host where gem5 output (e.g. stats file) will be placed.
'''),
Parameter('gem5_bin', kind=str, mandatory=True,
description='''
Path to the gem5 binary
'''),
Parameter('gem5_args', kind=str, mandatory=True,
description='''
Arguments to be passed to the gem5 binary
'''),
Parameter('gem5_virtio', kind=str, mandatory=True,
description='''
VirtIO device setup arguments to be passed to gem5. VirtIO is used
to transfer files between the simulation and the host.
'''),
]
# name --> (target_class, params_list, defaults)
TARGETS = {
'linux': (LinuxTarget, COMMON_TARGET_PARAMS, None),
'android': (AndroidTarget, COMMON_TARGET_PARAMS +
[Parameter('package_data_directory', kind=str, default='/data/data',
description='''
Directory containing Android data
'''),
], None),
'local': (LocalLinuxTarget, COMMON_TARGET_PARAMS, None),
}
# name --> (platform_class, params_list, defaults)
PLATFORMS = {
'generic': (Platform, COMMON_PLATFORM_PARAMS, None),
'juno': (Juno, COMMON_PLATFORM_PARAMS + VEXPRESS_PLATFORM_PARAMS,
{
'vemsd_mount': '/media/JUNO',
'baudrate': 115200,
'bootloader': 'u-boot',
'hard_reset_method': 'dtr',
}),
'tc2': (TC2, COMMON_PLATFORM_PARAMS + VEXPRESS_PLATFORM_PARAMS,
{
'vemsd_mount': '/media/VEMSD',
'baudrate': 38400,
'bootloader': 'bootmon',
'hard_reset_method': 'reboottxt',
}),
'gem5': (Gem5SimulationPlatform, GEM5_PLATFORM_PARAMS, None),
}
class DefaultTargetDescriptor(TargetDescriptor):
name = 'devlib_targets'
description = """
The default target descriptor that provides descriptions in the form
<platform>_<target>.
These map directly onto ``Target``\ s and ``Platform``\ s supplied by ``devlib``.
"""
def get_descriptions(self):
result = []
for target_name, target_tuple in TARGETS.iteritems():
target, target_params = self._get_item(target_tuple)
for platform_name, platform_tuple in PLATFORMS.iteritems():
platform, platform_params = self._get_item(platform_tuple)
name = '{}_{}'.format(platform_name, target_name)
td = TargetDescription(name, self)
td.target = target
td.platform = platform
td.target_params = target_params
td.platform_params = platform_params
result.append(td)
return result
def _get_item(self, item_tuple):
cls, params, defaults = item_tuple
if not defaults:
return cls, params
param_map = OrderedDict((p.name, copy(p)) for p in params)
for name, value in defaults.iteritems():
if name not in param_map:
raise ValueError('Unexpected default "{}"'.format(name))
param_map[name].default = value
return cls, param_map.values()
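Enumerating the generated descriptions might then look like this (a sketch; the names follow the <platform>_<target> scheme, e.g. 'generic_android' or 'juno_linux'):

    for desc in get_target_descriptions():
        print('{}: {} on {}'.format(desc.name, desc.target.__name__,
                                    desc.platform.__name__))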


@@ -0,0 +1,78 @@
from devlib import AndroidTarget
from devlib.exception import TargetError
from devlib.target import KernelConfig, KernelVersion, Cpuinfo
class TargetInfo(object):
@staticmethod
def from_pod(pod):
instance = TargetInfo()
instance.target = pod['target']
instance.abi = pod['abi']
instance.cpuinfo = Cpuinfo(pod['cpuinfo'])
instance.os = pod['os']
instance.os_version = pod['os_version']
instance.abi = pod['abi']
instance.is_rooted = pod['is_rooted']
instance.kernel_version = KernelVersion(pod['kernel_release'],
pod['kernel_version'])
instance.kernel_config = KernelConfig(pod['kernel_config'])
if pod["target"] == "AndroidTarget":
instance.screen_resolution = pod['screen_resolution']
instance.prop = pod['prop']
instance.android_id = pod['android_id']
return instance
def __init__(self, target=None):
if target:
self.target = target.__class__.__name__
self.cpuinfo = target.cpuinfo
self.os = target.os
self.os_version = target.os_version
self.abi = target.abi
self.is_rooted = target.is_rooted
self.kernel_version = target.kernel_version
self.kernel_config = target.config
if isinstance(target, AndroidTarget):
self.screen_resolution = target.screen_resolution
self.prop = target.getprop()
self.android_id = target.android_id
else:
self.target = None
self.cpuinfo = None
self.os = None
self.os_version = None
self.abi = None
self.is_rooted = None
self.kernel_version = None
self.kernel_config = None
self.screen_resolution = None
self.prop = None
self.android_id = None
def to_pod(self):
pod = {}
pod['target'] = self.target
pod['abi'] = self.abi
pod['cpuinfo'] = self.cpuinfo.sections
pod['os'] = self.os
pod['os_version'] = self.os_version
pod['abi'] = self.abi
pod['is_rooted'] = self.is_rooted
pod['kernel_release'] = self.kernel_version.release
pod['kernel_version'] = self.kernel_version.version
pod['kernel_config'] = dict(self.kernel_config.iteritems())
if self.target == "AndroidTarget":
pod['screen_resolution'] = self.screen_resolution
pod['prop'] = self.prop
pod['android_id'] = self.android_id
return pod


@@ -6,28 +6,31 @@ import time
import shutil
import sys
from wa.framework import signal
from wa.framework.exception import WorkerThreadError, ConfigError
from wa.framework.plugin import Parameter
from wa.framework.target.info import TargetInfo
from wa.framework.target.runtime_config import (SysfileValuesRuntimeConfig,
HotplugRuntimeConfig,
CpufreqRuntimeConfig,
CpuidleRuntimeConfig)
from wa.utils.misc import isiterable
from wa.utils.serializer import json
from devlib import LocalLinuxTarget, LinuxTarget, AndroidTarget
from devlib.utils.types import identifier
# from wa.target.manager import AndroidTargetManager, LinuxTargetManager
# from wa.framework.plugin import Plugin, Parameter
class TargetManager(object):
name = 'target-manager'
description = """
Instantiates the required target and performs configuration and validation
of the device.
"""
parameters = [


@@ -32,9 +32,10 @@ class Workload(TargetedPlugin):
def init_resources(self, context):
"""
This method may be used to perform early resource discovery and
initialization. This is invoked during the initial loading stage and
before the device is ready, so cannot be used for any device-dependent
initialization. This method is invoked before the workload instance is
validated.
"""
@@ -59,7 +60,10 @@ class Workload(TargetedPlugin):
pass
def run(self, context):
"""Execute the workload. This is the method that performs the actual "work" of the"""
"""
Execute the workload. This is the method that performs the actual
"work" of the.
"""
pass
def update_result(self, context):
@@ -83,7 +87,8 @@ class Workload(TargetedPlugin):
class UiAutomatorGUI(object):
def __init__(self, target, package='', klass='UiAutomation',
method='runUiAutoamtion'):
self.target = target
self.uiauto_package = package
self.uiauto_class = klass


@@ -1,85 +0,0 @@
from devlib.exception import TargetError
from devlib.target import KernelConfig, KernelVersion, Cpuinfo
class TargetInfo(object):
hmp_config_dir = '/sys/kernel/hmp'
def __init__(self):
self.os = None
self.kernel_version = None
self.kernel_cmdline = None
self.kernel_config = {}
self.sched_features = []
self.cpuinfo = None
self.os_version = {}
self.properties = {}
@staticmethod
def from_pod(pod):
kconfig_text = '\n'.join('{}={}'.format(k, v) for k, v in pod['kernel_config'].iteritems())
sections = []
for section in pod['cpuinfo']:
text = '\n'.join('{} : {}'.format(k, v) for k, v in section.iteritems())
sections.append(text)
cpuinfo_text = '\n\n'.join(sections)
instance = TargetInfo()
instance.os = pod['os']
instance.kernel_version = KernelVersion(pod['kernel_version'])
instance.kernel_cmdline = pod['kernel_cmdline']
instance.kernel_config = KernelConfig(kconfig_text)
instance.sched_features = pod['sched_features']
instance.cpuinfo = Cpuinfo(cpuinfo_text)
instance.os_version = pod['os_version']
instance.properties = pod['properties']
return instance
def to_pod(self):
kversion = str(self.kernel_version)
kconfig = {k: v for k, v in self.kernel_config.iteritems()}
return dict(
os=self.os,
kernel_version=kversion,
kernel_cmdline=self.kernel_cmdline,
kernel_config=kconfig,
sched_features=self.sched_features,
cpuinfo=self.cpuinfo.sections,
os_version=self.os_version,
properties=self.properties,
)
def load(self, target):
self.os = target.os
print target.is_rooted
self.os_version = target.os_version
self.kernel_version = target.kernel_version
self.kernel_cmdline = target.execute('cat /proc/cmdline',
as_root=target.is_rooted).strip()
self.kernel_config = target.config
self.cpuinfo = target.cpuinfo
try:
output = target.read_value('/sys/kernel/debug/sched_features')
self.sched_features = output.strip().split()
except TargetError:
pass
self.properties = self._get_properties(target)
def _get_properties(self, target):
props = {}
if target.file_exists(self.hmp_config_dir):
props['hmp'] = self._get_hmp_configuration(target)
if target.os == 'android':
props.update(target.getprop().iteritems())
return props
def _get_hmp_configuration(self, target):
hmp_props = {}
for entry in target.list_directory(self.hmp_config_dir):
path = target.path.join(self.hmp_config_dir, entry)
try:
hmp_props[entry] = target.read_value(path)
except TargetError:
pass
return hmp_props

148
wa/utils/formatter.py Normal file
View File

@ -0,0 +1,148 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wa.utils.terminalsize import get_terminal_size
INDENTATION_FROM_TITLE = 4
class TextFormatter(object):
"""
    This is a base class for text formatting. It mainly asks subclasses to
    implement two methods, add_item and format_data. The former adds new text
    to the formatter, whereas the latter returns the formatted text. The name
    attribute represents the name of the formatter.
"""
name = None
data = None
def __init__(self):
pass
def add_item(self, new_data, item_title):
"""
Add new item to the text formatter.
:param new_data: The data to be added
:param item_title: A title for the added data
"""
raise NotImplementedError()
def format_data(self):
"""
        Return the formatted text.
"""
raise NotImplementedError()
class DescriptionListFormatter(TextFormatter):
name = 'description_list_formatter'
data = None
def get_text_width(self):
if not self._text_width:
self._text_width, _ = get_terminal_size() # pylint: disable=unpacking-non-sequence
return self._text_width
def set_text_width(self, value):
self._text_width = value
text_width = property(get_text_width, set_text_width)
def __init__(self, title=None, width=None):
super(DescriptionListFormatter, self).__init__()
self.data_title = title
self._text_width = width
self.longest_word_length = 0
self.data = []
def add_item(self, new_data, item_title):
if len(item_title) > self.longest_word_length:
self.longest_word_length = len(item_title)
self.data[len(self.data):] = [(item_title, self._remove_newlines(new_data))]
def format_data(self):
parag_indentation = self.longest_word_length + INDENTATION_FROM_TITLE
string_formatter = '{}:<{}{} {}'.format('{', parag_indentation, '}', '{}')
formatted_data = ''
if self.data_title:
formatted_data += self.data_title
line_width = self.text_width - parag_indentation
for title, paragraph in self.data:
formatted_data += '\n'
title_len = self.longest_word_length - len(title)
title += ':'
if title_len > 0:
title = (' ' * title_len) + title
parag_lines = self._break_lines(paragraph, line_width).splitlines()
if parag_lines:
formatted_data += string_formatter.format(title, parag_lines[0])
for line in parag_lines[1:]:
formatted_data += '\n' + string_formatter.format('', line)
else:
formatted_data += title[:-1]
self.text_width = None
return formatted_data
    # Return the text's paragraphs separated into a list, such that each index
    # in the list is a single text paragraph with no new lines
def _remove_newlines(self, new_data): # pylint: disable=R0201
parag_list = ['']
parag_num = 0
prv_parag = None
        # For each paragraph separated by a new line
for paragraph in new_data.splitlines():
if paragraph:
parag_list[parag_num] += ' ' + paragraph
# if the previous line is NOT empty, then add new empty index for
# the next paragraph
elif prv_parag:
                parag_num += 1
parag_list.append('')
prv_parag = paragraph
        # sometimes, we end up with an empty string as the last item so we remove it
if not parag_list[-1]:
return parag_list[:-1]
return parag_list
def _break_lines(self, parag_list, line_width): # pylint: disable=R0201
formatted_paragraphs = []
for para in parag_list:
words = para.split()
if words:
formatted_text = words.pop(0)
current_width = len(formatted_text)
            # for each word in the paragraph, line width is an accumulation of
            # word length + 1 (1 is for the space after each word).
for word in words:
word = word.strip()
if current_width + len(word) + 1 >= line_width:
formatted_text += '\n' + word
current_width = len(word)
else:
formatted_text += ' ' + word
current_width += len(word) + 1
formatted_paragraphs.append(formatted_text)
return '\n\n'.join(formatted_paragraphs)
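# A short usage sketch for the formatter above (values are illustrative):
#
#     >>> formatter = DescriptionListFormatter(width=60)
#     >>> formatter.add_item('Lists available WA plugins.', 'list')
#     >>> formatter.add_item('Executes the specified agenda.', 'run')
#     >>> print formatter.format_data()
#
# Each title is right-aligned into a common column, with its description
# wrapped and indented alongside it.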

306
wa/utils/log.py Normal file
View File

@ -0,0 +1,306 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=E1101
import logging
import string
import threading
import subprocess
import colorama
from wa.framework import signal
from wa.framework.exception import WAError
from wa.utils.misc import get_traceback
COLOR_MAP = {
logging.DEBUG: colorama.Fore.BLUE,
logging.INFO: colorama.Fore.GREEN,
logging.WARNING: colorama.Fore.YELLOW,
logging.ERROR: colorama.Fore.RED,
logging.CRITICAL: colorama.Style.BRIGHT + colorama.Fore.RED,
}
RESET_COLOR = colorama.Style.RESET_ALL
_indent_level = 0
_indent_width = 4
_console_handler = None
def init(verbosity=logging.INFO, color=True, indent_with=4,
regular_fmt='%(levelname)-8s %(message)s',
verbose_fmt='%(asctime)s %(levelname)-8s %(name)10.10s: %(message)s',
debug=False):
global _indent_width, _console_handler
_indent_width = indent_with
signal.log_error_func = lambda m: log_error(m, signal.logger)
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
error_handler = ErrorSignalHandler(logging.DEBUG)
root_logger.addHandler(error_handler)
_console_handler = logging.StreamHandler()
if color:
formatter = ColorFormatter
else:
formatter = LineFormatter
if verbosity:
_console_handler.setLevel(logging.DEBUG)
_console_handler.setFormatter(formatter(verbose_fmt))
else:
_console_handler.setLevel(logging.INFO)
_console_handler.setFormatter(formatter(regular_fmt))
root_logger.addHandler(_console_handler)
logging.basicConfig(level=logging.DEBUG)
if not debug:
logging.raiseExceptions = False
def set_level(level):
_console_handler.setLevel(level)
def add_file(filepath, level=logging.DEBUG,
fmt='%(asctime)s %(levelname)-8s %(name)s: %(message)-10.10s'):
root_logger = logging.getLogger()
file_handler = logging.FileHandler(filepath)
file_handler.setLevel(level)
file_handler.setFormatter(LineFormatter(fmt))
root_logger.addHandler(file_handler)
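# Typical initialization (a hedged sketch; the file name is illustrative):
#
#     >>> import logging
#     >>> from wa.utils import log
#     >>> log.init(verbosity=logging.INFO)
#     >>> log.add_file('run.log')   # full DEBUG trace additionally goes to a file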
def enable(logs):
if isinstance(logs, list):
for log in logs:
__enable_logger(log)
else:
__enable_logger(logs)
def disable(logs):
if isinstance(logs, list):
for log in logs:
__disable_logger(log)
else:
__disable_logger(logs)
def __enable_logger(logger):
if isinstance(logger, basestring):
logger = logging.getLogger(logger)
logger.propagate = True
def __disable_logger(logger):
if isinstance(logger, basestring):
logger = logging.getLogger(logger)
logger.propagate = False
def indent():
global _indent_level
_indent_level += 1
def dedent():
global _indent_level
_indent_level -= 1
def log_error(e, logger, critical=False):
"""
Log the specified Exception as an error. The Error message will be formatted
differently depending on the nature of the exception.
:e: the error to log. should be an instance of ``Exception``
:logger: logger to be used.
:critical: if ``True``, this error will be logged at ``logging.CRITICAL``
level, otherwise it will be logged as ``logging.ERROR``.
"""
if critical:
log_func = logger.critical
else:
log_func = logger.error
if isinstance(e, KeyboardInterrupt):
log_func('Got CTRL-C. Aborting.')
elif isinstance(e, WAError):
log_func(e)
elif isinstance(e, subprocess.CalledProcessError):
tb = get_traceback()
log_func(tb)
command = e.cmd
if e.args:
command = '{} {}'.format(command, ' '.join(e.args))
message = 'Command \'{}\' returned non-zero exit status {}\nOUTPUT:\n{}\n'
log_func(message.format(command, e.returncode, e.output))
elif isinstance(e, SyntaxError):
tb = get_traceback()
log_func(tb)
message = 'Syntax Error in {}, line {}, offset {}:'
log_func(message.format(e.filename, e.lineno, e.offset))
log_func('\t{}'.format(e.msg))
else:
tb = get_traceback()
log_func(tb)
log_func('{}({})'.format(e.__class__.__name__, e))
class ErrorSignalHandler(logging.Handler):
"""
Emits signals for ERROR and WARNING level traces.
"""
def emit(self, record):
if record.levelno == logging.ERROR:
signal.send(signal.ERROR_LOGGED, self)
elif record.levelno == logging.WARNING:
signal.send(signal.WARNING_LOGGED, self)
class LineFormatter(logging.Formatter):
"""
Logs each line of the message separately.
"""
def format(self, record):
record.message = record.getMessage()
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
indent = _indent_width * _indent_level
d = record.__dict__
parts = []
for line in record.message.split('\n'):
line = ' ' * indent + line
d.update({'message': line.strip('\r')})
parts.append(self._fmt % d)
return '\n'.join(parts)
class ColorFormatter(LineFormatter):
"""
Formats logging records with color and prepends record info
to each line of the message.
BLUE for DEBUG logging level
GREEN for INFO logging level
YELLOW for WARNING logging level
RED for ERROR logging level
BOLD RED for CRITICAL logging level
"""
def __init__(self, fmt=None, datefmt=None):
super(ColorFormatter, self).__init__(fmt, datefmt)
template_text = self._fmt.replace('%(message)s', RESET_COLOR + '%(message)s${color}')
template_text = '${color}' + template_text + RESET_COLOR
self.fmt_template = string.Template(template_text)
def format(self, record):
self._set_color(COLOR_MAP[record.levelno])
return super(ColorFormatter, self).format(record)
def _set_color(self, color):
self._fmt = self.fmt_template.substitute(color=color)
class BaseLogWriter(object):
def __init__(self, name, level=logging.DEBUG):
"""
File-like object class designed to be used for logging from streams
Each complete line (terminated by new line character) gets logged
        at DEBUG level. Incomplete lines are buffered until the next new line.
:param name: The name of the logger that will be used.
"""
self.logger = logging.getLogger(name)
self.buffer = ''
if level == logging.DEBUG:
self.do_write = self.logger.debug
elif level == logging.INFO:
self.do_write = self.logger.info
elif level == logging.WARNING:
self.do_write = self.logger.warning
elif level == logging.ERROR:
self.do_write = self.logger.error
else:
raise Exception('Unknown logging level: {}'.format(level))
def flush(self):
# Defined to match the interface expected by pexpect.
return self
def close(self):
if self.buffer:
self.logger.debug(self.buffer)
self.buffer = ''
return self
def __del__(self):
        # Ensure we don't lose buffered output
self.close()
class LogWriter(BaseLogWriter):
def write(self, data):
data = data.replace('\r\n', '\n').replace('\r', '\n')
if '\n' in data:
parts = data.split('\n')
parts[0] = self.buffer + parts[0]
for part in parts[:-1]:
self.do_write(part)
self.buffer = parts[-1]
else:
self.buffer += data
return self
class LineLogWriter(BaseLogWriter):
def write(self, data):
self.do_write(data)
class StreamLogger(threading.Thread):
"""
Logs output from a stream in a thread.
"""
def __init__(self, name, stream, level=logging.DEBUG, klass=LogWriter):
super(StreamLogger, self).__init__()
self.writer = klass(name, level)
self.stream = stream
self.daemon = True
def run(self):
line = self.stream.readline()
while line:
self.writer.write(line.rstrip('\n'))
line = self.stream.readline()
self.writer.close()
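# A minimal sketch of capturing a subprocess's output via StreamLogger
# (the command is illustrative):
#
#     >>> proc = subprocess.Popen(['ls'], stdout=subprocess.PIPE)
#     >>> slog = StreamLogger('ls', proc.stdout)
#     >>> slog.start()
#     >>> proc.wait()
#     >>> slog.join()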

View File

@ -24,7 +24,6 @@ import sys
import re
import math
import imp
import uuid
import string
import threading
import signal
@ -33,154 +32,28 @@ import pkgutil
import traceback
import logging
import random
import hashlib
from datetime import datetime, timedelta
from operator import mul, itemgetter
from StringIO import StringIO
from itertools import cycle, groupby
from itertools import cycle, groupby, chain
from functools import partial
from distutils.spawn import find_executable
import yaml
from dateutil import tz
from wa.framework.version import get_wa_version
# ABI --> architectures list
ABI_MAP = {
'armeabi': ['armeabi', 'armv7', 'armv7l', 'armv7el', 'armv7lh'],
'arm64': ['arm64', 'armv8', 'arm64-v8a'],
}
def preexec_function():
# Ignore the SIGINT signal by setting the handler to the standard
# signal handler SIG_IGN.
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Change process group in case we have to kill the subprocess and all of
# its children later.
# TODO: this is Unix-specific; would be good to find an OS-agnostic way
# to do this in case we wanna port WA to Windows.
os.setpgrp()
from devlib.utils.misc import (ABI_MAP, check_output, walk_modules,
ensure_directory_exists, ensure_file_directory_exists,
normalize, convert_new_lines, get_cpu_mask, unique,
escape_quotes, escape_single_quotes, escape_double_quotes,
isiterable, getch, as_relative, ranges_to_list,
list_to_ranges, list_to_mask, mask_to_list, which)
check_output_logger = logging.getLogger('check_output')
# Defined here rather than in wlauto.exceptions due to module load dependencies
class TimeoutError(Exception):
"""Raised when a subprocess command times out. This is basically a ``WAError``-derived version
of ``subprocess.CalledProcessError``, the thinking being that while a timeout could be due to
programming error (e.g. not setting long enough timers), it is often due to some failure in the
    environment, and therefore should be classed as a "user error"."""
def __init__(self, command, output):
super(TimeoutError, self).__init__('Timed out: {}'.format(command))
self.command = command
self.output = output
def __str__(self):
return '\n'.join([self.message, 'OUTPUT:', self.output or ''])
def check_output(command, timeout=None, ignore=None, **kwargs):
"""This is a version of subprocess.check_output that adds a timeout parameter to kill
the subprocess if it does not return within the specified time."""
# pylint: disable=too-many-branches
if ignore is None:
ignore = []
elif isinstance(ignore, int):
ignore = [ignore]
elif not isinstance(ignore, list) and ignore != 'all':
message = 'Invalid value for ignore parameter: "{}"; must be an int or a list'
raise ValueError(message.format(ignore))
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
def callback(pid):
try:
check_output_logger.debug('{} timed out; sending SIGKILL'.format(pid))
os.killpg(pid, signal.SIGKILL)
except OSError:
pass # process may have already terminated.
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
preexec_fn=preexec_function, **kwargs)
if timeout:
timer = threading.Timer(timeout, callback, [process.pid, ])
timer.start()
try:
output, error = process.communicate()
finally:
if timeout:
timer.cancel()
retcode = process.poll()
if retcode:
if retcode == -9: # killed, assume due to timeout callback
raise TimeoutError(command, output='\n'.join([output, error]))
elif ignore != 'all' and retcode not in ignore:
raise subprocess.CalledProcessError(retcode, command, output='\n'.join([output, error]))
return output, error
def init_argument_parser(parser):
parser.add_argument('-c', '--config', help='specify an additional config.py')
parser.add_argument('-v', '--verbose', action='count',
help='The scripts will produce verbose output.')
parser.add_argument('--debug', action='store_true',
help='Enable debug mode. Note: this implies --verbose.')
parser.add_argument('--version', action='version', version='%(prog)s {}'.format(get_wa_version()))
return parser
def walk_modules(path):
"""
Given a path to a Python package, iterate over all the modules and
sub-packages in that package.
"""
try:
root_mod = __import__(path, {}, {}, [''])
yield root_mod
except ImportError as e:
e.path = path
raise e
if not hasattr(root_mod, '__path__'): # module, not package
return
for _, name, ispkg in pkgutil.iter_modules(root_mod.__path__):
try:
submod_path = '.'.join([path, name])
if ispkg:
for submod in walk_modules(submod_path):
yield submod
else:
yield __import__(submod_path, {}, {}, [''])
except ImportError as e:
e.path = submod_path
raise e
def ensure_directory_exists(dirpath):
"""A filter for directory paths to ensure they exist."""
if not os.path.isdir(dirpath):
os.makedirs(dirpath)
return dirpath
def ensure_file_directory_exists(filepath):
"""
A filter for file paths to ensure the directory of the
file exists and the file can be created there. The file
itself is *not* going to be created if it doesn't already
exist.
"""
ensure_directory_exists(os.path.dirname(filepath))
return filepath
def diff_tokens(before_token, after_token):
"""
Creates a diff of two tokens.
@ -269,22 +142,18 @@ def get_traceback(exc=None):
return sio.getvalue()
def normalize(value, dict_type=dict):
"""Normalize values. Recursively normalizes dict keys to be lower case,
no surrounding whitespace, underscore-delimited strings."""
if isinstance(value, dict):
normalized = dict_type()
for k, v in value.iteritems():
if isinstance(k, basestring):
k = k.strip().lower().replace(' ', '_')
normalized[k] = normalize(v, dict_type)
return normalized
elif isinstance(value, list):
return [normalize(v, dict_type) for v in value]
elif isinstance(value, tuple):
return tuple([normalize(v, dict_type) for v in value])
else:
return value
def _check_remove_item(the_list, item):
"""Helper function for merge_lists that implements checking wether an items
should be removed from the list and doing so if needed. Returns ``True`` if
the item has been removed and ``False`` otherwise."""
if not isinstance(item, basestring):
return False
if not item.startswith('~'):
return False
actual_item = item[1:]
if actual_item in the_list:
del the_list[the_list.index(actual_item)]
return True
VALUE_REGEX = re.compile(r'(\d+(?:\.\d+)?)\s*(\w*)')
@ -338,50 +207,6 @@ def capitalize(text):
return text[0].upper() + text[1:].lower()
def convert_new_lines(text):
""" Convert new lines to a common format. """
return text.replace('\r\n', '\n').replace('\r', '\n')
def escape_quotes(text):
"""Escape quotes, and escaped quotes, in the specified text."""
return re.sub(r'\\("|\')', r'\\\\\1', text).replace('\'', '\\\'').replace('\"', '\\\"')
def escape_single_quotes(text):
"""Escape single quotes, and escaped single quotes, in the specified text."""
return re.sub(r'\\("|\')', r'\\\\\1', text).replace('\'', '\'\\\'\'')
def escape_double_quotes(text):
"""Escape double quotes, and escaped double quotes, in the specified text."""
return re.sub(r'\\("|\')', r'\\\\\1', text).replace('\"', '\\\"')
def getch(count=1):
"""Read ``count`` characters from standard input."""
if os.name == 'nt':
import msvcrt # pylint: disable=F0401
return ''.join([msvcrt.getch() for _ in xrange(count)])
else: # assume Unix
import tty # NOQA
import termios # NOQA
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(count)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
def isiterable(obj):
"""Returns ``True`` if the specified object is iterable and
*is not a string type*, ``False`` otherwise."""
return hasattr(obj, '__iter__') and not isinstance(obj, basestring)
def utc_to_local(dt):
"""Convert naive datetime to local time zone, assuming UTC."""
return dt.replace(tzinfo=tz.tzutc()).astimezone(tz.tzlocal())
@ -392,21 +217,6 @@ def local_to_utc(dt):
return dt.replace(tzinfo=tz.tzlocal()).astimezone(tz.tzutc())
def as_relative(path):
"""Convert path to relative by stripping away the leading '/' on UNIX or
the equivant on other platforms."""
path = os.path.splitdrive(path)[1]
return path.lstrip(os.sep)
def get_cpu_mask(cores):
"""Return a string with the hex for the cpu mask for the specified core numbers."""
mask = 0
for i in cores:
mask |= 1 << i
return '0x{0:x}'.format(mask)
def load_class(classpath):
"""Loads the specified Python class. ``classpath`` must be a fully-qualified
    class name (i.e. namespaced under module/package)."""
@ -468,29 +278,7 @@ def enum_metaclass(enum_param, return_name=False, start=0):
return __EnumMeta
def which(name):
"""Platform-independent version of UNIX which utility."""
if os.name == 'nt':
paths = os.getenv('PATH').split(os.pathsep)
exts = os.getenv('PATHEXT').split(os.pathsep)
for path in paths:
testpath = os.path.join(path, name)
if os.path.isfile(testpath):
return testpath
for ext in exts:
testpathext = testpath + ext
if os.path.isfile(testpathext):
return testpathext
return None
else: # assume UNIX-like
try:
result = check_output(['which', name])[0]
return result.strip() # pylint: disable=E1103
except subprocess.CalledProcessError:
return None
_bash_color_regex = re.compile('\x1b\\[[0-9;]+m')
_bash_color_regex = re.compile('\x1b\[[0-9;]+m')
def strip_bash_colors(text):
@ -536,6 +324,18 @@ def get_random_string(length):
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in xrange(length))
class LoadSyntaxError(Exception):
def __init__(self, message, filepath, lineno):
super(LoadSyntaxError, self).__init__(message)
self.filepath = filepath
self.lineno = lineno
def __str__(self):
message = 'Syntax Error in {}, line {}:\n\t{}'
return message.format(self.filepath, self.lineno, self.message)
RAND_MOD_NAME_LEN = 30
BAD_CHARS = string.punctuation + string.whitespace
TRANS_TABLE = string.maketrans(BAD_CHARS, '_' * len(BAD_CHARS))
@ -544,23 +344,63 @@ TRANS_TABLE = string.maketrans(BAD_CHARS, '_' * len(BAD_CHARS))
def to_identifier(text):
"""Converts text to a valid Python identifier by replacing all
whitespace and punctuation."""
result = re.sub('_+', '_', text.translate(TRANS_TABLE))
if result and result[0] in string.digits:
result = '_' + result
return result
return re.sub('_+', '_', text.translate(TRANS_TABLE))
def unique(alist):
    """
    Returns a list containing only unique elements from the input list (but
    preserves order, unlike sets).
    """
    result = []
    for item in alist:
        if item not in result:
            result.append(item)
    return result
def load_struct_from_python(filepath=None, text=None):
    """Parses a config structure from a .py file. The structure should be composed
    of basic Python types (strings, ints, lists, dicts, etc.)."""
    if not (filepath or text) or (filepath and text):
        raise ValueError('Exactly one of filepath or text must be specified.')
    try:
        if filepath:
            modname = to_identifier(filepath)
            mod = imp.load_source(modname, filepath)
        else:
            modname = get_random_string(RAND_MOD_NAME_LEN)
            while modname in sys.modules:  # highly unlikely, but...
                modname = get_random_string(RAND_MOD_NAME_LEN)
            mod = imp.new_module(modname)
            exec text in mod.__dict__  # pylint: disable=exec-used
        return dict((k, v)
                    for k, v in mod.__dict__.iteritems()
                    if not k.startswith('_'))
    except SyntaxError as e:
        raise LoadSyntaxError(e.message, filepath, e.lineno)
def load_struct_from_yaml(filepath=None, text=None):
    """Parses a config structure from a .yaml file. The structure should be composed
    of basic Python types (strings, ints, lists, dicts, etc.)."""
    if not (filepath or text) or (filepath and text):
        raise ValueError('Exactly one of filepath or text must be specified.')
    try:
        if filepath:
            with open(filepath) as fh:
                return yaml.load(fh)
        else:
            return yaml.load(text)
    except yaml.YAMLError as e:
        lineno = None
        if hasattr(e, 'problem_mark'):
            lineno = e.problem_mark.line  # pylint: disable=no-member
        raise LoadSyntaxError(e.message, filepath=filepath, lineno=lineno)
def load_struct_from_file(filepath):
    """
    Attempts to parse a Python structure consisting of basic types from the
    specified file. Raises a ``ValueError`` if the specified file is of unknown
    format; ``LoadSyntaxError`` if there is an issue parsing the file.
    """
extn = os.path.splitext(filepath)[1].lower()
if (extn == '.py') or (extn == '.pyc') or (extn == '.pyo'):
return load_struct_from_python(filepath)
elif extn == '.yaml':
return load_struct_from_yaml(filepath)
else:
raise ValueError('Unknown format "{}": {}'.format(extn, filepath))
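# Hedged examples for the loaders above (file names are illustrative):
#
#     >>> load_struct_from_python(text='x = 1')
#     {'x': 1}
#     >>> load_struct_from_file('config.yaml')   # parsed with yaml.load
#     >>> load_struct_from_file('config.txt')    # raises ValueError: unknown format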
def open_file(filepath):
@ -576,68 +416,170 @@ def open_file(filepath):
return subprocess.call(['xdg-open', filepath])
def ranges_to_list(ranges_string):
"""Converts a sysfs-style ranges string, e.g. ``"0,2-4"``, into a list ,e.g ``[0,2,3,4]``"""
values = []
for rg in ranges_string.split(','):
if '-' in rg:
first, last = map(int, rg.split('-'))
values.extend(xrange(first, last + 1))
else:
values.append(int(rg))
return values
def sha256(path, chunk=2048):
"""Calculates SHA256 hexdigest of the file at the specified path."""
h = hashlib.sha256()
with open(path, 'rb') as fh:
buf = fh.read(chunk)
while buf:
h.update(buf)
buf = fh.read(chunk)
return h.hexdigest()
def list_to_ranges(values):
"""Converts a list, e.g ``[0,2,3,4]``, into a sysfs-style ranges string, e.g. ``"0,2-4"``"""
range_groups = []
for _, g in groupby(enumerate(values), lambda (i, x): i - x):
range_groups.append(map(itemgetter(1), g))
range_strings = []
for group in range_groups:
if len(group) == 1:
range_strings.append(str(group[0]))
else:
range_strings.append('{}-{}'.format(group[0], group[-1]))
return ','.join(range_strings)
def urljoin(*parts):
return '/'.join(p.rstrip('/') for p in parts)
def list_to_mask(values, base=0x0):
"""Converts the specified list of integer values into
    a bit mask for those values. Optionally, the list can be
applied to an existing mask."""
for v in values:
base |= (1 << v)
return base
def mask_to_list(mask):
"""Converts the specfied integer bitmask into a list of
indexes of bits that are set in the mask."""
size = len(bin(mask)) - 2 # because of "0b"
return [size - i - 1 for i in xrange(size)
if mask & (1 << size - i - 1)]
class Namespace(dict):
    """
    A dict-like object that allows treating keys and attributes
    interchangeably (this means that keys are restricted to strings
    that are valid Python identifiers).
    """
    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)
    def __setattr__(self, name, value):
        self[name] = value
    def __setitem__(self, name, value):
        if to_identifier(name) != name:
            message = 'Key must be a valid identifier; got "{}"'
            raise ValueError(message.format(name))
        dict.__setitem__(self, name, value)
# From: http://eli.thegreenplace.net/2011/10/19/perls-guess-if-file-is-text-or-binary-implemented-in-python/
def istextfile(fileobj, blocksize=512):
    """ Uses heuristics to guess whether the given file is text or binary,
    by reading a single block of bytes from the file.
    If more than 30% of the chars in the block are non-text, or there
    are NUL ('\x00') bytes in the block, assume this is a binary file.
    """
    _text_characters = (b''.join(chr(i) for i in range(32, 127)) +
                        b'\n\r\t\f\b')
    block = fileobj.read(blocksize)
    if b'\x00' in block:
        # Files with null bytes are binary
        return False
    elif not block:
        # An empty file is considered a valid text file
        return True
    # Use translate's 'deletechars' argument to efficiently remove all
    # occurrences of _text_characters from the block
    nontext = block.translate(None, _text_characters)
    return float(len(nontext)) / len(block) <= 0.30
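# An illustrative check (the path is an assumption for the example):
#
#     >>> with open('setup.py', 'rb') as fh:
#     ...     istextfile(fh)        # True for source files, False for binaries
#     True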
def categorize(v):
if hasattr(v, 'merge_with') and hasattr(v, 'merge_into'):
return 'o'
elif hasattr(v, 'iteritems'):
return 'm'
elif isiterable(v):
return 's'
elif v is None:
return 'n'
else:
return 'c'
def merge_config_values(base, other):
"""
This is used to merge two objects, typically when setting the value of a
``ConfigurationPoint``. First, both objects are categorized into
c: A scalar value. Basically, most objects. These values
are treated as atomic, and not mergeable.
s: A sequence. Anything iterable that is not a dict or
a string (strings are considered scalars).
m: A key-value mapping. ``dict`` and its derivatives.
n: ``None``.
o: A mergeable object; this is an object that implements both
``merge_with`` and ``merge_into`` methods.
The merge rules based on the two categories are then as follows:
(c1, c2) --> c2
(s1, s2) --> s1 . s2
(m1, m2) --> m1 . m2
(c, s) --> [c] . s
(s, c) --> s . [c]
(s, m) --> s . [m]
(m, s) --> [m] . s
(m, c) --> ERROR
(c, m) --> ERROR
(o, X) --> o.merge_with(X)
(X, o) --> o.merge_into(X)
(X, n) --> X
(n, X) --> X
where:
        '.' means concatenation (for maps, concatenation of (k, v) streams
then converted back into a map). If the types of the two objects
differ, the type of ``other`` is used for the result.
'X' means "any category"
'[]' used to indicate a literal sequence (not necessarily a ``list``).
            when this is concatenated with an actual sequence, that sequence's
            type is used.
notes:
- When a mapping is combined with a sequence, that mapping is
treated as a scalar value.
- When combining two mergeable objects, they're combined using
``o1.merge_with(o2)`` (_not_ using o2.merge_into(o1)).
- Combining anything with ``None`` yields that value, irrespective
      of the order. So a ``None`` value is equivalent to the corresponding
item being omitted.
- When both values are scalars, merging is equivalent to overwriting.
- There is no recursion (e.g. if map values are lists, they will not
be merged; ``other`` will overwrite ``base`` values). If complicated
merging semantics (such as recursion) are required, they should be
implemented within custom mergeable types (i.e. those that implement
``merge_with`` and ``merge_into``).
    While this can be used as a generic "combine any two arbitrary objects"
function, the semantics have been selected specifically for merging
configuration point values.
"""
    cat_base = categorize(base)
    cat_other = categorize(other)
    if cat_base == 'n':
        return other
    elif cat_other == 'n':
        return base
    if cat_base == 'o':
        return base.merge_with(other)
    elif cat_other == 'o':
        return other.merge_into(base)
    if cat_base == 'm':
        if cat_other == 's':
            return merge_sequencies([base], other)
        elif cat_other == 'm':
            return merge_maps(base, other)
        else:
            message = 'merge error ({}, {}): "{}" and "{}"'
            raise ValueError(message.format(cat_base, cat_other, base, other))
    elif cat_base == 's':
        if cat_other == 's':
            return merge_sequencies(base, other)
        else:
            return merge_sequencies(base, [other])
    else:  # cat_base == 'c'
        if cat_other == 's':
            return merge_sequencies([base], other)
        elif cat_other == 'm':
            message = 'merge error ({}, {}): "{}" and "{}"'
            raise ValueError(message.format(cat_base, cat_other, base, other))
        else:
            return other
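# A hedged illustration of the merge rules above (values chosen purely for
# demonstration):
#
#     >>> merge_config_values(None, 42)                # (n, X) --> X
#     42
#     >>> merge_config_values('a', ['b'])              # (c, s) --> [c] . s
#     ['a', 'b']
#     >>> merge_config_values([1, 2], [2, 3])          # (s, s) --> s1 . s2
#     [1, 2, 3]
#     >>> merge_config_values({'a': 1}, {'b': 2})      # (m, m) --> m1 . m2
#     {'a': 1, 'b': 2}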
def merge_sequencies(s1, s2):
return type(s2)(unique(chain(s1, s2)))
def merge_maps(m1, m2):
return type(m2)(chain(m1.iteritems(), m2.iteritems()))
def merge_dicts_simple(base, other):
result = base.copy()
    for key, value in (other or {}).iteritems():
result[key] = merge_config_values(result.get(key), value)
return result
def touch(path):
with open(path, 'w'):
pass

View File

@ -1,13 +1,13 @@
"""
This module contains wrappers for Python serialization modules for
common formats that make it easier to serialize/deserialize WA
Plain Old Data structures (serializable WA classes implement
``to_pod()``/``from_pod()`` methods for converting between POD
structures and Python class instances).
The modifications to standard serialization procedures are:
- mappings are deserialized as ``OrderedDict``\ 's are than standard
- mappings are deserialized as ``OrderedDict``\ 's rather than standard
Python ``dict``\ 's. This allows for cleaner syntax in certain parts
of WA configuration (e.g. values to be written to files can be specified
as a dict, and they will be written in the order specified in the config).
@ -16,7 +16,7 @@ The modifications to standard serilization procedures are:
in the POD config.
This module exports the "wrapped" versions of serialization libraries,
and this should be imported and used instead of importing the libraries
and this should be imported and used instead of importing the libraries
directly. i.e. ::
from wa.utils.serializer import yaml
@ -27,7 +27,7 @@ instead of ::
import yaml
pod = yaml.load(fh)
It's also possible to suse the serializer directly::
It's also possible to use the serializer directly::
from wa.utils import serializer
pod = serializer.load(fh)
@ -35,13 +35,14 @@ It's also possible to suse the serializer directly::
This can also be used to ``dump()`` POD structures. By default,
``dump()`` will produce JSON, but ``fmt`` parameter may be used to
specify an alternative format (``yaml`` or ``python``). ``load()`` will
use the file extension to guess the format, but ``fmt`` may also be used
to specify it explicitly.
"""
# pylint: disable=unused-argument
import os
import re
import sys
import json as _json
from collections import OrderedDict
from datetime import datetime
@ -50,8 +51,8 @@ import yaml as _yaml
import dateutil.parser
from wa.framework.exception import SerializerSyntaxError
from wa.utils.types import regex_type
from wa.utils.misc import isiterable
from wa.utils.types import regex_type, none_type
__all__ = [
@ -60,16 +61,29 @@ __all__ = [
'read_pod',
'dump',
'load',
'is_pod',
'POD_TYPES',
]
POD_TYPES = [
list,
tuple,
dict,
set,
str,
unicode,
int,
float,
bool,
datetime,
regex_type,
none_type,
]
class WAJSONEncoder(_json.JSONEncoder):
def default(self, obj):
if hasattr(obj, 'to_pod'):
return obj.to_pod()
elif isinstance(obj, regex_type):
def default(self, obj): # pylint: disable=method-hidden
if isinstance(obj, regex_type):
return 'REGEX:{}:{}'.format(obj.flags, obj.pattern)
elif isinstance(obj, datetime):
return 'DATET:{}'.format(obj.isoformat())
@ -79,8 +93,8 @@ class WAJSONEncoder(_json.JSONEncoder):
class WAJSONDecoder(_json.JSONDecoder):
def decode(self, s):
d = _json.JSONDecoder.decode(self, s)
def decode(self, s, **kwargs):
d = _json.JSONDecoder.decode(self, s, **kwargs)
def try_parse_object(v):
if isinstance(v, basestring) and v.startswith('REGEX:'):
@ -112,7 +126,6 @@ class json(object):
def dump(o, wfh, indent=4, *args, **kwargs):
return _json.dump(o, wfh, cls=WAJSONEncoder, indent=indent, *args, **kwargs)
@staticmethod
def load(fh, *args, **kwargs):
try:
@ -176,7 +189,7 @@ class yaml(object):
except _yaml.YAMLError as e:
lineno = None
if hasattr(e, 'problem_mark'):
lineno = e.problem_mark.line
lineno = e.problem_mark.line # pylint: disable=no-member
raise SerializerSyntaxError(e.message, lineno)
loads = load
@ -196,7 +209,7 @@ class python(object):
def loads(s, *args, **kwargs):
pod = {}
try:
exec s in pod
exec s in pod # pylint: disable=exec-used
except SyntaxError as e:
raise SerializerSyntaxError(e.message, e.lineno)
for k in pod.keys():
@ -209,20 +222,29 @@ def read_pod(source, fmt=None):
if isinstance(source, basestring):
with open(source) as fh:
return _read_pod(fh, fmt)
elif hasattr(source, 'read') and (hasattr(sourc, 'name') or fmt):
elif hasattr(source, 'read') and (hasattr(source, 'name') or fmt):
return _read_pod(source, fmt)
else:
message = 'source must be a path or an open file handle; got {}'
raise ValueError(message.format(type(source)))
def write_pod(pod, dest, fmt=None):
if isinstance(dest, basestring):
with open(dest, 'w') as wfh:
return _write_pod(pod, wfh, fmt)
elif hasattr(dest, 'write') and (hasattr(dest, 'name') or fmt):
return _write_pod(pod, dest, fmt)
else:
message = 'dest must be a path or an open file handle; got {}'
raise ValueError(message.format(type(dest)))
def dump(o, wfh, fmt='json', *args, **kwargs):
serializer = {
'yaml': yaml,
'json': json,
'python': python,
'py': python,
}.get(fmt)
serializer = {'yaml': yaml,
'json': json,
'python': python,
'py': python,
}.get(fmt)
if serializer is None:
raise ValueError('Unknown serialization format: "{}"'.format(fmt))
serializer.dump(o, wfh, *args, **kwargs)
@ -242,4 +264,20 @@ def _read_pod(fh, fmt=None):
elif fmt == 'py':
return python.load(fh)
else:
raise ValueError('Unknown format "{}": {}'.format(fmt, path))
raise ValueError('Unknown format "{}": {}'.format(fmt, getattr(fh, 'name', '<none>')))
def _write_pod(pod, wfh, fmt=None):
if fmt is None:
fmt = os.path.splitext(wfh.name)[1].lower().strip('.')
if fmt == 'yaml':
return yaml.dump(pod, wfh)
elif fmt == 'json':
return json.dump(pod, wfh)
elif fmt == 'py':
raise ValueError('Serializing to Python is not supported')
else:
raise ValueError('Unknown format "{}": {}'.format(fmt, getattr(wfh, 'name', '<none>')))
def is_pod(obj):
return type(obj) in POD_TYPES
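# A hedged usage sketch for the POD helpers above (file names are
# illustrative):
#
#     >>> pod = read_pod('config.yaml')    # format inferred from the extension
#     >>> write_pod(pod, 'config.json')    # re-serialized as JSON
#     >>> is_pod({'a': 1})                 # True -- plain data structures only
#     True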

93
wa/utils/terminalsize.py Normal file
View File

@ -0,0 +1,93 @@
# Adapted from
# https://gist.github.com/jtriley/1108174
# pylint: disable=bare-except,unpacking-non-sequence
import os
import shlex
import struct
import platform
import subprocess
def get_terminal_size():
""" getTerminalSize()
- get width and height of console
    - works on Linux, OS X, Windows, Cygwin (Windows)
originally retrieved from:
http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python
"""
current_os = platform.system()
tuple_xy = None
if current_os == 'Windows':
tuple_xy = _get_terminal_size_windows()
if tuple_xy is None:
# needed for window's python in cygwin's xterm
tuple_xy = _get_terminal_size_tput()
if current_os in ['Linux', 'Darwin'] or current_os.startswith('CYGWIN'):
tuple_xy = _get_terminal_size_linux()
if tuple_xy is None or tuple_xy == (0, 0):
tuple_xy = (80, 25) # assume "standard" terminal
return tuple_xy
def _get_terminal_size_windows():
# pylint: disable=unused-variable,redefined-outer-name,too-many-locals
try:
from ctypes import windll, create_string_buffer
# stdin handle is -10
# stdout handle is -11
# stderr handle is -12
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom,
maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
return sizex, sizey
except:
pass
def _get_terminal_size_tput():
# get terminal width
# src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
try:
        # check_output (rather than check_call) is needed to capture the value
        cols = int(subprocess.check_output(shlex.split('tput cols')))
        rows = int(subprocess.check_output(shlex.split('tput lines')))
return (cols, rows)
except:
pass
def _get_terminal_size_linux():
def ioctl_GWINSZ(fd):
try:
import fcntl
import termios
cr = struct.unpack('hh',
fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
return cr
except:
pass
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
try:
cr = (os.environ['LINES'], os.environ['COLUMNS'])
except:
return None
return int(cr[1]), int(cr[0])
if __name__ == "__main__":
sizex, sizey = get_terminal_size()
print 'width =', sizex, 'height =', sizey

View File

@ -15,77 +15,29 @@
"""
Routines for doing various type conversions. These usually embody some higher-level
semantics than are present in standard Python types (e.g. ``boolean`` will convert the
string ``"false"`` to ``False``, where as non-empty strings are usually considered to be
``True``).
Routines for doing various type conversions. These usually embody some
higher-level semantics than are present in standard Python types (e.g.
``boolean`` will convert the string ``"false"`` to ``False``, whereas
non-empty strings are usually considered to be ``True``).
A lot of these are intened to stpecify type conversions declaratively in place like
``Parameter``'s ``kind`` argument. These are basically "hacks" around the fact that Python
is not the best language to use for configuration.
A lot of these are intended to specify type conversions declaratively in place
like ``Parameter``'s ``kind`` argument. These are basically "hacks" around the
fact that Python is not the best language to use for configuration.
"""
import os
import re
import math
import shlex
import numbers
from bisect import insort
from collections import defaultdict
from collections import defaultdict, MutableMapping
from copy import copy
from devlib.utils.types import identifier, boolean, integer, numeric, caseless_string
from wa.utils.misc import isiterable, to_identifier
def identifier(text):
"""Converts text to a valid Python identifier by replacing all
whitespace and punctuation."""
return to_identifier(text)
def boolean(value):
"""
Returns bool represented by the value. This is different from
calling the builtin bool() in that it will interpret string representations.
e.g. boolean('0') and boolean('false') will both yield False.
"""
false_strings = ['', '0', 'n', 'no']
if isinstance(value, basestring):
value = value.lower()
if value in false_strings or 'false'.startswith(value):
return False
return bool(value)
def integer(value):
"""Handles conversions for string respresentations of binary, octal and hex."""
if isinstance(value, basestring):
return int(value, 0)
else:
return int(value)
def numeric(value):
"""
Returns the value as number (int if possible, or float otherwise), or
raises ``ValueError`` if the specified ``value`` does not have a straight
forward numeric conversion.
"""
if isinstance(value, int):
return value
try:
fvalue = float(value)
except ValueError:
raise ValueError('Not numeric: {}'.format(value))
if not math.isnan(fvalue) and not math.isinf(fvalue):
ivalue = int(fvalue)
# yeah, yeah, I know. Whatever. This is best-effort.
if ivalue == fvalue:
return ivalue
return fvalue
def list_of_strs(value):
"""
Value must be iterable. All elements will be converted to strings.
@ -142,7 +94,6 @@ def list_of(type_):
"""Generates a "list of" callable for the specified type. The callable
attempts to convert all elements in the passed value to the specifed
``type_``, raising ``ValueError`` on error."""
def __init__(self, values):
list.__init__(self, map(type_, values))
@ -204,7 +155,6 @@ def list_or(type_):
list_type = list_of(type_)
class list_or_type(list_type):
def __init__(self, value):
# pylint: disable=non-parent-init-called,super-init-not-called
if isiterable(value):
@ -220,6 +170,7 @@ list_or_bool = list_or(boolean)
regex_type = type(re.compile(''))
none_type = type(None)
def regex(value):
@ -234,28 +185,25 @@ def regex(value):
return re.compile(value)
class caseless_string(str):
    """
    Just like built-in Python string except case-insensitive on comparisons. However, the
    case is preserved otherwise.
    """
    def __eq__(self, other):
        if isinstance(other, basestring):
            other = other.lower()
        return self.lower() == other
    def __ne__(self, other):
        return not self.__eq__(other)
    def __cmp__(self, other):
        if isinstance(other, basestring):
            other = other.lower()
        return cmp(self.lower(), other)
    def format(self, *args, **kwargs):
        return caseless_string(super(caseless_string, self).format(*args, **kwargs))
__counters = defaultdict(int)
def reset_counter(name=None):
    __counters[name] = 0
def counter(name=None):
    """
    An auto incrementing value (kind of like an AUTO INCREMENT field in SQL).
    Optionally, the name of the counter to be used is specified (each counter
    increments separately).
    Counts start at 1, not 0.
    """
    __counters[name] += 1
    value = __counters[name]
    return value
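# Illustrative counter behaviour:
#
#     >>> counter('iteration')
#     1
#     >>> counter('iteration')
#     2
#     >>> reset_counter('iteration')
#     >>> counter('iteration')
#     1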
class arguments(list):
@ -375,7 +323,8 @@ class prioritylist(object):
raise ValueError('Invalid index {}'.format(index))
current_global_offset = 0
priority_counts = {priority: count for (priority, count) in
zip(self.priorities, [len(self.elements[p]) for p in self.priorities])}
zip(self.priorities, [len(self.elements[p])
for p in self.priorities])}
for priority in self.priorities:
if not index_range:
break
@ -395,103 +344,134 @@ class prioritylist(object):
return self.size
class TreeNode(object):
    @property
    def is_root(self):
        return self.parent is None
    @property
    def is_leaf(self):
        return not self.children
    @property
    def parent(self):
        return self._parent
    @parent.setter
    def parent(self, parent):
        if self._parent:
            self._parent.remove_child(self)
        self._parent = parent
        if self._parent:
            self._parent.add_child(self)
    @property
    def children(self):
        return [c for c in self._children]
    def __init__(self):
        self._parent = None
        self._children = []
    def add_child(self, node):
        if node == self:
            raise ValueError('A node cannot be its own child.')
        if node in self._children:
            return
        for ancestor in self.iter_ancestors():
            if ancestor == node:
                raise ValueError('Can\'t add {} as a child, as it is already an ancestor'.format(node))
        if node.parent and node.parent != self:
            raise ValueError('Cannot add {}, as it already has a parent.'.format(node))
        self._children.append(node)
        node._parent = self
    def remove_child(self, node):
        if node not in self._children:
            message = 'Cannot remove: {} is not a child of {}'
            raise ValueError(message.format(node, self))
        self._children.remove(node)
        node._parent = None
    def iter_ancestors(self, after=None, upto=None):
        if upto == self:
            return
        ancestor = self
        if after:
            while ancestor != after:
                ancestor = ancestor.parent
        while ancestor and ancestor != upto:
            yield ancestor
            ancestor = ancestor.parent
    def iter_descendants(self):
        for child in self.children:
            yield child
            for grandchild in child.iter_descendants():
                yield grandchild
    def iter_leaves(self):
        for descendant in self.iter_descendants():
            if descendant.is_leaf:
                yield descendant
    def get_common_ancestor(self, other):
        if self.has_ancestor(other):
            return other
        if other.has_ancestor(self):
            return self
        for my_ancestor in self.iter_ancestors():
            for other_ancestor in other.iter_ancestors():
                if my_ancestor == other_ancestor:
                    return my_ancestor
    def get_root(self):
        node = self
        while not node.is_root:
            node = node.parent
        return node
    def has_ancestor(self, other):
        for ancestor in self.iter_ancestors():
            if other == ancestor:
                return True
        return False
    def has_descendant(self, other):
        for descendant in self.iter_descendants():
            if other == descendant:
                return True
        return False
class toggle_set(set):
    """
    A set of items to enable or disable something.
    A prefix of ``~`` is used to denote disabling something, for example
    the list ['apples', '~oranges', 'cherries'] enables both ``apples``
    and ``cherries`` but disables ``oranges``.
    """
    @staticmethod
    def from_pod(pod):
        return toggle_set(pod)
    @staticmethod
    def merge(source, dest):
        for item in source:
            if item not in dest:
                # Disable previously enabled item
                if item.startswith('~') and item[1:] in dest:
                    dest.remove(item[1:])
                # Enable previously disabled item
                if not item.startswith('~') and ('~' + item) in dest:
                    dest.remove('~' + item)
                dest.add(item)
        return dest
    def merge_with(self, other):
        new_self = copy(self)
        return toggle_set.merge(other, new_self)
    def merge_into(self, other):
        other = copy(other)
        return toggle_set.merge(self, other)
    def values(self):
        """
        Returns a set of enabled items.
        """
        return set([item for item in self if not item.startswith('~')])
    def conflicts_with(self, other):
        """
        Checks if any items in ``other`` conflict with items already in this list.
        Args:
            other (list): The list to be checked against
        Returns:
            A list of items in ``other`` that conflict with items in this list
        """
        conflicts = []
        for item in other:
            if item.startswith('~') and item[1:] in self:
                conflicts.append(item)
            if not item.startswith('~') and ('~' + item) in self:
                conflicts.append(item)
        return conflicts
    def to_pod(self):
        return list(self.values())
class ID(str):
    def merge_with(self, other):
        return '_'.join([self, other])
    def merge_into(self, other):
        return '_'.join([other, self])
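# A short sketch of toggle_set semantics (values are illustrative):
#
#     >>> ts = toggle_set(['apples', '~oranges', 'cherries'])
#     >>> ts.values()
#     set(['apples', 'cherries'])
#     >>> ts.merge_with(['~apples']).values()   # '~apples' disables 'apples'
#     set(['cherries'])
#     >>> ts.conflicts_with(['~cherries'])
#     ['~cherries']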
class obj_dict(MutableMapping):
"""
    An object that behaves like a dict but each dict entry can also be accessed
as an attribute.
:param not_in_dict: A list of keys that can only be accessed as attributes
"""
@staticmethod
def from_pod(pod):
return obj_dict(pod)
def __init__(self, values=None, not_in_dict=None):
self.__dict__['dict'] = dict(values or {})
self.__dict__['not_in_dict'] = not_in_dict if not_in_dict is not None else []
def to_pod(self):
return self.__dict__['dict']
def __getitem__(self, key):
if key in self.not_in_dict:
msg = '"{}" is in the list keys that can only be accessed as attributes'
raise KeyError(msg.format(key))
return self.__dict__['dict'][key]
def __setitem__(self, key, value):
self.__dict__['dict'][key] = value
def __delitem__(self, key):
del self.__dict__['dict'][key]
def __len__(self):
return sum(1 for _ in self)
def __iter__(self):
for key in self.__dict__['dict']:
if key not in self.__dict__['not_in_dict']:
yield key
def __repr__(self):
return repr(dict(self))
def __str__(self):
return str(dict(self))
def __setattr__(self, name, value):
self.__dict__['dict'][name] = value
def __delattr__(self, name):
if name in self:
del self.__dict__['dict'][name]
else:
raise AttributeError("No such attribute: " + name)
def __getattr__(self, name):
if name in self.__dict__['dict']:
return self.__dict__['dict'][name]
else:
raise AttributeError("No such attribute: " + name)
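# A hedged sketch of obj_dict behaviour (values are illustrative):
#
#     >>> d = obj_dict({'cpu': 'a53'}, not_in_dict=['os'])
#     >>> d.os = 'android'
#     >>> d.cpu          # attribute access
#     'a53'
#     >>> d['cpu']       # dict access
#     'a53'
#     >>> d['os']        # raises KeyError: attribute-only key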

View File

@ -18,7 +18,7 @@
import os
import re
from wa import Workload, Parameter, ConfigError, runmethod
from wa import Workload, Parameter, ConfigError
this_dir = os.path.dirname(__file__)
@ -62,7 +62,6 @@ class Dhrystone(Workload):
                  description='The processes spawned by dhrystone will be pinned to cores as specified by this parameter'),
]
@runmethod
def initialize(self, context):
host_exe = os.path.join(this_dir, 'dhrystone')
Dhrystone.target_exe = self.target.install(host_exe)
@ -118,7 +117,6 @@ class Dhrystone(Workload):
context.add_metric('total DMIPS', total_dmips)
context.add_metric('total score', total_score)
@runmethod
def finalize(self, context):
self.target.uninstall('dhrystone')

View File

@ -76,7 +76,8 @@ class PluginCache(object):
msg = "Source '{}' has not been added to the plugin cache."
raise RuntimeError(msg.format(source))
if not self.loader.has_plugin(plugin_name) and plugin_name not in GENERIC_CONFIGS:
if (not self.loader.has_plugin(plugin_name) and
plugin_name not in GENERIC_CONFIGS):
msg = 'configuration provided for unknown plugin "{}"'
raise ConfigError(msg.format(plugin_name))