
New target description + moving target stuff under "framework"

Changing the way target descriptions work from a static mapping to
something that is dynamically generated and is extensible via plugins.
Also moving core target implementation stuff under "framework".
Sergei Trofimov
2017-03-06 11:10:25 +00:00
parent 18d001fd76
commit 42539bbe0d
43 changed files with 6229 additions and 2586 deletions

View File

@@ -16,32 +16,42 @@
import textwrap
from wa.framework.plugin import Plugin
from wa.framework.entrypoint import init_argument_parser
from wa.framework.version import get_wa_version
from wa.utils.doc import format_body
def init_argument_parser(parser):
parser.add_argument('-c', '--config', action='append', default=[],
help='specify an additional config.py')
parser.add_argument('-v', '--verbose', action='count',
help='The scripts will produce verbose output.')
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(get_wa_version()))
return parser
class Command(Plugin):
"""
Defines a Workload Automation command. This will be executed from the command line as
``wa <command> [args ...]``. This defines the name to be used when invoking wa, the
code that will actually be executed on invocation and the argument parser to be used
to parse the rest of the command line arguments.
Defines a Workload Automation command. This will be executed from the
command line as ``wa <command> [args ...]``. This defines the name to be
used when invoking wa, the code that will actually be executed on
invocation and the argument parser to be used to parse the rest of the
command line arguments.
"""
kind = 'command'
kind = "command"
help = None
usage = None
description = None
epilog = None
formatter_class = None
def __init__(self, subparsers, **kwargs):
super(Command, self).__init__(**kwargs)
def __init__(self, subparsers):
super(Command, self).__init__()
self.group = subparsers
desc = format_body(textwrap.dedent(self.description), 80)
parser_params = dict(help=(self.help or self.description), usage=self.usage,
description=format_body(textwrap.dedent(self.description), 80),
epilog=self.epilog)
description=desc, epilog=self.epilog)
if self.formatter_class:
parser_params['formatter_class'] = self.formatter_class
self.parser = subparsers.add_parser(self.name, **parser_params)
@@ -50,19 +60,22 @@ class Command(Plugin):
def initialize(self, context):
"""
Perform command-specific initialisation (e.g. adding command-specific options to the command's
parser). ``context`` is always ``None``.
Perform command-specific initialisation (e.g. adding command-specific
options to the command's parser). ``context`` is always ``None``.
"""
pass
def execute(self, args):
def execute(self, state, args):
"""
Execute this command.
:args: An ``argparse.Namespace`` containing command line arguments (as returned by
``argparse.ArgumentParser.parse_args()``). This would usually be the result of
invoking ``self.parser``.
:state: An initialized ``ConfigManager`` that contains the current state of
WA execution up to that point (processed configuration, loaded
plugins, etc).
:args: An ``argparse.Namespace`` containing command line arguments (as
returned by ``argparse.ArgumentParser.parse_args()``). This would
usually be the result of invoking ``self.parser``.
"""
raise NotImplementedError()
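To make the new ``execute(state, args)`` contract concrete, here is a minimal sketch of a command plugin. The ``hello`` name, option and behaviour are hypothetical; only the attribute and method names follow the class above.

from wa.framework.command import Command

class HelloCommand(Command):

    name = 'hello'
    description = '''
    Print a greeting. Illustrates the attribute-driven parser setup
    performed by Command.__init__().
    '''

    def initialize(self, context):
        # context is always None here; add command-specific options
        self.parser.add_argument('--who', default='world')

    def execute(self, state, args):
        # state is the ConfigManager holding the processed configuration
        print('hello, {}'.format(args.who))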

View File

@@ -1,2 +1,19 @@
from wa.framework.configuration.core import settings, ConfigurationPoint, PluginConfiguration
from wa.framework.configuration.core import merge_config_values, WA_CONFIGURATION
# Copyright 2013-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wlauto.core.configuration.configuration import (settings,
RunConfiguration,
JobGenerator,
ConfigurationPoint)
from wlauto.core.configuration.plugin_cache import PluginCache

File diff suppressed because it is too large

View File

@@ -0,0 +1,42 @@
from wlauto.core.configuration.configuration import MetaConfiguration, RunConfiguration
from wlauto.core.configuration.plugin_cache import PluginCache
from wlauto.utils.serializer import yaml
from wlauto.utils.doc import strip_inlined_text
DEFAULT_INSTRUMENTS = ['execution_time',
'interrupts',
'cpufreq',
'status',
'standard',
'csv']
def _format_yaml_comment(param, short_description=False):
comment = param.description
comment = strip_inlined_text(comment)
if short_description:
comment = comment.split('\n\n')[0]
comment = comment.replace('\n', '\n# ')
comment = "# {}\n".format(comment)
return comment
def _format_instruments(output):
plugin_cache = PluginCache()
output.write("instrumentation:\n")
for plugin in DEFAULT_INSTRUMENTS:
plugin_cls = plugin_cache.loader.get_plugin_class(plugin)
output.writelines(_format_yaml_comment(plugin_cls, short_description=True))
output.write(" - {}\n".format(plugin))
output.write("\n")
def generate_default_config(path):
with open(path, 'w') as output:
for param in MetaConfiguration.config_points + RunConfiguration.config_points:
entry = {param.name: param.default}
comment = _format_yaml_comment(param)
output.writelines(comment)
yaml.dump(entry, output, default_flow_style=False)
output.write("\n")
_format_instruments(output)
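For illustration, a hedged sketch of invoking the generator above; the output path is arbitrary and the comment text shown is indicative only, since it comes from each parameter's description.

generate_default_config('config.yaml')

# The generated file interleaves commented descriptions with defaults,
# roughly:
#
#   # <description of the run_name config point>
#   run_name: null
#
#   instrumentation:
#   # <short description of the execution_time instrument>
#    - execution_time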

View File

@@ -1,67 +1,222 @@
from copy import copy
from collections import OrderedDict
import random
from itertools import izip_longest, groupby, chain
from wa.framework import pluginloader
from wa.framework.exception import ConfigError
from wa.framework.configuration.core import ConfigurationPoint
from wa.framework.utils.types import TreeNode, list_of, identifier
from wa.framework.configuration.core import (MetaConfiguration, RunConfiguration,
JobGenerator, settings)
from wa.framework.configuration.parsers import ConfigParser
from wa.framework.configuration.plugin_cache import PluginCache
class ExecConfig(object):
class CombinedConfig(object):
static_config_points = [
ConfigurationPoint(
'components',
kind=list_of(identifier),
description="""
Components to be activated.
""",
),
ConfigurationPoint(
'runtime_parameters',
kind=list_of(identifier),
aliases=['runtime_params'],
description="""
Runtime parameters to be set.
""",
),
ConfigurationPoint(
'classifiers',
kind=list_of(str),
description="""
Classifiers to be used. Classifiers are arbitrary key-value
pairs associated with this config. They may be used during output
processing and should be used to provide additional context for
collected results.
""",
),
]
@staticmethod
def from_pod(pod):
instance = CombinedConfig()
instance.settings = MetaConfiguration.from_pod(pod.get('settings', {}))
instance.run_config = RunConfiguration.from_pod(pod.get('run_config', {}))
return instance
config_points = None
def __init__(self, settings=None, run_config=None):
self.settings = settings
self.run_config = run_config
@classmethod
def _load(cls, load_global=False, loader=pluginloader):
if cls.config_points is None:
cls.config_points = {c.name: c for c in cls.static_config_points}
for plugin in loader.list_plugins():
cp = ConfigurationPoint(
plugin.name,
kind=OrderedDict,
description="""
Configuration for {} plugin.
""".format(plugin.name)
)
cls._add_config_point(plugin.name, cp)
for alias in plugin.aliases:
cls._add_config_point(alias.name, cp)
@classmethod
def _add_config_point(cls, name, cp):
if name in cls.config_points:
message = 'Config point for "{}" already exists ("{}")'
raise ValueError(message.format(name, cls.config_points[name].name))
cls.config_points[name] = cp
def to_pod(self):
return {'settings': self.settings.to_pod(),
'run_config': self.run_config.to_pod()}
class JobStatus:
PENDING = 0
RUNNING = 1
OK = 2
FAILED = 3
PARTIAL = 4
ABORTED = 5
PASSED = 6
class GlobalExecConfig(ExecConfig):
class Job(object):
def __init__(self, spec, iteration, context):
self.spec = spec
self.iteration = iteration
self.context = context
self.status = 'new'
self.workload = None
self.output = None
def load(self, target, loader=pluginloader):
self.workload = loader.get_workload(self.spec.workload_name,
target,
**self.spec.workload_parameters)
self.workload.init_resources(self.context)
self.workload.validate()
class ConfigManager(object):
"""
Represents run-time state of WA. Mostly used as a container for loaded
configuration and discovered plugins.
This exists outside of any command or run and is associated with the running
instance of WA itself.
"""
@property
def enabled_instruments(self):
return self.jobs_config.enabled_instruments
@property
def job_specs(self):
if not self._jobs_generated:
msg = 'Attempting to access job specs before '\
'jobs have been generated'
raise RuntimeError(msg)
return [j.spec for j in self._jobs]
@property
def jobs(self):
if not self._jobs_generated:
msg = 'Attempting to access jobs before '\
'they have been generated'
raise RuntimeError(msg)
return self._jobs
def __init__(self, settings=settings):
self.settings = settings
self.run_config = RunConfiguration()
self.plugin_cache = PluginCache()
self.jobs_config = JobGenerator(self.plugin_cache)
self.loaded_config_sources = []
self._config_parser = ConfigParser()
self._jobs = []
self._jobs_generated = False
self.agenda = None
def load_config_file(self, filepath):
self._config_parser.load_from_path(self, filepath)
self.loaded_config_sources.append(filepath)
def load_config(self, values, source, wrap_exceptions=True):
self._config_parser.load(self, values, source)
self.loaded_config_sources.append(source)
def get_plugin(self, name=None, kind=None, *args, **kwargs):
return self.plugin_cache.get_plugin(name, kind, *args, **kwargs)
def get_instruments(self, target):
instruments = []
for name in self.enabled_instruments:
instruments.append(self.get_plugin(name, kind='instrument',
target=target))
return instruments
def finalize(self):
if not self.agenda:
msg = 'Attempting to finalize config before agenda has been set'
raise RuntimeError(msg)
self.run_config.merge_device_config(self.plugin_cache)
return CombinedConfig(self.settings, self.run_config)
def generate_jobs(self, context):
job_specs = self.jobs_config.generate_job_specs(context.tm)
exec_order = self.run_config.execution_order
for spec, i in permute_iterations(job_specs, exec_order):
job = Job(spec, i, context)
job.load(context.tm.target)
self._jobs.append(job)
self._jobs_generated = True
def permute_by_job(specs):
"""
This is the "classic" implementation that executes all iterations of a
workload spec before proceeding on to the next spec.
"""
for spec in specs:
for i in range(1, spec.iterations + 1):
yield (spec, i)
def permute_by_iteration(specs):
"""
Runs the first iteration for all benchmarks first, before proceeding to the
next iteration, i.e. A1, B1, C1, A2, B2, C2... instead of A1, A2, B1, B2,
C1, C2...
If multiple sections were specified in the agenda, this will run all
sections for the first global spec first, followed by all sections for the
second spec, etc.
e.g. given sections X and Y, and global specs A and B, with 2 iterations,
this will run
X.A1, Y.A1, X.B1, Y.B1, X.A2, Y.A2, X.B2, Y.B2
"""
groups = [list(g) for k, g in groupby(specs, lambda s: s.workload_id)]
all_tuples = []
for spec in chain(*groups):
all_tuples.append([(spec, i + 1)
for i in xrange(spec.iterations)])
for t in chain(*map(list, izip_longest(*all_tuples))):
if t is not None:
yield t
def permute_by_section(specs):
"""
Runs the first iteration for all benchmarks first, before proceeding to the
next iteration, i.e. A1, B1, C1, A2, B2, C2... instead of A1, A2, B1, B2,
C1, C2...
If multiple sections were specified in the agenda, this will run all specs
for the first section followed by all specs for the second section, etc.
e.g. given sections X and Y, and global specs A and B, with 2 iterations,
this will run
X.A1, X.B1, Y.A1, Y.B1, X.A2, X.B2, Y.A2, Y.B2
"""
groups = [list(g) for k, g in groupby(specs, lambda s: s.section_id)]
all_tuples = []
for spec in chain(*groups):
all_tuples.append([(spec, i + 1)
for i in xrange(spec.iterations)])
for t in chain(*map(list, izip_longest(*all_tuples))):
if t is not None:
yield t
def permute_randomly(specs):
"""
This will generate a random permutation of specs/iteration tuples.
"""
result = []
for spec in specs:
for i in xrange(1, spec.iterations + 1):
result.append((spec, i))
random.shuffle(result)
for t in result:
yield t
permute_map = {
'by_iteration': permute_by_iteration,
'by_job': permute_by_job,
'by_section': permute_by_section,
'random': permute_randomly,
}
def permute_iterations(specs, exec_order):
if exec_order not in permute_map:
msg = 'Unknown execution order "{}"; must be in: {}'
raise ValueError(msg.format(exec_order, permute_map.keys()))
return permute_map[exec_order](specs)
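As a quick sanity check of the orders above, a hedged sketch with stand-in specs; FakeSpec is a hypothetical stub, and real job specs carry far more state.

from collections import namedtuple

FakeSpec = namedtuple('FakeSpec', 'workload_id section_id iterations')
specs = [FakeSpec('A', 'X', 2), FakeSpec('B', 'X', 2)]

[(s.workload_id, i) for s, i in permute_by_job(specs)]
# -> [('A', 1), ('A', 2), ('B', 1), ('B', 2)]
[(s.workload_id, i) for s, i in permute_by_iteration(specs)]
# -> [('A', 1), ('B', 1), ('A', 2), ('B', 2)]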

View File

@@ -0,0 +1,308 @@
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from wlauto.exceptions import ConfigError
from wlauto.utils.serializer import read_pod, SerializerSyntaxError
from wlauto.utils.types import toggle_set, counter
from wlauto.core.configuration.configuration import JobSpec
###############
### Parsers ###
###############
class ConfigParser(object):
def load_from_path(self, state, filepath):
self.load(state, _load_file(filepath, "Config"), filepath)
def load(self, state, raw, source, wrap_exceptions=True): # pylint: disable=too-many-branches
try:
if 'run_name' in raw:
msg = '"run_name" can only be specified in the config '\
'section of an agenda'
raise ConfigError(msg)
if 'id' in raw:
raise ConfigError('"id" cannot be set globally')
merge_result_processors_instruments(raw)
# Get WA core configuration
for cfg_point in state.settings.configuration.itervalues():
value = get_aliased_param(cfg_point, raw)
if value is not None:
state.settings.set(cfg_point.name, value)
# Get run specific configuration
for cfg_point in state.run_config.configuration.itervalues():
value = get_aliased_param(cfg_point, raw)
if value is not None:
state.run_config.set(cfg_point.name, value)
# Get global job spec configuration
for cfg_point in JobSpec.configuration.itervalues():
value = get_aliased_param(cfg_point, raw)
if value is not None:
state.jobs_config.set_global_value(cfg_point.name, value)
for name, values in raw.iteritems():
# Assume that all leftover config is for a plug-in or a global
# alias; it is up to PluginCache to assert this assumption
state.plugin_cache.add_configs(name, values, source)
except ConfigError as e:
if wrap_exceptions:
raise ConfigError('Error in "{}":\n{}'.format(source, str(e)))
else:
raise e
class AgendaParser(object):
def load_from_path(self, state, filepath):
raw = _load_file(filepath, 'Agenda')
self.load(state, raw, filepath)
def load(self, state, raw, source):
try:
if not isinstance(raw, dict):
raise ConfigError('Invalid agenda, top level entry must be a dict')
self._populate_and_validate_config(state, raw, source)
sections = self._pop_sections(raw)
global_workloads = self._pop_workloads(raw)
if raw:
msg = 'Invalid top level agenda entry(ies): "{}"'
raise ConfigError(msg.format('", "'.join(raw.keys())))
sect_ids, wkl_ids = self._collect_ids(sections, global_workloads)
self._process_global_workloads(state, global_workloads, wkl_ids)
self._process_sections(state, sections, sect_ids, wkl_ids)
state.agenda = source
except (ConfigError, SerializerSyntaxError) as e:
raise ConfigError('Error in "{}":\n\t{}'.format(source, str(e)))
def _populate_and_validate_config(self, state, raw, source):
for name in ['config', 'global']:
entry = raw.pop(name, None)
if entry is None:
continue
if not isinstance(entry, dict):
msg = 'Invalid entry "{}" - must be a dict'
raise ConfigError(msg.format(name))
if 'run_name' in entry:
state.run_config.set('run_name', entry.pop('run_name'))
state.load_config(entry, source, wrap_exceptions=False)
def _pop_sections(self, raw):
sections = raw.pop("sections", [])
if not isinstance(sections, list):
raise ConfigError('Invalid entry "sections" - must be a list')
return sections
def _pop_workloads(self, raw):
workloads = raw.pop("workloads", [])
if not isinstance(workloads, list):
raise ConfigError('Invalid entry "workloads" - must be a list')
return workloads
def _collect_ids(self, sections, global_workloads):
seen_section_ids = set()
seen_workload_ids = set()
for workload in global_workloads:
workload = _get_workload_entry(workload)
_collect_valid_id(workload.get("id"), seen_workload_ids, "workload")
for section in sections:
_collect_valid_id(section.get("id"), seen_section_ids, "section")
for workload in section["workloads"] if "workloads" in section else []:
workload = _get_workload_entry(workload)
_collect_valid_id(workload.get("id"), seen_workload_ids,
"workload")
return seen_section_ids, seen_workload_ids
def _process_global_workloads(self, state, global_workloads, seen_wkl_ids):
for workload_entry in global_workloads:
workload = _process_workload_entry(workload_entry, seen_wkl_ids,
state.jobs_config)
state.jobs_config.add_workload(workload)
def _process_sections(self, state, sections, seen_sect_ids, seen_wkl_ids):
for section in sections:
workloads = []
for workload_entry in section.pop("workloads", []):
workload = _process_workload_entry(workload_entry, seen_wkl_ids,
state.jobs_config)
workloads.append(workload)
section = _construct_valid_entry(section, seen_sect_ids,
"s", state.jobs_config)
state.jobs_config.add_section(section, workloads)
########################
### Helper functions ###
########################
def get_aliased_param(cfg_point, d, default=None, pop=True):
"""
Given a ConfigurationPoint and a dict, this function will search the dict for
the ConfigurationPoint's name/aliases. If more than one is found it will raise
a ConfigError. If one (and only one) is found then it will return the value
for the ConfigurationPoint. If neither the name nor any alias is present in
the dict it will return the "default" parameter of this function.
"""
aliases = [cfg_point.name] + cfg_point.aliases
alias_map = [a for a in aliases if a in d]
if len(alias_map) > 1:
raise ConfigError(DUPLICATE_ENTRY_ERROR.format(aliases))
elif alias_map:
if pop:
return d.pop(alias_map[0])
else:
return d[alias_map[0]]
else:
return default
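An illustrative lookup, assuming a ConfigurationPoint with a single alias; the names and values here are hypothetical.

cp = ConfigurationPoint('iterations', kind=int, aliases=['iters'])
raw = {'iters': 3}
get_aliased_param(cp, raw)             # -> 3, and pops 'iters' from raw
get_aliased_param(cp, {}, default=1)   # -> 1, since nothing matched
# A dict containing both 'iterations' and 'iters' would raise ConfigError.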
def _load_file(filepath, error_name):
if not os.path.isfile(filepath):
raise ValueError("{} does not exist".format(filepath))
try:
raw = read_pod(filepath)
except SerializerSyntaxError as e:
raise ConfigError('Error parsing {} {}: {}'.format(error_name, filepath, e))
if not isinstance(raw, dict):
message = '{} does not contain a valid {} structure; top level must be a dict.'
raise ConfigError(message.format(filepath, error_name))
return raw
def merge_result_processors_instruments(raw):
instr_config = JobSpec.configuration['instrumentation']
instruments = toggle_set(get_aliased_param(instr_config, raw, default=[]))
result_processors = toggle_set(raw.pop('result_processors', []))
if instruments and result_processors:
conflicts = instruments.conflicts_with(result_processors)
if conflicts:
msg = '"instrumentation" and "result_processors" have '\
'conflicting entries: {}'
entries = ', '.join('"{}"'.format(c.strip("~")) for c in conflicts)
raise ConfigError(msg.format(entries))
raw['instrumentation'] = instruments.merge_with(result_processors)
def _pop_aliased(d, names, entry_id):
name_count = sum(1 for n in names if n in d)
if name_count > 1:
names_list = ', '.join(names)
msg = 'Invalid workload entry "{}": at most one of ({}) must be specified.'
raise ConfigError(msg.format(entry_id, names_list))
for name in names:
if name in d:
return d.pop(name)
return None
def _construct_valid_entry(raw, seen_ids, prefix, jobs_config):
workload_entry = {}
# Generate an automatic ID if the entry doesn't already have one
if 'id' not in raw:
while True:
new_id = '{}{}'.format(prefix, counter(name=prefix))
if new_id not in seen_ids:
break
workload_entry['id'] = new_id
seen_ids.add(new_id)
else:
workload_entry['id'] = raw.pop('id')
# Process instrumentation
merge_result_processors_instruments(raw)
# Validate all remaining workload_entry fields
for name, cfg_point in JobSpec.configuration.iteritems():
value = get_aliased_param(cfg_point, raw)
if value is not None:
value = cfg_point.kind(value)
cfg_point.validate_value(name, value)
workload_entry[name] = value
wk_id = workload_entry['id']
param_names = ['workload_params', 'workload_parameters']
if prefix == 'wk':
param_names += ['params', 'parameters']
workload_entry["workload_parameters"] = _pop_aliased(raw, param_names, wk_id)
param_names = ['runtime_parameters', 'runtime_params']
if prefix == 's':
param_names += ['params', 'parameters']
workload_entry["runtime_parameters"] = _pop_aliased(raw, param_names, wk_id)
param_names = ['boot_parameters', 'boot_params']
workload_entry["boot_parameters"] = _pop_aliased(raw, param_names, wk_id)
if "instrumentation" in workload_entry:
jobs_config.update_enabled_instruments(workload_entry["instrumentation"])
# Error if there are unknown entries remaining
if raw:
msg = 'Invalid entry(ies) in "{}": "{}"'
raise ConfigError(msg.format(workload_entry['id'], ', '.join(raw.keys())))
return workload_entry
def _collect_valid_id(entry_id, seen_ids, entry_type):
if entry_id is None:
return
if entry_id in seen_ids:
raise ConfigError('Duplicate {} ID "{}".'.format(entry_type, entry_id))
# "-" is reserved for joining section and workload IDs
if "-" in entry_id:
msg = 'Invalid {} ID "{}"; IDs cannot contain a "-"'
raise ConfigError(msg.format(entry_type, entry_id))
if entry_id == "global":
msg = 'Invalid {} ID "global"; is a reserved ID'
raise ConfigError(msg.format(entry_type))
seen_ids.add(entry_id)
def _get_workload_entry(workload):
if isinstance(workload, basestring):
workload = {'name': workload}
elif not isinstance(workload, dict):
raise ConfigError('Invalid workload entry: "{}"'.format(workload))
return workload
def _process_workload_entry(workload, seen_workload_ids, jobs_config):
workload = _get_workload_entry(workload)
workload = _construct_valid_entry(workload, seen_workload_ids,
"wk", jobs_config)
return workload
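Tying the agenda parser together, a hedged sketch of the (already deserialised) structure that AgendaParser.load() expects; all names and values are illustrative.

raw = {
    'config': {'run_name': 'example-run'},
    'sections': [
        {'id': 'fast', 'workloads': ['dhrystone']},
    ],
    'workloads': [
        {'id': 'wk1', 'name': 'memcpy', 'iterations': 3},
    ],
}
# AgendaParser().load(state, raw, 'example.yaml') would register the
# section and both workload entries with state.jobs_config, generating
# "s"/"wk"-prefixed IDs for any entries that lack one.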

View File

@@ -0,0 +1,227 @@
# Copyright 2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import copy
from collections import defaultdict
from itertools import chain
from devlib.utils.misc import memoized
from wa.framework import pluginloader
from wa.framework.exception import ConfigError
from wa.framework.target.descriptor import get_target_descriptions
from wa.utils.types import obj_dict
GENERIC_CONFIGS = ["device_config", "workload_parameters",
"boot_parameters", "runtime_parameters"]
class PluginCache(object):
"""
The plugin cache is used to store configuration that cannot be processed at
this stage, whether that's because it is unknown whether it is needed
(in the case of disabled plug-ins) or it is not known what it belongs to (in
the case of "device_config" etc.). It also maintains where configuration came
from, and the priority order of said sources.
"""
def __init__(self, loader=pluginloader):
self.loader = loader
self.sources = []
self.plugin_configs = defaultdict(lambda: defaultdict(dict))
self.global_alias_values = defaultdict(dict)
self.targets = {td.name: td for td in get_target_descriptions()}
# Generate a mapping of what global aliases belong to
self._global_alias_map = defaultdict(dict)
self._list_of_global_aliases = set()
for plugin in self.loader.list_plugins():
for param in plugin.parameters:
if param.global_alias:
self._global_alias_map[plugin.name][param.global_alias] = param
self._list_of_global_aliases.add(param.global_alias)
def add_source(self, source):
if source in self.sources:
raise Exception("Source has already been added.")
self.sources.append(source)
def add_global_alias(self, alias, value, source):
if source not in self.sources:
msg = "Source '{}' has not been added to the plugin cache."
raise RuntimeError(msg.format(source))
if not self.is_global_alias(alias):
msg = "'{} is not a valid global alias'"
raise RuntimeError(msg.format(alias))
self.global_alias_values[alias][source] = value
def add_configs(self, plugin_name, values, source):
if self.is_global_alias(plugin_name):
self.add_global_alias(plugin_name, values, source)
return
for name, value in values.iteritems():
self.add_config(plugin_name, name, value, source)
def add_config(self, plugin_name, name, value, source):
if source not in self.sources:
msg = "Source '{}' has not been added to the plugin cache."
raise RuntimeError(msg.format(source))
if (not self.loader.has_plugin(plugin_name) and
plugin_name not in GENERIC_CONFIGS):
msg = 'configuration provided for unknown plugin "{}"'
raise ConfigError(msg.format(plugin_name))
if (plugin_name not in GENERIC_CONFIGS and
name not in self.get_plugin_parameters(plugin_name)):
msg = "'{}' is not a valid parameter for '{}'"
raise ConfigError(msg.format(name, plugin_name))
self.plugin_configs[plugin_name][source][name] = value
def is_global_alias(self, name):
return name in self._list_of_global_aliases
def get_plugin_config(self, plugin_name, generic_name=None):
config = obj_dict(not_in_dict=['name'])
config.name = plugin_name
if plugin_name not in GENERIC_CONFIGS:
self._set_plugin_defaults(plugin_name, config)
self._set_from_global_aliases(plugin_name, config)
if generic_name is None:
# Perform a simple merge with the order of sources representing
# priority
plugin_config = self.plugin_configs[plugin_name]
cfg_points = self.get_plugin_parameters(plugin_name)
for source in self.sources:
if source not in plugin_config:
continue
for name, value in plugin_config[source].iteritems():
cfg_points[name].set_value(config, value=value)
else:
# A more complicated merge that involves priority of sources and
# specificity
self._merge_using_priority_specificity(plugin_name, generic_name, config)
return config
def get_plugin(self, name, kind=None, *args, **kwargs):
config = self.get_plugin_config(name)
kwargs = dict(config.items() + kwargs.items())
return self.loader.get_plugin(name, kind=kind, *args, **kwargs)
@memoized
def get_plugin_parameters(self, name):
if name in self.targets:
return self._get_target_params(name)
params = self.loader.get_plugin_class(name).parameters
return {param.name: param for param in params}
def _set_plugin_defaults(self, plugin_name, config):
cfg_points = self.get_plugin_parameters(plugin_name)
for cfg_point in cfg_points.itervalues():
cfg_point.set_value(config, check_mandatory=False)
def _set_from_global_aliases(self, plugin_name, config):
for alias, param in self._global_alias_map[plugin_name].iteritems():
if alias in self.global_alias_values:
for source in self.sources:
if source not in self.global_alias_values[alias]:
continue
val = self.global_alias_values[alias][source]
param.set_value(config, value=val)
def _get_target_params(self, name):
td = self.targets[name]
params = {p.name: p for p in chain(td.target_params, td.platform_params)}
#params['connection_settings'] = {p.name: p for p in td.conn_params}
return params
# pylint: disable=too-many-nested-blocks, too-many-branches
def _merge_using_priority_specificity(self, specific_name,
generic_name, final_config):
"""
WA configuration can come from various sources of increasing priority,
as well as being specified in a generic and specific manner (e.g
``device_config`` and ``nexus10`` respectively). WA has two rules for
the priority of configuration:
- Configuration from higher priority sources overrides
configuration from lower priority sources.
- More specific configuration overrides less specific configuration.
These two rules come into conflict when a generic configuration is given
in a config source of high priority and a specific configuration is given
in a config source of lower priority. In this situation it is not possible
to know the end user's intention, and WA will error.
:param generic_name: The name of the generic configuration
e.g ``device_config``
:param specific_name: The name of the specific configuration used
e.g ``nexus10``
:param cfg_point: A dict of ``ConfigurationPoint``s to be used when
merging configuration. keys=config point name,
values=config point
:rtype: A fully merged and validated configuration in the form of an
obj_dict.
"""
generic_config = copy(self.plugin_configs[generic_name])
specific_config = copy(self.plugin_configs[specific_name])
cfg_points = self.get_plugin_parameters(specific_name)
sources = self.sources
seen_specific_config = defaultdict(list)
# set_value uses the 'name' attribute of the passed object in its error
# messages; to ensure these messages make sense, the name has to be
# changed several times during this function.
final_config.name = specific_name
# pylint: disable=too-many-nested-blocks
for source in sources:
try:
if source in generic_config:
final_config.name = generic_name
for name, cfg_point in cfg_points.iteritems():
if name in generic_config[source]:
if name in seen_specific_config:
msg = ('"{generic_name}" configuration "{config_name}" has already been '
'specified more specifically for {specific_name} in:\n\t\t{sources}')
msg = msg.format(generic_name=generic_name,
config_name=name,
specific_name=specific_name,
sources=", ".join(seen_specific_config[name]))
raise ConfigError(msg)
value = generic_config[source][name]
cfg_point.set_value(final_config, value, check_mandatory=False)
if source in specific_config:
final_config.name = specific_name
for name, cfg_point in cfg_points.iteritems():
if name in specific_config[source]:
seen_specific_config[name].append(str(source))
value = specific_config[source][name]
cfg_point.set_value(final_config, value, check_mandatory=False)
except ConfigError as e:
raise ConfigError('Error in "{}":\n\t{}'.format(source, str(e)))
# Validate final configuration
final_config.name = specific_name
for cfg_point in cfg_points.itervalues():
cfg_point.validate(final_config)
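A hedged illustration of the conflict case described in the docstring above; the sources are hypothetical and 'nexus10' is assumed to be a known target description.

cache = PluginCache()
cache.add_source('config/config.yaml')   # added first => lower priority
cache.add_source('agenda.yaml')          # added later => higher priority

# Specific configuration from the lower-priority source...
cache.add_configs('nexus10', {'core_clusters': 2}, 'config/config.yaml')
# ...and generic configuration for the same parameter from the
# higher-priority source. The merge cannot tell which one the user
# intended, so get_plugin_config('nexus10', generic_name='device_config')
# would raise ConfigError.
cache.add_configs('device_config', {'core_clusters': 4}, 'agenda.yaml')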

View File

@@ -0,0 +1,89 @@
# Copyright 2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class JobSpecSource(object):
kind = ""
def __init__(self, config, parent=None):
self.config = config
self.parent = parent
@property
def id(self):
return self.config['id']
def name(self):
raise NotImplementedError()
class WorkloadEntry(JobSpecSource):
kind = "workload"
@property
def name(self):
if self.parent.id == "global":
return 'workload "{}"'.format(self.id)
else:
return 'workload "{}" from section "{}"'.format(self.id, self.parent.id)
class SectionNode(JobSpecSource):
kind = "section"
@property
def name(self):
if self.id == "global":
return "globally specified configuration"
else:
return 'section "{}"'.format(self.id)
@property
def is_leaf(self):
return not bool(self.children)
def __init__(self, config, parent=None):
super(SectionNode, self).__init__(config, parent=parent)
self.workload_entries = []
self.children = []
def add_section(self, section):
new_node = SectionNode(section, parent=self)
self.children.append(new_node)
return new_node
def add_workload(self, workload_config):
self.workload_entries.append(WorkloadEntry(workload_config, self))
def descendants(self):
for child in self.children:
for n in child.descendants():
yield n
yield child
def ancestors(self):
if self.parent is not None:
yield self.parent
for ancestor in self.parent.ancestors():
yield ancestor
def leaves(self):
if self.is_leaf:
yield self
else:
for n in self.descendants():
if n.is_leaf:
yield n
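A small sketch of the tree helpers above; the config dicts are minimal stubs.

root = SectionNode({'id': 'global'})
a = root.add_section({'id': 'A'})
b = a.add_section({'id': 'B'})
a.add_workload({'id': 'wk1'})

[n.id for n in root.descendants()]  # -> ['B', 'A'] (children before parents)
[n.id for n in b.ancestors()]       # -> ['A', 'global']
[n.id for n in root.leaves()]       # -> ['B']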

View File

@@ -12,72 +12,100 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import argparse
import logging
import os
import subprocess
from wa.framework import pluginloader, log
from wa.framework.configuration import settings
from wa.framework.exception import WAError
from wa.utils.doc import format_body
from wa.utils.misc import init_argument_parser
import warnings
from wa.framework import pluginloader
from wa.framework.command import init_argument_parser
from wa.framework.configuration import settings
from wa.framework.configuration.execution import ConfigManager
from wa.framework.host import init_user_directory
from wa.framework.exception import WAError, DevlibError, ConfigError
from wa.utils import log
from wa.utils.doc import format_body
from wa.utils.misc import get_traceback
warnings.filterwarnings(action='ignore', category=UserWarning, module='zope')
logger = logging.getLogger('wa')
def init_settings():
settings.load_environment()
if not os.path.isdir(settings.user_directory):
settings.initialize_user_directory()
settings.load_user_config()
def get_argument_parser():
description = ("Execute automated workloads on a remote device and process "
"the resulting output.\n\nUse \"wa <subcommand> -h\" to see "
"help for individual subcommands.")
parser = argparse.ArgumentParser(description=format_body(description, 80),
prog='wa',
formatter_class=argparse.RawDescriptionHelpFormatter,
)
init_argument_parser(parser)
return parser
logger = logging.getLogger('command_line')
def load_commands(subparsers):
commands = {}
for command in pluginloader.list_commands():
commands[command.name] = pluginloader.get_command(command.name, subparsers=subparsers)
commands[command.name] = pluginloader.get_command(command.name,
subparsers=subparsers)
return commands
def main():
if not os.path.exists(settings.user_directory):
init_user_directory()
try:
log.init()
init_settings()
parser = get_argument_parser()
commands = load_commands(parser.add_subparsers(dest='command')) # each command will add its own subparser
description = ("Execute automated workloads on a remote device and process "
"the resulting output.\n\nUse \"wa <subcommand> -h\" to see "
"help for individual subcommands.")
parser = argparse.ArgumentParser(description=format_body(description, 80),
prog='wa',
formatter_class=argparse.RawDescriptionHelpFormatter,
)
init_argument_parser(parser)
# load_commands will trigger plugin enumeration, and we want logging
# to be enabled for that, which requires the verbosity setting; however,
# the full argument parse cannot be completed until the commands are
# loaded; so parse just the base args for now so we can get verbosity.
args, _ = parser.parse_known_args()
settings.set("verbosity", args.verbose)
log.init(settings.verbosity)
# each command will add its own subparser
commands = load_commands(parser.add_subparsers(dest='command'))
args = parser.parse_args()
settings.set('verbosity', args.verbose)
if args.config:
settings.load_config_file(args.config)
log.set_level(settings.verbosity)
config = ConfigManager()
config.load_config_file(settings.user_config_file)
for config_file in args.config:
if not os.path.exists(config_file):
raise ConfigError("Config file {} not found".format(config_file))
config.load_config_file(config_file)
command = commands[args.command]
sys.exit(command.execute(args))
sys.exit(command.execute(config, args))
except KeyboardInterrupt:
logging.info('Got CTRL-C. Aborting.')
sys.exit(3)
except (WAError, DevlibError) as e:
logging.critical(e)
sys.exit(1)
except subprocess.CalledProcessError as e:
tb = get_traceback()
logging.critical(tb)
command = e.cmd
if e.args:
command = '{} {}'.format(command, ' '.join(e.args))
message = 'Command \'{}\' returned non-zero exit status {}\nOUTPUT:\n{}\n'
logging.critical(message.format(command, e.returncode, e.output))
sys.exit(2)
except SyntaxError as e:
tb = get_traceback()
logging.critical(tb)
message = 'Syntax Error in {}, line {}, offset {}:'
logging.critical(message.format(e.filename, e.lineno, e.offset))
logging.critical('\t{}'.format(e.msg))
sys.exit(2)
except Exception as e: # pylint: disable=broad-except
log_error(e, logger, critical=True)
if isinstance(e, WAError):
sys.exit(2)
else:
sys.exit(3)
tb = get_traceback()
logging.critical(tb)
logging.critical('{}({})'.format(e.__class__.__name__, e))
sys.exit(2)

View File

@@ -12,7 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wa.utils.misc import get_traceback, TimeoutError # NOQA pylint: disable=W0611
from devlib.exception import (DevlibError, HostError, TimeoutError,
TargetError, TargetNotRespondingError)
from wa.utils.misc import get_traceback
class WAError(Exception):
@@ -35,11 +38,6 @@ class WorkloadError(WAError):
pass
class HostError(WAError):
"""Problem with the host on which WA is running."""
pass
class JobError(WAError):
"""Job execution error."""
pass
@@ -113,7 +111,8 @@ class PluginLoaderError(WAError):
if isinstance(orig, WAError):
reason = 'because of:\n{}: {}'.format(orig_name, orig)
else:
reason = 'because of:\n{}\n{}: {}'.format(get_traceback(self.exc_info), orig_name, orig)
text = 'because of:\n{}\n{}: {}'
reason = text.format(get_traceback(self.exc_info), orig_name, orig)
return '\n'.join([self.message, reason])
else:
return self.message
@@ -121,10 +120,12 @@ class PluginLoaderError(WAError):
class WorkerThreadError(WAError):
"""
This should get raised in the main thread if a non-WAError-derived exception occurs on
a worker/background thread. If a WAError-derived exception is raised in the worker, then
that exception should be re-raised on the main thread directly -- the main point of this is
to preserve the backtrace in the output, and backtrace doesn't get output for WAErrors.
This should get raised in the main thread if a non-WAError-derived
exception occurs on a worker/background thread. If a WAError-derived
exception is raised in the worker, then that exception should be
re-raised on the main thread directly -- the main point of this is to
preserve the backtrace in the output, and backtrace doesn't get output for
WAErrors.
"""
@@ -133,7 +134,8 @@ class WorkerThreadError(WAError):
self.exc_info = exc_info
orig = self.exc_info[1]
orig_name = type(orig).__name__
message = 'Exception of type {} occurred on thread {}:\n'.format(orig_name, thread)
message += '{}\n{}: {}'.format(get_traceback(self.exc_info), orig_name, orig)
text = 'Exception of type {} occurred on thread {}:\n{}\n{}: {}'
message = text.format(orig_name, thread, get_traceback(self.exc_info),
orig_name, orig)
super(WorkerThreadError, self).__init__(message)

File diff suppressed because it is too large

View File

@@ -1,23 +1,33 @@
import os
import shutil
from wa.framework.configuration import settings
from wa.framework.exception import ConfigError
from wa.utils.misc import ensure_directory_exists
from wlauto.core.configuration import settings
class HostRunConfig(object):
def init_user_directory(overwrite_existing=False): # pylint: disable=R0914
"""
Host-side configuration for a run.
Initialise a fresh user directory.
"""
if os.path.exists(settings.user_directory):
if not overwrite_existing:
raise RuntimeError('Environment {} already exists.'.format(settings.user_directory))
shutil.rmtree(settings.user_directory)
def __init__(self, output_directory,
run_info_directory=None,
run_config_directory=None):
self.output_directory = output_directory
self.run_info_directory = run_info_directory or os.path.join(self.output_directory, '_info')
self.run_config_directory = run_config_directory or os.path.join(self.output_directory, '_config')
os.makedirs(settings.user_directory)
os.makedirs(settings.dependencies_directory)
os.makedirs(settings.plugins_directory)
def initialize(self):
ensure_directory_exists(self.output_directory)
ensure_directory_exists(self.run_info_directory)
ensure_directory_exists(self.run_config_directory)
# TODO: generate default config.yaml here
if os.getenv('USER') == 'root':
# If running with sudo on POSIX, change the ownership to the real user.
real_user = os.getenv('SUDO_USER')
if real_user:
import pwd # done here as module won't import on win32
user_entry = pwd.getpwnam(real_user)
uid, gid = user_entry.pw_uid, user_entry.pw_gid
os.chown(settings.user_directory, uid, gid)
# why, oh why isn't there a recursive=True option for os.chown?
for root, dirs, files in os.walk(settings.user_directory):
for d in dirs:
os.chown(os.path.join(root, d), uid, gid)
for f in files:
os.chown(os.path.join(root, f), uid, gid)

View File

@@ -0,0 +1,399 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Adding New Instrument
=====================
Any new instrument should be a subclass of Instrument and it must have a name.
When a new instrument is added to Workload Automation, the methods of the new
instrument will be found automatically and hooked up to the supported signals.
Once a signal is broadcasted, the corresponding registered method is invoked.
Each method in Instrument must take two arguments, which are self and context.
Supported signals can be found in [... link to signals ...]. To make
implementations easier and more uniform, the basic steps to add a new
instrument are similar to the steps to add a new workload. Hence, it is
sufficient to implement the following methods to add a new instrument:
- setup: This method is invoked after the workload is set up. All the
necessary setup should go inside this method. Setup includes operations
like pushing files to the target device, installing them, clearing logs,
etc.
- start: It is invoked just before the workload starts execution. This is
where the instrument's measurements start being registered/taken.
- stop: It is invoked just after the workload execution stops. This is
where measurements should stop being taken/registered.
- update_result: It is invoked after the workload has updated its result.
update_result is where the taken measurements are added to the result so
they can be processed by Workload Automation.
- teardown: It is invoked after the workload is torn down. It is a good
place to clean up any logs generated by the instrument.
For example, to add an instrument which will trace device errors, we subclass
Instrument and override the ``name`` attribute::
BINARY_FILE = os.path.join(os.path.dirname(__file__), 'trace')
class TraceErrorsInstrument(Instrument):
name = 'trace-errors'
def __init__(self, device):
super(TraceErrorsInstrument, self).__init__(device)
self.trace_on_device = os.path.join(self.device.working_directory, 'trace')
We then declare and implement the aforementioned methods. For the setup method,
we want to push the file to the target device and then change the file mode to
755 ::
def setup(self, context):
self.device.push(BINARY_FILE, self.device.working_directory)
self.device.execute('chmod 755 {}'.format(self.trace_on_device))
Then we implement the start method, which will simply run the file to start
tracing. ::
def start(self, context):
self.device.execute('{} start'.format(self.trace_on_device))
Lastly, we need to stop tracing once the workload stops and this happens in the
stop method::
def stop(self, context):
self.device.execute('{} stop'.format(self.trace_on_device))
The generated result can be updated inside update_result, or if it is a trace,
we just pull the file to the host. context has a result variable with an
add_metric method, which can be used to add the instrument's metrics to the
final result for the workload. The method takes four parameters: the metric
key, value, unit, and lower_is_better (a boolean). ::
def update_result(self, context):
# pull the trace file from the device to the host
result = os.path.join(self.device.working_directory, 'trace.txt')
self.device.pull(result, context.working_directory)
# parse the file if needs to be parsed, or add result to
# context.result
At the end, we might want to delete any files generated by the instrument,
and the code to clean up these files goes in the teardown method. ::
def teardown(self, context):
self.device.remove(os.path.join(self.device.working_directory, 'trace.txt'))
"""
import logging
import inspect
from collections import OrderedDict
import wa.framework.signal as signal
from wa.framework.plugin import Plugin
from wa.framework.exception import WAError, TargetNotRespondingError, TimeoutError
from wa.utils.misc import get_traceback, isiterable
from wa.utils.types import identifier
logger = logging.getLogger('instrumentation')
# Maps method names onto the signals they should be registered to.
# Note: the begin/end signals are paired -- if a begin_ signal is sent,
# then the corresponding end_ signal is guaranteed to also be sent.
# Note: using OrderedDict to preserve logical ordering for the table generated
# in the documentation
SIGNAL_MAP = OrderedDict([
# Below are "aliases" for some of the more common signals to allow
# instrumentation to have similar structure to workloads
('initialize', signal.SUCCESSFUL_RUN_INIT),
# ('setup', signal.SUCCESSFUL_WORKLOAD_SETUP),
# ('start', signal.BEFORE_WORKLOAD_EXECUTION),
# ('stop', signal.AFTER_WORKLOAD_EXECUTION),
# ('process_workload_result', signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE),
# ('update_result', signal.AFTER_WORKLOAD_RESULT_UPDATE),
# ('teardown', signal.AFTER_WORKLOAD_TEARDOWN),
# ('finalize', signal.RUN_FIN),
# ('on_run_start', signal.RUN_START),
# ('on_run_end', signal.RUN_END),
# ('on_workload_spec_start', signal.WORKLOAD_SPEC_START),
# ('on_workload_spec_end', signal.WORKLOAD_SPEC_END),
# ('on_iteration_start', signal.ITERATION_START),
# ('on_iteration_end', signal.ITERATION_END),
# ('before_initial_boot', signal.BEFORE_INITIAL_BOOT),
# ('on_successful_initial_boot', signal.SUCCESSFUL_INITIAL_BOOT),
# ('after_initial_boot', signal.AFTER_INITIAL_BOOT),
# ('before_first_iteration_boot', signal.BEFORE_FIRST_ITERATION_BOOT),
# ('on_successful_first_iteration_boot', signal.SUCCESSFUL_FIRST_ITERATION_BOOT),
# ('after_first_iteration_boot', signal.AFTER_FIRST_ITERATION_BOOT),
# ('before_boot', signal.BEFORE_BOOT),
# ('on_successful_boot', signal.SUCCESSFUL_BOOT),
# ('after_boot', signal.AFTER_BOOT),
# ('on_spec_init', signal.SPEC_INIT),
# ('on_run_init', signal.RUN_INIT),
# ('on_iteration_init', signal.ITERATION_INIT),
# ('before_workload_setup', signal.BEFORE_WORKLOAD_SETUP),
# ('on_successful_workload_setup', signal.SUCCESSFUL_WORKLOAD_SETUP),
# ('after_workload_setup', signal.AFTER_WORKLOAD_SETUP),
# ('before_workload_execution', signal.BEFORE_WORKLOAD_EXECUTION),
# ('on_successful_workload_execution', signal.SUCCESSFUL_WORKLOAD_EXECUTION),
# ('after_workload_execution', signal.AFTER_WORKLOAD_EXECUTION),
# ('before_workload_result_update', signal.BEFORE_WORKLOAD_RESULT_UPDATE),
# ('on_successful_workload_result_update', signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE),
# ('after_workload_result_update', signal.AFTER_WORKLOAD_RESULT_UPDATE),
# ('before_workload_teardown', signal.BEFORE_WORKLOAD_TEARDOWN),
# ('on_successful_workload_teardown', signal.SUCCESSFUL_WORKLOAD_TEARDOWN),
# ('after_workload_teardown', signal.AFTER_WORKLOAD_TEARDOWN),
# ('before_overall_results_processing', signal.BEFORE_OVERALL_RESULTS_PROCESSING),
# ('on_successful_overall_results_processing', signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING),
# ('after_overall_results_processing', signal.AFTER_OVERALL_RESULTS_PROCESSING),
# ('on_error', signal.ERROR_LOGGED),
# ('on_warning', signal.WARNING_LOGGED),
])
PRIORITY_MAP = OrderedDict([
('very_fast_', 20),
('fast_', 10),
('normal_', 0),
('slow_', -10),
('very_slow_', -20),
])
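A hedged sketch of how install() (below) interprets method names using these two maps; _resolve is a hypothetical helper that mirrors install()'s prefix-stripping loop.

def _resolve(attr_name):
    # Split a method name into (signal name, priority); names without
    # a recognised prefix get priority 0.
    for prefix, priority in PRIORITY_MAP.items():
        if attr_name.startswith(prefix):
            return attr_name[len(prefix):], priority
    return attr_name, 0

_resolve('fast_initialize')       # -> ('initialize', 10)
_resolve('very_slow_initialize')  # -> ('initialize', -20)
_resolve('initialize')            # -> ('initialize', 0)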
installed = []
def is_installed(instrument):
if isinstance(instrument, Instrument):
if instrument in installed:
return True
if instrument.name in [i.name for i in installed]:
return True
elif isinstance(instrument, type):
if instrument in [i.__class__ for i in installed]:
return True
else: # assume string
if identifier(instrument) in [identifier(i.name) for i in installed]:
return True
return False
def is_enabled(instrument):
if isinstance(instrument, Instrument) or isinstance(instrument, type):
name = instrument.name
else: # assume string
name = instrument
try:
installed_instrument = get_instrument(name)
return installed_instrument.is_enabled
except ValueError:
return False
failures_detected = False
def reset_failures():
global failures_detected # pylint: disable=W0603
failures_detected = False
def check_failures():
result = failures_detected
reset_failures()
return result
class ManagedCallback(object):
"""
This wraps instruments' callbacks to ensure that errors do not interfere
with run execution.
"""
def __init__(self, instrument, callback):
self.instrument = instrument
self.callback = callback
def __call__(self, context):
if self.instrument.is_enabled:
try:
self.callback(context)
except (KeyboardInterrupt, TargetNotRespondingError, TimeoutError): # pylint: disable=W0703
raise
except Exception as e: # pylint: disable=W0703
logger.error('Error in instrument {}'.format(self.instrument.name))
global failures_detected # pylint: disable=W0603
failures_detected = True
if isinstance(e, WAError):
logger.error(e)
else:
tb = get_traceback()
logger.error(tb)
logger.error('{}({})'.format(e.__class__.__name__, e))
if not context.current_iteration:
# Error occurred outside of an iteration (most likely
# during initial setup or teardown). Since this would affect
# the rest of the run, mark the instrument as broken so that
# it doesn't get re-enabled for subsequent iterations.
self.instrument.is_broken = True
disable(self.instrument)
# Need this to keep track of callbacks, because the dispatcher only keeps
# weak references, so if the callbacks aren't referenced elsewhere, they will
# be deallocated before they've had a chance to be invoked.
_callbacks = []
def install(instrument):
"""
This will look for methods (or any callable members) with specific names
in the instrument and hook them up to the corresponding signals.
:param instrument: Instrument instance to install.
"""
logger.debug('Installing instrument %s.', instrument)
if is_installed(instrument):
raise ValueError('Instrument {} is already installed.'.format(instrument.name))
for attr_name in dir(instrument):
priority = 0
stripped_attr_name = attr_name
for key, value in PRIORITY_MAP.iteritems():
if attr_name.startswith(key):
stripped_attr_name = attr_name[len(key):]
priority = value
break
if stripped_attr_name in SIGNAL_MAP:
attr = getattr(instrument, attr_name)
if not callable(attr):
raise ValueError('Attribute {} not callable in {}.'.format(attr_name, instrument))
argspec = inspect.getargspec(attr)
arg_num = len(argspec.args)
# Instrument callbacks will be passed exactly two arguments: self
# (the instrument instance to which the callback is bound) and
# context. However, we also allow callbacks to capture the context
# in variable arguments (declared as "*args" in the definition).
if arg_num > 2 or (arg_num < 2 and argspec.varargs is None):
message = '{} must take exactly 2 positional arguments; {} given.'
raise ValueError(message.format(attr_name, arg_num))
logger.debug('\tConnecting %s to %s', attr.__name__, SIGNAL_MAP[stripped_attr_name])
mc = ManagedCallback(instrument, attr)
_callbacks.append(mc)
signal.connect(mc, SIGNAL_MAP[stripped_attr_name], priority=priority)
installed.append(instrument)
def uninstall(instrument):
instrument = get_instrument(instrument)
installed.remove(instrument)
def validate():
for instrument in installed:
instrument.validate()
def get_instrument(inst):
if isinstance(inst, Instrument):
return inst
for installed_inst in installed:
if identifier(installed_inst.name) == identifier(inst):
return installed_inst
raise ValueError('Instrument {} is not installed'.format(inst))
def disable_all():
for instrument in installed:
_disable_instrument(instrument)
def enable_all():
for instrument in installed:
_enable_instrument(instrument)
def enable(to_enable):
if isiterable(to_enable):
for inst in to_enable:
_enable_instrument(inst)
else:
_enable_instrument(to_enable)
def disable(to_disable):
if isiterable(to_disable):
for inst in to_disable:
_disable_instrument(inst)
else:
_disable_instrument(to_disable)
def _enable_instrument(inst):
inst = get_instrument(inst)
if not inst.is_broken:
logger.debug('Enabling instrument {}'.format(inst.name))
inst.is_enabled = True
else:
logger.debug('Not enabling broken instrument {}'.format(inst.name))
def _disable_instrument(inst):
inst = get_instrument(inst)
if inst.is_enabled:
logger.debug('Disabling instrument {}'.format(inst.name))
inst.is_enabled = False
def get_enabled():
return [i for i in installed if i.is_enabled]
def get_disabled():
return [i for i in installed if not i.is_enabled]
class Instrument(Plugin):
"""
Base class for instrumentation implementations.
"""
kind = "instrument"
def __init__(self, target, **kwargs):
super(Instrument, self).__init__(**kwargs)
self.target = target
self.is_enabled = True
self.is_broken = False
def initialize(self, context):
pass
def finalize(self, context):
pass
def __str__(self):
return self.name
def __repr__(self):
return 'Instrument({})'.format(self.name)
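To close the loop, a minimal hypothetical instrument. Note that in this version of the module only the 'initialize' entry in SIGNAL_MAP is active, so only that method would actually be connected by install().

class RunMarker(Instrument):
    # Hypothetical instrument that logs run initialisation.
    name = 'run_marker'

    def initialize(self, context):
        logger.debug('run init seen by {}'.format(self.name))

# install(RunMarker(target)) wraps initialize() in a ManagedCallback and
# connects it to signal.SUCCESSFUL_RUN_INIT with priority 0.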

wa/framework/old_output.py (new file, 362 lines)
View File

@@ -0,0 +1,362 @@
import os
import shutil
import logging
import uuid
from copy import copy
from datetime import datetime, timedelta
from wa.framework import signal, log
from wa.framework.configuration.core import merge_config_values
from wa.utils import serializer
from wa.utils.misc import enum_metaclass, ensure_directory_exists as _d
from wa.utils.types import numeric
class Status(object):
__metaclass__ = enum_metaclass('values', return_name=True)
values = [
'NEW',
'PENDING',
'RUNNING',
'COMPLETE',
'OK',
'OKISH',
'NONCRITICAL',
'PARTIAL',
'FAILED',
'ABORTED',
'SKIPPED',
'UNKNOWN',
]
class WAOutput(object):
basename = '.wa-output'
@classmethod
def load(cls, source):
if os.path.isfile(source):
pod = serializer.load(source)
elif os.path.isdir(source):
pod = serializer.load(os.path.join(source, cls.basename))
else:
message = 'Cannot load {} from {}'
raise ValueError(message.format(cls.__name__, source))
return cls.from_pod(pod)
@classmethod
def from_pod(cls, pod):
instance = cls(pod['output_directory'])
instance.status = pod['status']
instance.metrics = [Metric.from_pod(m) for m in pod['metrics']]
instance.artifacts = [Artifact.from_pod(a) for a in pod['artifacts']]
instance.events = [RunEvent.from_pod(e) for e in pod['events']]
instance.classifiers = pod['classifiers']
return instance
def __init__(self, output_directory):
self.logger = logging.getLogger('output')
self.output_directory = output_directory
self.status = Status.UNKNOWN
self.classifiers = {}
self.metrics = []
self.artifacts = []
self.events = []
def initialize(self, overwrite=False):
if os.path.exists(self.output_directory):
if not overwrite:
raise RuntimeError('"{}" already exists.'.format(self.output_directory))
self.logger.info('Removing existing output directory.')
shutil.rmtree(self.output_directory)
self.logger.debug('Creating output directory {}'.format(self.output_directory))
os.makedirs(self.output_directory)
def add_metric(self, name, value, units=None, lower_is_better=False, classifiers=None):
classifiers = merge_config_values(self.classifiers, classifiers or {})
self.metrics.append(Metric(name, value, units, lower_is_better, classifiers))
def add_artifact(self, name, path, kind, *args, **kwargs):
path = _check_artifact_path(path, self.output_directory)
self.artifacts.append(Artifact(name, path, kind, Artifact.RUN, *args, **kwargs))
def get_path(self, subpath):
return os.path.join(self.output_directory, subpath)
def to_pod(self):
return {
'output_directory': self.output_directory,
'status': self.status,
'metrics': [m.to_pod() for m in self.metrics],
'artifacts': [a.to_pod() for a in self.artifacts],
'events': [e.to_pod() for e in self.events],
'classifiers': copy(self.classifiers),
}
def persist(self):
statefile = os.path.join(self.output_directory, self.basename)
with open(statefile, 'wb') as wfh:
serializer.dump(self, wfh)
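``persist()`` and ``load()`` are symmetric: the former dumps the object's pod into the ``.wa-output`` state file inside the output directory, the latter rebuilds the object from either that file or the directory containing it. A round-trip sketch (directory name and metric are illustrative)::

    out = WAOutput('my-run')
    out.initialize(overwrite=True)
    out.add_metric('score', 42, units='points')
    out.persist()

    restored = WAOutput.load('my-run')  # also accepts the state file path
    # restored.metrics[0].name == 'score'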
class RunInfo(object):
default_name_format = 'wa-run-%y%m%d-%H%M%S'
def __init__(self, project=None, project_stage=None, name=None):
self.uuid = uuid.uuid4()
self.project = project
self.project_stage = project_stage
self.name = name or datetime.now().strftime(self.default_name_format)
self.start_time = None
self.end_time = None
self.duration = None
@staticmethod
def from_pod(pod):
instance = RunInfo()
instance.uuid = uuid.UUID(pod['uuid'])
instance.project = pod['project']
instance.project_stage = pod['project_stage']
instance.name = pod['name']
instance.start_time = pod['start_time']
instance.end_time = pod['end_time']
instance.duration = timedelta(seconds=pod['duration'])
return instance
def to_pod(self):
d = copy(self.__dict__)
d['uuid'] = str(self.uuid)
d['duration'] = self.duration.days * 3600 * 24 + self.duration.seconds
return d
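Note the asymmetry in how ``duration`` is serialised: ``to_pod()`` flattens the ``timedelta`` into whole seconds (``days * 3600 * 24 + seconds``, dropping microseconds), and ``from_pod()`` rebuilds a ``timedelta`` from that integer. A worked sketch::

    from datetime import timedelta

    info = RunInfo(name='example-run')  # name is illustrative
    info.duration = timedelta(days=1, seconds=90, microseconds=500)
    pod = info.to_pod()
    # pod['duration'] == 24 * 3600 + 90 == 86490; the 500 microseconds are lost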
class RunOutput(WAOutput):
@property
def info_directory(self):
return _d(os.path.join(self.output_directory, '_info'))
@property
def config_directory(self):
return _d(os.path.join(self.output_directory, '_config'))
@property
def failed_directory(self):
return _d(os.path.join(self.output_directory, '_failed'))
@property
def log_file(self):
return os.path.join(self.output_directory, 'run.log')
@classmethod
def from_pod(cls, pod):
instance = WAOutput.from_pod(pod)
instance.info = RunInfo.from_pod(pod['info'])
instance.jobs = [JobOutput.from_pod(i) for i in pod['jobs']]
instance.failed = [JobOutput.from_pod(i) for i in pod['failed']]
return instance
def __init__(self, output_directory):
super(RunOutput, self).__init__(output_directory)
self.logger = logging.getLogger('output')
self.info = RunInfo()
self.jobs = []
self.failed = []
def initialize(self, overwrite=False):
super(RunOutput, self).initialize(overwrite)
log.add_file(self.log_file)
self.add_artifact('runlog', self.log_file, 'log')
def create_job_output(self, id):
outdir = os.path.join(self.output_directory, id)
job_output = JobOutput(outdir)
self.jobs.append(job_output)
return job_output
def move_failed(self, job_output):
basename = os.path.basename(job_output.output_directory)
i = 1
dest = os.path.join(self.failed_directory, basename + '-{}'.format(i))
while os.path.exists(dest):
i += 1
dest = os.path.join(self.failed_directory, '{}-{}'.format(basename, i))
shutil.move(job_output.output_directory, dest)
def to_pod(self):
pod = super(RunOutput, self).to_pod()
pod['info'] = self.info.to_pod()
pod['jobs'] = [i.to_pod() for i in self.jobs]
pod['failed'] = [i.to_pod() for i in self.failed]
return pod
class JobOutput(WAOutput):
def add_artifact(self, name, path, kind, *args, **kwargs):
path = _check_artifact_path(path, self.output_directory)
self.artifacts.append(Artifact(name, path, kind, Artifact.ITERATION, *args, **kwargs))
class Artifact(object):
"""
This is an artifact generated during execution/post-processing of a workload.
Unlike metrics, this represents an actual artifact, such as a file, generated.
This may be "result", such as trace, or it could be "meta data" such as logs.
These are distinguished using the ``kind`` attribute, which also helps WA decide
how it should be handled. Currently supported kinds are:
:log: A log file. Not part of "results" as such but contains information about the
run/workload execution that may be useful for diagnostics/meta analysis.
:meta: A file containing metadata. This is not part of "results", but contains
information that may be necessary to reproduce the results (contrast with
``log`` artifacts which are *not* necessary).
:data: This file contains new data, not available otherwise and should be considered
part of the "results" generated by WA. Most traces would fall into this category.
:export: Exported version of results or some other artifact. This signifies that
this artifact does not contain any new data that is not available
elsewhere and that it may be safely discarded without losing information.
:raw: Signifies that this is a raw dump/log that is normally processed to extract
useful information and is then discarded. In a sense, it is the opposite of
``export``, but in general may also be discarded.
.. note:: whether a file is marked as ``log``/``data`` or ``raw`` depends on
how important it is to preserve this file, e.g. when archiving, vs
how much space it takes up. Unlike ``export`` artifacts which are
(almost) always ignored by other exporters as that would never result
in data loss, ``raw`` files *may* be processed by exporters if they
decided that the risk of losing potentially (though unlikely) useful
data is greater than the time/space cost of handling the artifact (e.g.
a database uploader may choose to ignore ``raw`` artifacts, where as a
network filer archiver may choose to archive them).
.. note: The kind parameter is intended to represent the logical function of a particular
artifact, not its intended means of processing -- this is left entirely up to the
result processors.
"""
RUN = 'run'
ITERATION = 'iteration'
valid_kinds = ['log', 'meta', 'data', 'export', 'raw']
@staticmethod
def from_pod(pod):
return Artifact(**pod)
def __init__(self, name, path, kind, level=RUN, mandatory=False, description=None):
""""
:param name: Name that uniquely identifies this artifact.
:param path: The *relative* path of the artifact. Depending on the ``level``
must be either relative to the run or iteration output directory.
Note: this path *must* be delimited using ``/`` irrespective of the
operating system.
:param kind: The type of the artifact this is (e.g. log file, result, etc.) this
will be used as a hint to result processors. This must be one of ``'log'``,
``'meta'``, ``'data'``, ``'export'``, ``'raw'``.
:param level: The level at which the artifact will be generated. Must be either
``'iteration'`` or ``'run'``.
:param mandatory: Boolean value indicating whether this artifact must be present
at the end of result processing for its level.
:param description: A free-form description of what this artifact is.
"""
if kind not in self.valid_kinds:
raise ValueError('Invalid Artifact kind: {}; must be in {}'.format(kind, self.valid_kinds))
self.name = name
self.path = path.replace('/', os.sep) if path is not None else path
self.kind = kind
self.level = level
self.mandatory = mandatory
self.description = description
def exists(self, context):
"""Returns ``True`` if artifact exists within the specified context, and
``False`` otherwise."""
fullpath = os.path.join(context.output_directory, self.path)
return os.path.exists(fullpath)
def to_pod(self):
return copy(self.__dict__)
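A construction sketch showing the validation above (names and paths are illustrative)::

    art = Artifact('trace', 'trace.dat', 'data',
                   level=Artifact.ITERATION, mandatory=True,
                   description='Raw trace captured during the run.')
    # an unknown kind is rejected up front:
    # Artifact('shot', 'x.png', 'screenshot') raises ValueError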
class RunEvent(object):
"""
An event that occurred during a run.
"""
@staticmethod
def from_pod(pod):
instance = RunEvent(pod['message'])
instance.timestamp = pod['timestamp']
return instance
def __init__(self, message):
self.timestamp = datetime.utcnow()
self.message = message
def to_pod(self):
return copy(self.__dict__)
def __str__(self):
return '{} {}'.format(self.timestamp, self.message)
__repr__ = __str__
class Metric(object):
"""
This is a single metric collected from executing a workload.
:param name: the name of the metric. Uniquely identifies the metric
within the results.
:param value: The numerical value of the metric for this execution of
a workload. This can be either an int or a float.
:param units: Units for the collected value. Can be None if the value
has no units (e.g. it's a count or a standardised score).
:param lower_is_better: Boolean flag indicating whether lower values are
better than higher ones. Defaults to False.
:param classifiers: A set of key-value pairs to further classify this metric
beyond current iteration (e.g. this can be used to identify
sub-tests).
"""
@staticmethod
def from_pod(pod):
return Metric(**pod)
def __init__(self, name, value, units=None, lower_is_better=False, classifiers=None):
self.name = name
self.value = numeric(value)
self.units = units
self.lower_is_better = lower_is_better
self.classifiers = classifiers or {}
def to_pod(self):
return copy(self.__dict__)
def __str__(self):
result = '{}: {}'.format(self.name, self.value)
if self.units:
result += ' ' + self.units
result += ' ({})'.format('-' if self.lower_is_better else '+')
return '<{}>'.format(result)
__repr__ = __str__
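The string form encodes the metric's "direction" as a trailing sign, which is easiest to see with a worked example (values are illustrative)::

    m = Metric('frame_time', 16.6, units='ms', lower_is_better=True)
    str(m)  # '<frame_time: 16.6 ms (-)>' -- '-' marks lower-is-better
    str(Metric('fps', 60))  # '<fps: 60 (+)>'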
def _check_artifact_path(path, rootpath):
if path.startswith(rootpath):
return os.path.abspath(path)
rootpath = os.path.abspath(rootpath)
full_path = os.path.join(rootpath, path)
if not os.path.isfile(full_path):
raise ValueError('Cannot add artifact because {} does not exist.'.format(full_path))
return full_path


@@ -1,362 +1,188 @@
import logging
import os
import shutil
import logging
import string
import sys
import uuid
from copy import copy
from datetime import datetime, timedelta
from wa.framework import signal, log
from wa.framework.configuration.core import merge_config_values
from wa.utils import serializer
from wa.utils.misc import enum_metaclass, ensure_directory_exists as _d
from wa.utils.types import numeric
from wlauto.core.configuration.configuration import JobSpec
from wlauto.core.configuration.manager import ConfigManager
from wlauto.core.device_manager import TargetInfo
from wlauto.utils.misc import touch
from wlauto.utils.serializer import write_pod, read_pod
class Status(object):
logger = logging.getLogger('output')
__metaclass__ = enum_metaclass('values', return_name=True)
values = [
'NEW',
'PENDING',
'RUNNING',
'COMPLETE',
'OK',
'OKISH',
'NONCRITICAL',
'PARTIAL',
'FAILED',
'ABORTED',
'SKIPPED',
'UNKNOWN',
]
class WAOutput(object):
basename = '.wa-output'
@classmethod
def load(cls, source):
if os.path.isfile(source):
pod = serializer.load(source)
elif os.path.isdir(source):
pod = serializer.load(os.path.join(source, cls.basename))
else:
message = 'Cannot load {} from {}'
raise ValueError(message.format(cls.__name__, source))
return cls.from_pod(pod)
@classmethod
def from_pod(cls, pod):
instance = cls(pod['output_directory'])
instance.status = pod['status']
instance.metrics = [Metric.from_pod(m) for m in pod['metrics']]
instance.artifacts = [Artifact.from_pod(a) for a in pod['artifacts']]
instance.events = [RunEvent.from_pod(e) for e in pod['events']]
instance.classifiers = pod['classifiers']
return instance
def __init__(self, output_directory):
self.logger = logging.getLogger('output')
self.output_directory = output_directory
self.status = Status.UNKNOWN
self.classifiers = {}
self.metrics = []
self.artifacts = []
self.events = []
def initialize(self, overwrite=False):
if os.path.exists(self.output_directory):
if not overwrite:
raise RuntimeError('"{}" already exists.'.format(self.output_directory))
self.logger.info('Removing existing output directory.')
shutil.rmtree(self.output_directory)
self.logger.debug('Creating output directory {}'.format(self.output_directory))
os.makedirs(self.output_directory)
def add_metric(self, name, value, units=None, lower_is_better=False, classifiers=None):
classifiers = merge_config_values(self.classifiers, classifiers or {})
self.metrics.append(Metric(name, value, units, lower_is_better, classifiers))
def add_artifact(self, name, path, kind, *args, **kwargs):
path = _check_artifact_path(path, self.output_directory)
self.artifacts.append(Artifact(name, path, kind, Artifact.RUN, *args, **kwargs))
def get_path(self, subpath):
return os.path.join(self.output_directory, subpath)
def to_pod(self):
return {
'output_directory': self.output_directory,
'status': self.status,
'metrics': [m.to_pod() for m in self.metrics],
'artifacts': [a.to_pod() for a in self.artifacts],
'events': [e.to_pod() for e in self.events],
'classifiers': copy(self.classifiers),
}
def persist(self):
statefile = os.path.join(self.output_directory, self.basename)
with open(statefile, 'wb') as wfh:
serializer.dump(self, wfh)
class RunInfo(object):
"""
Information about the current run, such as its unique ID, run
time, etc.
default_name_format = 'wa-run-%y%m%d-%H%M%S'
"""
@staticmethod
def from_pod(pod):
uid = pod.pop('uuid')
if uid is not None:
uid = uuid.UUID(uid)
instance = RunInfo(**pod)
instance.uuid = uid
return instance
def __init__(self, project=None, project_stage=None, name=None):
def __init__(self, run_name=None, project=None, project_stage=None,
start_time=None, end_time=None, duration=None):
self.uuid = uuid.uuid4()
self.project = project
self.project_stage = project_stage
self.name = name or datetime.now().strftime(self.default_name_format)
self.run_name = None
self.project = None
self.project_stage = None
self.start_time = None
self.end_time = None
self.duration = None
@staticmethod
def from_pod(pod):
instance = RunInfo()
instance.uuid = uuid.UUID(pod['uuid'])
instance.project = pod['project']
instance.project_stage = pod['project_stage']
instance.name = pod['name']
instance.start_time = pod['start_time']
instance.end_time = pod['end_time']
instance.duration = timedelta(seconds=pod['duration'])
return instance
def to_pod(self):
d = copy(self.__dict__)
d['uuid'] = str(self.uuid)
d['duration'] = self.duration.days * 3600 * 24 + self.duration.seconds
return d
class RunOutput(WAOutput):
@property
def info_directory(self):
return _d(os.path.join(self.output_directory, '_info'))
@property
def config_directory(self):
return _d(os.path.join(self.output_directory, '_config'))
@property
def failed_directory(self):
return _d(os.path.join(self.output_directory, '_failed'))
@property
def log_file(self):
return os.path.join(self.output_directory, 'run.log')
@classmethod
def from_pod(cls, pod):
instance = WAOutput.from_pod(pod)
instance.info = RunInfo.from_pod(pod['info'])
instance.jobs = [JobOutput.from_pod(i) for i in pod['jobs']]
instance.failed = [JobOutput.from_pod(i) for i in pod['failed']]
return instance
def __init__(self, output_directory):
super(RunOutput, self).__init__(output_directory)
self.logger = logging.getLogger('output')
self.info = RunInfo()
self.jobs = []
self.failed = []
def initialize(self, overwrite=False):
super(RunOutput, self).initialize(overwrite)
log.add_file(self.log_file)
self.add_artifact('runlog', self.log_file, 'log')
def create_job_output(self, id):
outdir = os.path.join(self.output_directory, id)
job_output = JobOutput(outdir)
self.jobs.append(job_output)
return job_output
def move_failed(self, job_output):
basename = os.path.basename(job_output.output_directory)
i = 1
dest = os.path.join(self.failed_directory, basename + '-{}'.format(i))
while os.path.exists(dest):
i += 1
dest = '{}-{}'.format(dest[:-2], i)
shutil.move(job_output.output_directory, dest)
def to_pod(self):
pod = super(RunOutput, self).to_pod()
pod['info'] = self.info.to_pod()
pod['jobs'] = [i.to_pod() for i in self.jobs]
pod['failed'] = [i.to_pod() for i in self.failed]
return pod
class JobOutput(WAOutput):
def add_artifact(self, name, path, kind, *args, **kwargs):
path = _check_artifact_path(path, self.output_directory)
self.artifacts.append(Artifact(name, path, kind, Artifact.ITERATION, *args, **kwargs))
class Artifact(object):
class RunState(object):
"""
This is an artifact generated during execution/post-processing of a workload.
Unlike metrics, this represents an actual artifact, such as a file, generated.
This may be "result", such as trace, or it could be "meta data" such as logs.
These are distinguished using the ``kind`` attribute, which also helps WA decide
how it should be handled. Currently supported kinds are:
:log: A log file. Not part of "results" as such but contains information about the
run/workload execution that may be useful for diagnostics/meta analysis.
:meta: A file containing metadata. This is not part of "results", but contains
information that may be necessary to reproduce the results (contrast with
``log`` artifacts which are *not* necessary).
:data: This file contains new data, not available otherwise and should be considered
part of the "results" generated by WA. Most traces would fall into this category.
:export: Exported version of results or some other artifact. This signifies that
this artifact does not contain any new data that is not available
elsewhere and that it may be safely discarded without losing information.
:raw: Signifies that this is a raw dump/log that is normally processed to extract
useful information and is then discarded. In a sense, it is the opposite of
``export``, but in general may also be discarded.
.. note:: whether a file is marked as ``log``/``data`` or ``raw`` depends on
how important it is to preserve this file, e.g. when archiving, vs
how much space it takes up. Unlike ``export`` artifacts which are
(almost) always ignored by other exporters as that would never result
in data loss, ``raw`` files *may* be processed by exporters if they
decided that the risk of losing potentially (though unlikely) useful
data is greater than the time/space cost of handling the artifact (e.g.
a database uploader may choose to ignore ``raw`` artifacts, where as a
network filer archiver may choose to archive them).
.. note: The kind parameter is intended to represent the logical function of a particular
artifact, not its intended means of processing -- this is left entirely up to the
result processors.
Represents the state of a WA run.
"""
RUN = 'run'
ITERATION = 'iteration'
valid_kinds = ['log', 'meta', 'data', 'export', 'raw']
@staticmethod
def from_pod(pod):
return Artifact(**pod)
return RunState()
def __init__(self, name, path, kind, level=RUN, mandatory=False, description=None):
""""
:param name: Name that uniquely identifies this artifact.
:param path: The *relative* path of the artifact. Depending on the ``level``
must be either relative to the run or iteration output directory.
Note: this path *must* be delimited using ``/`` irrespective of the
operating system.
:param kind: The type of the artifact this is (e.g. log file, result, etc.) this
will be used as a hint to result processors. This must be one of ``'log'``,
``'meta'``, ``'data'``, ``'export'``, ``'raw'``.
:param level: The level at which the artifact will be generated. Must be either
``'iteration'`` or ``'run'``.
:param mandatory: Boolean value indicating whether this artifact must be present
at the end of result processing for its level.
:param description: A free-form description of what this artifact is.
"""
if kind not in self.valid_kinds:
raise ValueError('Invalid Artifact kind: {}; must be in {}'.format(kind, self.valid_kinds))
self.name = name
self.path = path.replace('/', os.sep) if path is not None else path
self.kind = kind
self.level = level
self.mandatory = mandatory
self.description = description
def exists(self, context):
"""Returns ``True`` if artifact exists within the specified context, and
``False`` otherwise."""
fullpath = os.path.join(context.output_directory, self.path)
return os.path.exists(fullpath)
def __init__(self):
pass
def to_pod(self):
return copy(self.__dict__)
return {}
class RunEvent(object):
"""
An event that occurred during a run.
class RunOutput(object):
"""
@property
def logfile(self):
return os.path.join(self.basepath, 'run.log')
@staticmethod
def from_pod(pod):
instance = RunEvent(pod['message'])
instance.timestamp = pod['timestamp']
return instance
@property
def metadir(self):
return os.path.join(self.basepath, '__meta')
def __init__(self, message):
self.timestamp = datetime.utcnow()
self.message = message
@property
def infofile(self):
return os.path.join(self.metadir, 'run_info.json')
def to_pod(self):
return copy(self.__dict__)
@property
def statefile(self):
return os.path.join(self.basepath, '.run_state.json')
def __str__(self):
return '{} {}'.format(self.timestamp, self.message)
@property
def configfile(self):
return os.path.join(self.metadir, 'config.json')
__repr__ = __str__
@property
def targetfile(self):
return os.path.join(self.metadir, 'target_info.json')
@property
def jobsfile(self):
return os.path.join(self.metadir, 'jobs.json')
@property
def raw_config_dir(self):
return os.path.join(self.metadir, 'raw_config')
def __init__(self, path):
self.basepath = path
self.info = None
self.state = None
if (not os.path.isfile(self.statefile) or
not os.path.isfile(self.infofile)):
msg = '"{}" does not exist or is not a valid WA output directory.'
raise ValueError(msg.format(self.basepath))
self.reload()
def reload(self):
self.info = RunInfo.from_pod(read_pod(self.infofile))
self.state = RunState.from_pod(read_pod(self.statefile))
def write_info(self):
write_pod(self.info.to_pod(), self.infofile)
def write_state(self):
write_pod(self.state.to_pod(), self.statefile)
def write_config(self, config):
write_pod(config.to_pod(), self.configfile)
def read_config(self):
if not os.path.isfile(self.configfile):
return None
return ConfigManager.from_pod(read_pod(self.configfile))
def write_target_info(self, ti):
write_pod(ti.to_pod(), self.targetfile)
def read_target_info(self):
if not os.path.isfile(self.targetfile):
return None
return TargetInfo.from_pod(read_pod(self.targetfile))
def write_job_specs(self, job_specs):
job_specs[0].to_pod()
js_pod = {'jobs': [js.to_pod() for js in job_specs]}
write_pod(js_pod, self.jobsfile)
def read_job_specs(self):
if not os.path.isfile(self.jobsfile):
return None
pod = read_pod(self.jobsfile)
return [JobSpec.from_pod(jp) for jp in pod['jobs']]
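Together these helpers let a later invocation re-open an output directory and recover the run's metadata. A read-back sketch (assuming ``wa_output`` was created by ``init_wa_output`` below; the constructor validates the presence of the state and info files before reloading)::

    ro = RunOutput('wa_output')
    run_id = str(ro.info.uuid)
    specs = ro.read_job_specs()       # None if jobs.json was never written
    target = ro.read_target_info()    # None if no target info was recorded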
class Metric(object):
"""
This is a single metric collected from executing a workload.
def init_wa_output(path, wa_state, force=False):
if os.path.exists(path):
if force:
logger.info('Removing existing output directory.')
shutil.rmtree(os.path.abspath(path))
else:
raise RuntimeError('path exists: {}'.format(path))
:param name: the name of the metric. Uniquely identifies the metric
within the results.
:param value: The numerical value of the metric for this execution of
a workload. This can be either an int or a float.
:param units: Units for the collected value. Can be None if the value
has no units (e.g. it's a count or a standardised score).
:param lower_is_better: Boolean flag indicating whether lower values are
better than higher ones. Defaults to False.
:param classifiers: A set of key-value pairs to further classify this metric
beyond current iteration (e.g. this can be used to identify
sub-tests).
logger.info('Creating output directory.')
os.makedirs(path)
meta_dir = os.path.join(path, '__meta')
os.makedirs(meta_dir)
_save_raw_config(meta_dir, wa_state)
touch(os.path.join(path, 'run.log'))
"""
info = RunInfo(
run_name=wa_state.run_config.run_name,
project=wa_state.run_config.project,
project_stage=wa_state.run_config.project_stage,
)
write_pod(info.to_pod(), os.path.join(meta_dir, 'run_info.json'))
with open(os.path.join(path, '.run_state.json'), 'w') as wfh:
wfh.write('{}')
@staticmethod
def from_pod(pod):
return Metric(**pod)
def __init__(self, name, value, units=None, lower_is_better=False, classifiers=None):
self.name = name
self.value = numeric(value)
self.units = units
self.lower_is_better = lower_is_better
self.classifiers = classifiers or {}
def to_pod(self):
return copy(self.__dict__)
def __str__(self):
result = '{}: {}'.format(self.name, self.value)
if self.units:
result += ' ' + self.units
result += ' ({})'.format('-' if self.lower_is_better else '+')
return '<{}>'.format(result)
__repr__ = __str__
return RunOutput(path)
def _check_artifact_path(path, rootpath):
if path.startswith(rootpath):
return os.path.abspath(path)
rootpath = os.path.abspath(rootpath)
full_path = os.path.join(rootpath, path)
if not os.path.isfile(full_path):
raise ValueError('Cannot add artifact because {} does not exist.'.format(full_path))
return full_path
def _save_raw_config(meta_dir, state):
raw_config_dir = os.path.join(meta_dir, 'raw_config')
os.makedirs(raw_config_dir)
for i, source in enumerate(state.loaded_config_sources):
if not os.path.isfile(source):
continue
basename = os.path.basename(source)
dest_path = os.path.join(raw_config_dir, 'cfg{}-{}'.format(i, basename))
shutil.copy(source, dest_path)
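A creation-path sketch (hedged: ``state`` stands in for the initialised config-manager-like object used above, exposing ``run_config`` and ``loaded_config_sources``)::

    ro = init_wa_output('wa_output', state, force=True)
    # wa_output/ now contains run.log, .run_state.json, and a __meta/
    # directory holding run_info.json plus raw_config/ copies of every
    # file-based config source that was loaded.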


@@ -21,69 +21,28 @@ import inspect
import imp
import string
import logging
from copy import copy
from itertools import chain
from collections import OrderedDict, defaultdict
from itertools import chain
from copy import copy
from wa.framework import log
from wa.framework.exception import ValidationError, ConfigError, NotFoundError, PluginLoaderError
from wa.framework.configuration.core import ConfigurationPoint, ConfigurationPointCollection
from wa.utils.misc import isiterable, ensure_directory_exists as _d, get_article
from wa.utils.misc import walk_modules, get_article
from wa.utils.types import identifier, integer, boolean, caseless_string
from wa.framework.configuration.core import settings, ConfigurationPoint as Parameter
from wa.framework.exception import (NotFoundError, PluginLoaderError, ValidationError,
ConfigError, HostError)
from wa.utils import log
from wa.utils.misc import (ensure_directory_exists as _d, walk_modules, load_class,
merge_dicts_simple, get_article)
from wa.utils.types import identifier, boolean
class Parameter(ConfigurationPoint):
is_runtime = False
def __init__(self, name,
kind=None,
mandatory=None,
default=None,
override=False,
allowed_values=None,
description=None,
constraint=None,
convert_types=True,
global_alias=None,
reconfigurable=True):
"""
:param global_alias: This is an alternative alias for this parameter,
unlike the name, this alias will not be
namespaced under the owning extension's name
(hence the global part). This is introduced
primarily for backward compatibility -- so that
old extension settings names still work. This
should not be used for new parameters.
:param reconfigurable: This indicated whether this parameter may be
reconfigured during the run (e.g. between different
iterations). This determines where in the run configuration
this parameter may appear.
For other parameters, see docstring for
``wa.framework.configuration.core.ConfigurationPoint``
"""
super(Parameter, self).__init__(name, kind, mandatory,
default, override, allowed_values,
description, constraint,
convert_types)
self.global_alias = global_alias
self.reconfigurable = reconfigurable
def __repr__(self):
d = copy(self.__dict__)
del d['description']
return 'Param({})'.format(d)
MODNAME_TRANS = string.maketrans(':/\\.', '____')
class PluginAliasCollection(object):
class AttributeCollection(object):
"""
Accumulator for extension attribute objects (such as Parameters). This will
replace any class member list accumulating such attributes through the magic of
metaprogramming\ [*]_.
Accumulator for plugin attribute objects (such as Parameters or Artifacts).
This will replace any class member list accumulating such attributes
through the magic of metaprogramming\ [*]_.
.. [*] which is totally safe and not going to backfire in any way...
@@ -93,7 +52,8 @@ class PluginAliasCollection(object):
def values(self):
return self._attrs.values()
def __init__(self):
def __init__(self, attrcls):
self._attrcls = attrcls
self._attrs = OrderedDict()
def add(self, p):
@@ -104,6 +64,8 @@ class PluginAliasCollection(object):
for a, v in p.__dict__.iteritems():
if v is not None:
setattr(newp, a, v)
if not hasattr(newp, "_overridden"):
newp._overridden = p._owner
self._attrs[p.name] = newp
else:
# Duplicate attribute condition is checked elsewhere.
@@ -119,13 +81,19 @@ class PluginAliasCollection(object):
__repr__ = __str__
def _to_attrcls(self, p):
if isinstance(p, tuple) or isinstance(p, list):
# must be in the form (name, {param: value, ...})
p = Alias(p[0], **p[1])
elif not isinstance(p, Alias):
old_owner = getattr(p, "_owner", None)
if isinstance(p, basestring):
p = self._attrcls(p)
elif isinstance(p, tuple) or isinstance(p, list):
p = self._attrcls(*p)
elif isinstance(p, dict):
p = self._attrcls(**p)
elif not isinstance(p, self._attrcls):
raise ValueError('Invalid parameter value: {}'.format(p))
if p.name in self._attrs:
if (p.name in self._attrs and not p.override and
p.name != 'modules'): # TODO: HACK due to "diamond dependency" in workloads...
raise ValueError('Attribute {} has already been defined.'.format(p.name))
p._owner = old_owner
return p
def __iadd__(self, other):
@@ -146,83 +114,209 @@ class PluginAliasCollection(object):
return len(self._attrs)
class AliasCollection(AttributeCollection):
def __init__(self):
super(AliasCollection, self).__init__(Alias)
def _to_attrcls(self, p):
if isinstance(p, tuple) or isinstance(p, list):
# must be in the form (name, {param: value, ...})
p = self._attrcls(p[0], **p[1])
elif not isinstance(p, self._attrcls):
raise ValueError('Invalid parameter value: {}'.format(p))
if p.name in self._attrs:
raise ValueError('Attribute {} has already been defined.'.format(p.name))
return p
class ListCollection(list):
def __init__(self, attrcls): # pylint: disable=unused-argument
super(ListCollection, self).__init__()
class Artifact(object):
"""
This is an artifact generated during execution/post-processing of a workload.
Unlike metrics, this represents an actual artifact, such as a file, generated.
This may be "result", such as trace, or it could be "meta data" such as logs.
These are distinguished using the ``kind`` attribute, which also helps WA decide
how it should be handled. Currently supported kinds are:
:log: A log file. Not part of "results" as such but contains information
about the run/workload execution that may be useful for diagnostics/meta
analysis.
:meta: A file containing metadata. This is not part of "results", but contains
information that may be necessary to reproduce the results
(contrast with ``log`` artifacts which are *not* necessary).
:data: This file contains new data, not available otherwise and should be
considered part of the "results" generated by WA. Most traces
would fall into this category.
:export: Exported version of results or some other artifact. This signifies
that this artifact does not contain any new data that is not
available elsewhere and that it may be safely discarded
without losing information.
:raw: Signifies that this is a raw dump/log that is normally processed to
extract useful information and is then discarded. In a sense, it
is the opposite of ``export``, but in general may also be
discarded.
.. note:: whether a file is marked as ``log``/``data`` or ``raw``
depends on how important it is to preserve this file,
e.g. when archiving, vs how much space it takes up.
Unlike ``export`` artifacts which are (almost) always
ignored by other exporters as that would never result
in data loss, ``raw`` files *may* be processed by
exporters if they decided that the risk of losing
potentially (though unlikely) useful data is greater
than the time/space cost of handling the artifact (e.g.
a database uploader may choose to ignore ``raw``
artifacts, where as a network filer archiver may choose
to archive them).
.. note: The kind parameter is intended to represent the logical function of
a particular artifact, not its intended means of processing --
this is left entirely up to the result processors.
"""
RUN = 'run'
ITERATION = 'iteration'
valid_kinds = ['log', 'meta', 'data', 'export', 'raw']
def __init__(self, name, path, kind, level=RUN, mandatory=False, description=None):
""""
:param name: Name that uniquely identifies this artifact.
:param path: The *relative* path of the artifact. Depending on the ``level``
must be either relative to the run or iteration output directory.
.. note:: this path *must* be delimited using ``/``
irrespective of the operating system.
:param kind: The type of the artifact this is (e.g. log file, result, etc.)
this will be used as a hint to result processors. This must be
one of ``'log'``, ``'meta'``, ``'data'``, ``'export'``,
``'raw'``.
:param level: The level at which the artifact will be generated. Must be
either ``'iteration'`` or ``'run'``.
:param mandatory: Boolean value indicating whether this artifact must be
present at the end of result processing for its level.
:param description: A free-form description of what this artifact is.
"""
if kind not in self.valid_kinds:
msg = 'Invalid Artifact kind: {}; must be in {}'
raise ValueError(msg.format(kind, self.valid_kinds))
self.name = name
self.path = path.replace('/', os.sep) if path is not None else path
self.kind = kind
self.level = level
self.mandatory = mandatory
self.description = description
def exists(self, context):
"""
Returns ``True`` if artifact exists within the specified context, and
``False`` otherwise.
"""
fullpath = os.path.join(context.output_directory, self.path)
return os.path.exists(fullpath)
def to_dict(self):
return copy(self.__dict__)
class Alias(object):
"""
This represents a configuration alias for an extension, mapping an alternative name to
a set of parameter values, effectively providing an alternative set of default values.
This represents a configuration alias for a plugin, mapping an alternative
name to a set of parameter values, effectively providing an alternative set
of default values.
"""
def __init__(self, name, **kwargs):
self.name = name
self.parameters = kwargs
self.params = kwargs
self.plugin_name = None # gets set by the MetaClass
def validate(self, plugin):
plugin_params = set(p.name for p in plugin.parameters)
for param in self.parameters:
if param not in plugin_params:
def validate(self, ext):
ext_params = set(p.name for p in ext.parameters)
for param in self.params:
if param not in ext_params:
# Raising config error because aliases might have come through
# the config.
msg = 'Parameter {} (defined in alias {}) is invalid for {}'
raise ValueError(msg.format(param, self.name, plugin.name))
raise ConfigError(msg.format(param, self.name, ext.name))
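In practice, aliases are declared on the plugin class and expanded by the loader, so a user-facing name maps to a plugin plus a bundle of parameter defaults. A declaration sketch (the workload and parameter names are illustrative)::

    class Dhrystone(Workload):  # hypothetical plugin
        name = 'dhrystone'
        parameters = [Parameter('threads', kind=int, default=1)]
        aliases = [Alias('dhrystone-mt', threads=4)]

    # the loader then resolves:
    # resolve_alias('dhrystone-mt') -> ('dhrystone', {'threads': 4})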
class PluginMeta(type):
"""
This basically adds some magic to extensions to make implementing new extensions, such as
workloads less complicated.
This basically adds some magic to plugins to make implementing new plugins,
such as workloads, less complicated.
It ensures that certain class attributes (specified by the ``to_propagate``
attribute of the metaclass) get propagated down the inheritance hierarchy. The assumption
is that the values of the attributes specified in the class are iterable; if that is not met,
Bad Things(tm) will happen.
attribute of the metaclass) get propagated down the inheritance hierarchy.
The assumption is that the values of the attributes specified in the class
are iterable; if that is not met, Bad Things (tm) will happen.
This also provides "virtual" method implementations. The ``super``'s version of these
methods (specified by the ``virtual_methods`` attribute of the metaclass) will be
automatically invoked.
This also provides virtual method implementations, similar to those in
C-derived OO languages, and alias specifications.
"""
to_propagate = [
('parameters', ConfigurationPointCollection),
('parameters', Parameter, AttributeCollection),
('artifacts', Artifact, AttributeCollection),
('core_modules', str, ListCollection),
]
#virtual_methods = ['validate', 'initialize', 'finalize']
virtual_methods = []
virtual_methods = ['validate', 'initialize', 'finalize']
global_virtuals = ['initialize', 'finalize']
def __new__(mcs, clsname, bases, attrs):
mcs._propagate_attributes(bases, attrs)
mcs._propagate_attributes(bases, attrs, clsname)
cls = type.__new__(mcs, clsname, bases, attrs)
mcs._setup_aliases(cls)
mcs._implement_virtual(cls, bases)
return cls
@classmethod
def _propagate_attributes(mcs, bases, attrs):
def _propagate_attributes(mcs, bases, attrs, clsname):
"""
For attributes specified by to_propagate, their values will be a union of
that specified for cls and it's bases (cls values overriding those of bases
that specified for cls and its bases (cls values overriding those of bases
in case of conflicts).
"""
for prop_attr, attr_collector_cls in mcs.to_propagate:
for prop_attr, attr_cls, attr_collector_cls in mcs.to_propagate:
should_propagate = False
propagated = attr_collector_cls()
propagated = attr_collector_cls(attr_cls)
for base in bases:
if hasattr(base, prop_attr):
propagated += getattr(base, prop_attr) or []
should_propagate = True
if prop_attr in attrs:
propagated += attrs[prop_attr] or []
pattrs = attrs[prop_attr] or []
for pa in pattrs:
if not isinstance(pa, basestring):
pa._owner = clsname
propagated += pattrs
should_propagate = True
if should_propagate:
for p in propagated:
override = bool(getattr(p, "override", None))
overridden = bool(getattr(p, "_overridden", None))
if override != overridden:
msg = "Overriding non existing parameter '{}' inside '{}'"
raise ValueError(msg.format(p.name, p._owner))
attrs[prop_attr] = propagated
@classmethod
def _setup_aliases(mcs, cls):
if hasattr(cls, 'aliases'):
aliases, cls.aliases = cls.aliases, PluginAliasCollection()
aliases, cls.aliases = cls.aliases, AliasCollection()
for alias in aliases:
if isinstance(alias, basestring):
alias = Alias(alias)
@@ -248,7 +342,8 @@ class PluginMeta(type):
for vmname in mcs.virtual_methods:
clsmethod = getattr(cls, vmname, None)
if clsmethod:
basemethods = [getattr(b, vmname) for b in bases if hasattr(b, vmname)]
basemethods = [getattr(b, vmname) for b in bases
if hasattr(b, vmname)]
methods[vmname] = [bm for bm in basemethods if bm != clsmethod]
methods[vmname].append(clsmethod)
@@ -261,7 +356,12 @@ class PluginMeta(type):
def wrapper(self, *args, **kwargs):
for dm in methods[name__]:
dm(self, *args, **kwargs)
if name__ in mcs.global_virtuals:
if dm not in called_globals:
dm(self, *args, **kwargs)
called_globals.add(dm)
else:
dm(self, *args, **kwargs)
return wrapper
setattr(cls, vmname, generate_method_wrapper(vmname))
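The net effect: calling a virtual method on an instance invokes the base classes' implementations first and the class's own override last, with no explicit ``super()`` calls, and methods listed in ``global_virtuals`` are deduplicated so a shared implementation runs only once. A behavioural sketch (class names are illustrative)::

    class Base(Plugin):
        kind = 'example'
        def initialize(self, context):
            pass  # runs first when Child().initialize() is called

    class Child(Base):
        name = 'child'
        def initialize(self, context):
            pass  # runs second, automatically chained by the metaclass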
@@ -269,35 +369,52 @@ class PluginMeta(type):
class Plugin(object):
"""
Base class for all WA plugins.
A plugin extends the functionality of WA in some way. Plugins are discovered
and loaded dynamically by the plugin loader upon invocation of WA scripts.
Adding an extension is a matter of placing a class that implements an appropriate
interface somewhere it would be discovered by the loader. That "somewhere" is
typically one of the plugin subdirectories under ``~/.workload_automation/``.
Base class for all WA plugins. A plugin
extends the functionality of WA in some way. Plugins are discovered and
loaded dynamically by the plugin loader upon invocation of WA scripts.
Adding a plugin is a matter of placing a class that implements an
appropriate interface somewhere it would be discovered by the loader. That
"somewhere" is typically one of the plugin subdirectories under
``~/.workload_automation/``.
"""
__metaclass__ = PluginMeta
name = None
kind = None
parameters = []
name = None
parameters = [
Parameter('modules', kind=list,
description="""
Lists the modules to be loaded by this plugin. A module is a
plug-in that further extends the functionality of a plugin.
"""),
]
artifacts = []
aliases = []
core_modules = []
@classmethod
def get_default_config(cls):
return {p.name: p.default for p in cls.parameters}
@classmethod
def get_parameter(cls, name):
for param in cls.parameters:
if param.name == name or name in param.aliases:
return param
@property
def dependencies_directory(self):
return _d(os.path.join(settings.dependencies_directory, self.name))
@property
def _classname(self):
return self.__class__.__name__
def __init__(self, **kwargs):
self.logger = logging.getLogger(self.name)
self.logger = logging.getLogger(self._classname)
self._modules = []
self.capabilities = getattr(self.__class__, 'capabilities', [])
self.update_config(**kwargs)
for param in self.parameters:
param.set_value(self, kwargs.get(param.name))
for key in kwargs:
if key not in self.parameters:
message = 'Unexpected parameter "{}" for {}'
raise ConfigError(message.format(key, self.name))
def get_config(self):
"""
@@ -309,35 +426,21 @@ class Plugin(object):
config[param.name] = getattr(self, param.name, None)
return config
def update_config(self, **kwargs):
"""
Updates current configuration (i.e. parameter values) of this plugin.
"""
for param in self.parameters:
param.set_value(self, kwargs.get(param.name))
for key in kwargs:
if key not in self.parameters:
message = 'Unexpected parameter "{}" for {}'
raise ConfigError(message.format(key, self.name))
def validate(self):
"""
Perform basic validation to ensure that this extension is capable of running.
This is intended as an early check to ensure the extension has not been mis-configured,
rather than a comprehensive check (that may, e.g., require access to the execution
context).
Perform basic validation to ensure that this plugin is capable of
running. This is intended as an early check to ensure the plugin has
not been mis-configured, rather than a comprehensive check (that may,
e.g., require access to the execution context).
This method may also be used to enforce (i.e. set as well as check) inter-parameter
constraints for the extension (e.g. if valid values for parameter A depend on the value
of parameter B -- something that is not possible to enforce using ``Parameter``\ 's
``constraint`` attribute).
This method may also be used to enforce (i.e. set as well as check)
inter-parameter constraints for the plugin (e.g. if valid values for
parameter A depend on the value of parameter B -- something that is not
possible to enforce using ``Parameter``\ 's ``constraint`` attribute).
"""
if self.name is None:
raise ValidationError('name not set for {}'.format(self.__class__.__name__))
if self.kind is None:
raise ValidationError('kind not set for {}'.format(self.name))
raise ValidationError('Name not set for {}'.format(self._classname))
for param in self.parameters:
param.validate(self)
@@ -347,109 +450,120 @@ class Plugin(object):
def finalize(self, context):
pass
def check_artifacts(self, context, level):
"""
Make sure that all mandatory artifacts have been generated.
"""
for artifact in self.artifacts:
if artifact.level != level or not artifact.mandatory:
continue
fullpath = os.path.join(context.output_directory, artifact.path)
if not os.path.exists(fullpath):
message = 'Mandatory "{}" has not been generated for {}.'
raise ValidationError(message.format(artifact.path, self.name))
def __getattr__(self, name):
if name == '_modules':
raise ValueError('_modules accessed too early!')
for module in self._modules:
if hasattr(module, name):
return getattr(module, name)
raise AttributeError(name)
def load_modules(self, loader):
"""
Load the modules specified by the "modules" Parameter using the
provided loader. A loader can be any object that has an attribute called
"get_module" that implements the following signature::
get_module(name, owner, **kwargs)
and returns an instance of :class:`wlauto.core.plugin.Module`. If the
module with the specified name is not found, the loader must raise an
appropriate exception.
"""
modules = list(reversed(self.core_modules)) +\
list(reversed(self.modules or []))
if not modules:
return
for module_spec in modules:
if not module_spec:
continue
module = self._load_module(loader, module_spec)
self._install_module(module)
def has(self, capability):
"""Check if this extension has the specified capability. The alternative method ``can`` is
identical to this. Which to use is up to the caller depending on what makes semantic sense
in the context of the capability, e.g. ``can('hard_reset')`` vs ``has('active_cooling')``."""
"""
Check if this plugin has the specified capability. The alternative
method ``can`` is identical to this. Which to use is up to the caller
depending on what makes semantic sense in the context of the
capability, e.g. ``can('hard_reset')`` vs ``has('active_cooling')``.
"""
return capability in self.capabilities
can = has
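The two spellings are interchangeable; which reads better depends on the capability being queried (sketch; the capability names and helper are illustrative)::

    if device.can('hard_reset'):      # verbs read naturally with can()
        device.hard_reset()
    if device.has('active_cooling'):  # properties read naturally with has()
        configure_fan_profile()       # hypothetical helper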
def _load_module(self, loader, module_spec):
if isinstance(module_spec, basestring):
name = module_spec
params = {}
elif isinstance(module_spec, dict):
if len(module_spec) != 1:
msg = 'Invalid module spec: {}; dict must have exactly one key -- '\
'the module name.'
raise ValueError(msg.format(module_spec))
name, params = module_spec.items()[0]
else:
message = 'Invalid module spec: {}; must be a string or a one-key dict.'
raise ValueError(message.format(module_spec))
class TargetedPluginMeta(PluginMeta):
if not isinstance(params, dict):
message = 'Invalid module spec: {}; dict value must also be a dict.'
raise ValueError(message.format(module_spec))
to_propagate = PluginMeta.to_propagate + [
('supported_targets', list),
('supported_platforms', list),
]
virtual_methods = PluginMeta.virtual_methods + [
'validate_on_target',
]
module = loader.get_module(name, owner=self, **params)
module.initialize(None)
return module
def _install_module(self, module):
for capability in module.capabilities:
if capability not in self.capabilities:
self.capabilities.append(capability)
self._modules.append(module)
class TargetedPlugin(Plugin):
"""
A plugin that operates on a target device. These kinds of plugins are created
with a ``devlib.Target`` instance and may only support certain kinds of targets.
A plugin that interacts with a target device.
"""
__metaclass__ = TargetedPluginMeta
supported_targets = []
supported_platforms = []
@classmethod
def check_compatible(cls, target):
if cls.supported_targets:
if target.os not in cls.supported_targets:
msg = 'Incompatible target OS "{}" for {}'
raise TargetError(msg.format(target.os, cls.name))
def __init__(self, target, **kwargs):
super(TargetedPlugin, self).__init__(**kwargs)
if self.supported_targets and target.os not in self.supported_targets:
raise TargetError('Plugin {} does not support target {}'.format(self.name, target.name))
if self.supported_platforms and target.platform.name not in self.supported_platforms:
raise TargetError('Plugin {} does not support platform {}'.format(self.name, target.platform))
self.check_compatible(target)
self.target = target
def validate_on_target(self):
"""
This will be invoked once at the beginning of a run after a ``Target``
has been connected and initialized. This is intended for validation
that cannot be performed offline but does not depend on ephemeral
state that is likely to change during the course of a run (validation
against such states should be done during setup of a particular
execution.
"""
pass
class PluginLoaderItem(object):
def __init__(self, ext_tuple):
self.name = ext_tuple.name
self.default_package = ext_tuple.default_package
self.default_path = ext_tuple.default_path
self.cls = load_class(ext_tuple.cls)
class GlobalParameterAlias(object):
"""
Represents a "global alias" for an plugin parameter. A global alias
is specified at the top-level of config rather namespaced under an plugin
name.
Multiple plugins may have parameters with the same global_alias if they are
part of the same inheritance hierarchy and one parameter is an override of the
other. This class keeps track of all such cases in its plugins dict.
"""
def __init__(self, name):
self.name = name
self.plugins = {}
def iteritems(self):
for ext in self.plugins.itervalues():
yield (self.get_param(ext), ext)
def get_param(self, ext):
for param in ext.parameters:
if param.global_alias == self.name:
return param
message = 'Plugin {} does not have a parameter with global alias {}'
raise ValueError(message.format(ext.name, self.name))
def update(self, other_ext):
self._validate_ext(other_ext)
self.plugins[other_ext.name] = other_ext
def _validate_ext(self, other_ext):
other_param = self.get_param(other_ext)
for param, ext in self.iteritems():
if ((not (issubclass(ext, other_ext) or issubclass(other_ext, ext))) and
other_param.kind != param.kind):
message = 'Duplicate global alias {} declared in {} and {} plugins with different types'
raise PluginLoaderError(message.format(self.name, ext.name, other_ext.name))
if not param.name == other_param.name:
message = 'Two params {} in {} and {} in {} both declare global alias {}'
raise PluginLoaderError(message.format(param.name, ext.name,
other_param.name, other_ext.name, self.name))
def __str__(self):
text = 'GlobalAlias({} => {})'
extlist = ', '.join(['{}.{}'.format(e.name, p.name) for p, e in self.iteritems()])
return text.format(self.name, extlist)
MODNAME_TRANS = string.maketrans(':/\\.', '____')
class PluginLoader(object):
"""
@@ -461,19 +575,19 @@ class PluginLoader(object):
"""
def __init__(self, packages=None, paths=None, ignore_paths=None, keep_going=False):
def __init__(self, packages=None, paths=None, ignore_paths=None,
keep_going=False):
"""
params::
:packages: List of packages to load plugins from.
:paths: List of paths to be searched for Python modules containing
WA plugins.
:ignore_paths: List of paths to ignore when searching for WA plugins (these would
typically be subdirectories of one or more locations listed in the
``paths`` parameter).
:keep_going: Specifies whether to keep going if an error occurs while loading
plugins.
:ignore_paths: List of paths to ignore when searching for WA plugins
(these would typically be subdirectories of one or
more locations listed in the ``paths`` parameter).
:keep_going: Specifies whether to keep going if an error occurs while
loading plugins.
"""
self.logger = logging.getLogger('pluginloader')
self.keep_going = keep_going
@@ -490,6 +604,8 @@ class PluginLoader(object):
def update(self, packages=None, paths=None, ignore_paths=None):
""" Load plugins from the specified paths/packages
without clearing or reloading existing plugin. """
msg = 'Updating from: packages={} paths={}'
self.logger.debug(msg.format(packages, paths))
if packages:
self.packages.extend(packages)
self._discover_from_packages(packages)
@@ -505,6 +621,7 @@ class PluginLoader(object):
def reload(self):
""" Clear all discovered items and re-run the discovery. """
self.logger.debug('Reloading')
self.clear()
self._discover_from_packages(self.packages)
self._discover_from_paths(self.paths, self.ignore_paths)
@@ -519,15 +636,16 @@ class PluginLoader(object):
try:
return self.plugins[name]
except KeyError:
raise NotFoundError('Plugins {} not found.'.format(name))
raise NotFoundError('plugin {} not found.'.format(name))
if kind not in self.kind_map:
raise ValueError('Unknown plugin type: {}'.format(kind))
store = self.kind_map[kind]
if name not in store:
raise NotFoundError('Plugins {} is not {} {}.'.format(name, get_article(kind), kind))
msg = 'plugin {} is not {} {}.'
raise NotFoundError(msg.format(name, get_article(kind), kind))
return store[name]
def get_plugin(self, name, kind=None, *args, **kwargs):
def get_plugin(self, name=None, kind=None, *args, **kwargs):
"""
Return plugin of the specified kind with the specified name. Any
additional parameters will be passed to the plugin's __init__.
@@ -548,7 +666,7 @@ class PluginLoader(object):
"""
real_name, alias_config = self.resolve_alias(name)
base_default_config = self.get_plugin_class(real_name).get_default_config()
return merge_dicts(base_default_config, alias_config, list_duplicates='last', dict_type=OrderedDict)
return merge_dicts_simple(base_default_config, alias_config)
def list_plugins(self, kind=None):
"""
@@ -588,7 +706,7 @@ class PluginLoader(object):
return (alias_name, {})
if alias_name in self.aliases:
alias = self.aliases[alias_name]
return (alias.plugin_name, alias.parameters)
return (alias.plugin_name, alias.params)
raise NotFoundError('Could not find plugin or alias "{}"'.format(alias_name))
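A resolution sketch: a concrete plugin name falls straight through with empty parameters, while an alias returns its target plugin plus the overrides recorded on the ``Alias`` (names are illustrative and assume the declaration sketched earlier)::

    loader.resolve_alias('dhrystone')     # -> ('dhrystone', {})
    loader.resolve_alias('dhrystone-mt')  # -> ('dhrystone', {'threads': 4})
    loader.resolve_alias('no-such-name')  # raises NotFoundError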
# Internal methods.
@@ -605,41 +723,45 @@ class PluginLoader(object):
loader.get_plugin('foo', kind='device')
"""
error_msg = 'No plugins of type "{}" discovered'
if name.startswith('get_'):
name = name.replace('get_', '', 1)
if name in self.kind_map:
def __wrapper(pname, *args, **kwargs):
return self.get_plugin(pname, name, *args, **kwargs)
return __wrapper
raise NotFoundError(error_msg.format(name))
if name.startswith('list_'):
name = name.replace('list_', '', 1).rstrip('s')
if name in self.kind_map:
def __wrapper(*args, **kwargs):
def __wrapper(*args, **kwargs): # pylint: disable=E0102
return self.list_plugins(name, *args, **kwargs)
return __wrapper
raise NotFoundError(error_msg.format(name))
if name.startswith('has_'):
name = name.replace('has_', '', 1)
if name in self.kind_map:
def __wrapper(pname, *args, **kwargs):
def __wrapper(pname, *args, **kwargs): # pylint: disable=E0102
return self.has_plugin(pname, name, *args, **kwargs)
return __wrapper
raise NotFoundError(error_msg.format(name))
raise AttributeError(name)
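``__getattr__`` thus synthesises three families of helpers per discovered plugin kind; for a kind named ``workload``, all of the following work without being defined anywhere (sketch)::

    loader.get_workload('dhrystone', target)  # get_plugin(..., kind='workload')
    loader.list_workloads()                   # list_plugins('workload')
    loader.has_workload('dhrystone')          # has_plugin(..., kind='workload')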
def _discover_from_packages(self, packages):
self.logger.debug('Discovering plugins in packages')
try:
for package in packages:
for module in walk_modules(package):
self._discover_in_module(module)
except ImportError as e:
source = getattr(e, 'path', package)
except HostError as e:
message = 'Problem loading plugins from {}: {}'
raise PluginLoaderError(message.format(source, e.message))
raise PluginLoaderError(message.format(e.module, str(e.orig_exc)),
e.exc_info)
def _discover_from_paths(self, paths, ignore_paths):
paths = paths or []
ignore_paths = ignore_paths or []
self.logger.debug('Discovering plugins in paths')
for path in paths:
self.logger.debug('Checking path %s', path)
@@ -654,7 +776,7 @@ class PluginLoader(object):
if should_skip:
continue
for fname in files:
if not os.path.splitext(fname)[1].lower() == '.py':
if os.path.splitext(fname)[1].lower() != '.py':
continue
filepath = os.path.join(root, fname)
self._discover_from_file(filepath)
@@ -669,10 +791,11 @@ class PluginLoader(object):
self.logger.warning('Failed to load {}'.format(filepath))
self.logger.warning('Got: {}'.format(e))
else:
raise PluginLoaderError('Failed to load {}'.format(filepath), sys.exc_info())
msg = 'Failed to load {}'
raise PluginLoaderError(msg.format(filepath), sys.exc_info())
except Exception as e:
message = 'Problem loading plugins from {}: {}'
raise PluginLoaderError(message.format(filepath, e))
raise PluginLoaderError(message.format(filepath, e))
def _discover_in_module(self, module): # NOQA pylint: disable=too-many-branches
self.logger.debug('Checking module %s', module.__name__)
@@ -699,6 +822,7 @@ class PluginLoader(object):
raise e
finally:
log.dedent()
pass
def _add_found_plugin(self, obj):
"""
@@ -708,8 +832,9 @@ class PluginLoader(object):
self.logger.debug('Adding %s %s', obj.kind, obj.name)
key = identifier(obj.name.lower())
if key in self.plugins or key in self.aliases:
raise PluginLoaderError('{} "{}" already exists.'.format(obj.kind, obj.name))
# Plugins are tracked both, in a common plugins
msg = '{} "{}" already exists.'
raise PluginLoaderError(msg.format(obj.kind, obj.name))
# Plugins are tracked both in a common plugins
# dict and in a per-kind dict (as retrieving
# plugins by kind is a common use case).
self.plugins[key] = obj
@@ -718,17 +843,6 @@ class PluginLoader(object):
for alias in obj.aliases:
alias_id = identifier(alias.name.lower())
if alias_id in self.plugins or alias_id in self.aliases:
raise PluginLoaderError('{} "{}" already exists.'.format(obj.kind, obj.name))
msg = '{} "{}" already exists.'
raise PluginLoaderError(msg.format(obj.kind, obj.name))
self.aliases[alias_id] = alias
# Update global aliases list. If a global alias is already in the list,
# then make sure this plugin is in the same parent/child hierarchy
# as the one already found.
for param in obj.parameters:
if param.global_alias:
if param.global_alias not in self.global_param_aliases:
ga = GlobalParameterAlias(param.global_alias)
ga.update(obj)
self.global_param_aliases[ga.name] = ga
else: # global alias already exists.
self.global_param_aliases[param.global_alias].update(obj)


@@ -17,53 +17,73 @@ import sys
class __LoaderWrapper(object):
@property
def kinds(self):
if not self._loader:
self.reset()
return self._loader.kind_map.keys()
@property
def kind_map(self):
if not self._loader:
self.reset()
return self._loader.kind_map
def __init__(self):
self._loader = None
def reset(self):
# These imports cannot be done at top level, because of
# sys.modules manipulation below
from wa.framework.plugin import PluginLoader
from wa.framework.configuration.core import settings
self._loader = PluginLoader(settings.plugin_packages,
settings.plugin_paths,
settings.plugin_ignore_paths)
[settings.plugins_directory], [])
def update(self, packages=None, paths=None, ignore_paths=None):
if not self._loader: self.reset()
if not self._loader:
self.reset()
self._loader.update(packages, paths, ignore_paths)
def reload(self):
if not self._loader: self.reset()
if not self._loader:
self.reset()
self._loader.reload()
def list_plugins(self, kind=None):
if not self._loader: self.reset()
if not self._loader:
self.reset()
return self._loader.list_plugins(kind)
def has_plugin(self, name, kind=None):
if not self._loader: self.reset()
if not self._loader:
self.reset()
return self._loader.has_plugin(name, kind)
def get_plugin_class(self, name, kind=None):
if not self._loader: self.reset()
return _load.get_plugin_class(name, kind)
if not self._loader:
self.reset()
return self._loader.get_plugin_class(name, kind)
def get_plugin(self, name, kind=None, *args, **kwargs):
if not self._loader: self.reset()
return self._loader.get_plugin(name, kind=kind, *args, **kwargs)
def get_plugin(self, name=None, kind=None, *args, **kwargs):
if not self._loader:
self.reset()
return self._loader.get_plugin(name=name, kind=kind, *args, **kwargs)
def get_default_config(self, name):
if not self._loader: self.reset()
if not self._loader:
self.reset()
return self._loader.get_default_config(name)
def resolve_alias(self, name):
if not self._loader: self.reset()
if not self._loader:
self.reset()
return self._loader.resolve_alias(name)
def __getattr__(self, name):
if not self._loader: self.reset()
if not self._loader:
self.reset()
return getattr(self._loader, name)
sys.modules[__name__] = __LoaderWrapper()
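A stripped-down sketch of the ``sys.modules`` replacement pattern used above (hypothetical standalone module, not WA's actual code): replacing the module object with a class instance lets plain attribute access trigger lazy initialisation.

# lazy_mod.py
import sys

class _LazyModule(object):
    def __init__(self):
        self._impl = None

    def __getattr__(self, name):
        # Only invoked for attributes not found normally; '_impl' itself is
        # always found in the instance dict, so this does not recurse.
        if self._impl is None:
            self._impl = {'answer': 42}  # expensive setup goes here
        return self._impl[name]

sys.modules[__name__] = _LazyModule()

A client then writes ``import lazy_mod; lazy_mod.answer`` and initialisation is deferred until that first attribute access.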

View File

@@ -60,6 +60,23 @@ class GetterPriority(object):
remote = -20
class __NullOwner(object):
"""Represents an owner for a resource not owned by anyone."""
name = 'noone'
dependencies_directory = settings.dependencies_directory
def __getattr__(self, name):
return None
def __str__(self):
return 'no-one'
__repr__ = __str__
NO_ONE = __NullOwner()
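``__NullOwner`` is a null-object stand-in: any attribute probed on it quietly yields ``None``, so getter code can treat owned and unowned resources uniformly. For example:

assert NO_ONE.some_arbitrary_attribute is None
assert str(NO_ONE) == 'no-one'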
class Resource(object):
"""
Represents a resource that needs to be resolved. This can be pretty much
@@ -95,6 +112,73 @@ class Resource(object):
return '<{}\'s {}>'.format(self.owner, self.name)
class FileResource(Resource):
"""
Base class for all resources that are a regular file in the
file system.
"""
def delete(self, instance):
os.remove(instance)
class File(FileResource):
name = 'file'
def __init__(self, owner, path, url=None):
super(File, self).__init__(owner)
self.path = path
self.url = url
def __str__(self):
return '<{}\'s {} {}>'.format(self.owner, self.name, self.path or self.url)
class PluginAsset(File):
name = 'plugin_asset'
def __init__(self, owner, path):
super(PluginAsset, self).__init__(owner, os.path.join(owner.name, path))
class Executable(FileResource):
name = 'executable'
def __init__(self, owner, platform, filename):
super(Executable, self).__init__(owner)
self.platform = platform
self.filename = filename
def __str__(self):
return '<{}\'s {} {}>'.format(self.owner, self.platform, self.filename)
class ReventFile(FileResource):
name = 'revent'
def __init__(self, owner, stage):
super(ReventFile, self).__init__(owner)
self.stage = stage
class JarFile(FileResource):
name = 'jar'
class ApkFile(FileResource):
name = 'apk'
def __init__(self, owner, version):
super(ApkFile, self).__init__(owner)
self.version = version
class ResourceGetter(Plugin):
"""
Base class for implementing resolvers. Defines resolver
@@ -201,18 +285,20 @@ class ResourceResolver(object):
"""
def __init__(self):
self.logger = logging.getLogger('resolver')
def __init__(self, config):
self.logger = logging.getLogger(self.__class__.__name__)
self.getters = defaultdict(prioritylist)
self.config = config
def load(self, loader=pluginloader):
def load(self):
"""
Discover getters under the specified source. The source could
be either a python package/module or a path.
"""
for rescls in loader.list_resource_getters():
getter = loader.get_resource_getter(rescls.name, resolver=self)
for rescls in pluginloader.list_resource_getters():
getter = self.config.get_plugin(name=rescls.name, kind="resource_getter", resolver=self)
getter.register()
def get(self, resource, strict=True, *args, **kwargs):
@@ -259,7 +345,7 @@ class ResourceResolver(object):
means should register with lower (negative) priorities.
"""
self.logger.debug('Registering {}'.format(getter.name))
self.logger.debug('Registering {} for {} resources'.format(getter.name, kind))
self.getters[kind].add(getter, priority)
def unregister(self, getter, kind):
@@ -273,420 +359,6 @@ class ResourceResolver(object):
except ValueError:
raise ValueError('Resource getter {} is not installed.'.format(getter.name))
class __NullOwner(object):
"""Represents an owner for a resource not owned by anyone."""
name = 'noone'
dependencies_directory = settings.dependencies_directory
def __getattr__(self, name):
return None
def __str__(self):
return 'no-one'
__repr__ = __str__
NO_ONE = __NullOwner()
class FileResource(Resource):
"""
Base class for all resources that are a regular file in the
file system.
"""
def delete(self, instance):
os.remove(instance)
class File(FileResource):
name = 'file'
def __init__(self, owner, path, url=None):
super(File, self).__init__(owner)
self.path = path
self.url = url
def __str__(self):
return '<{}\'s {} {}>'.format(self.owner, self.name, self.path or self.url)
class ExtensionAsset(File):
name = 'extension_asset'
def __init__(self, owner, path):
super(ExtensionAsset, self).__init__(
owner, os.path.join(owner.name, path))
class Executable(FileResource):
name = 'executable'
def __init__(self, owner, platform, filename):
super(Executable, self).__init__(owner)
self.platform = platform
self.filename = filename
def __str__(self):
return '<{}\'s {} {}>'.format(self.owner, self.platform, self.filename)
class ReventFile(FileResource):
name = 'revent'
def __init__(self, owner, stage):
super(ReventFile, self).__init__(owner)
self.stage = stage
class JarFile(FileResource):
name = 'jar'
class ApkFile(FileResource):
name = 'apk'
class PackageFileGetter(ResourceGetter):
name = 'package_file'
description = """
Looks for exactly one file with the specified extension in the owner's
directory. If a version is specified on invocation of get, it will filter
the discovered file based on that version. Versions are treated as
case-insensitive.
"""
extension = None
def register(self):
self.resolver.register(self, self.extension, GetterPriority.package)
def get(self, resource, **kwargs):
resource_dir = os.path.dirname(
sys.modules[resource.owner.__module__].__file__)
version = kwargs.get('version')
return get_from_location_by_extension(resource, resource_dir, self.extension, version)
class EnvironmentFileGetter(ResourceGetter):
name = 'environment_file'
description = """
Looks for exactly one file with the specified extension in the owner's
directory. If a version is specified on invocation of get, it will filter
the discovered file based on that version. Versions are treated as
case-insensitive.
"""
extension = None
def register(self):
self.resolver.register(self, self.extension,
GetterPriority.environment)
def get(self, resource, **kwargs):
resource_dir = resource.owner.dependencies_directory
version = kwargs.get('version')
return get_from_location_by_extension(resource, resource_dir, self.extension, version)
class ReventGetter(ResourceGetter):
"""Implements logic for identifying revent files."""
def get_base_location(self, resource):
raise NotImplementedError()
def register(self):
self.resolver.register(self, 'revent', GetterPriority.package)
def get(self, resource, **kwargs):
filename = '.'.join([resource.owner.device.name,
resource.stage, 'revent']).lower()
location = _d(os.path.join(
self.get_base_location(resource), 'revent_files'))
for candidate in os.listdir(location):
if candidate.lower() == filename.lower():
return os.path.join(location, candidate)
class PackageApkGetter(PackageFileGetter):
name = 'package_apk'
extension = 'apk'
class PackageJarGetter(PackageFileGetter):
name = 'package_jar'
extension = 'jar'
class PackageReventGetter(ReventGetter):
name = 'package_revent'
def get_base_location(self, resource):
return _get_owner_path(resource)
class EnvironmentApkGetter(EnvironmentFileGetter):
name = 'environment_apk'
extension = 'apk'
class EnvironmentJarGetter(EnvironmentFileGetter):
name = 'environment_jar'
extension = 'jar'
class EnvironmentReventGetter(ReventGetter):
name = 'enviroment_revent'
def get_base_location(self, resource):
return resource.owner.dependencies_directory
class ExecutableGetter(ResourceGetter):
name = 'exe_getter'
resource_type = 'executable'
priority = GetterPriority.environment
def get(self, resource, **kwargs):
if settings.binaries_repository:
path = os.path.join(settings.binaries_repository,
resource.platform, resource.filename)
if os.path.isfile(path):
return path
class PackageExecutableGetter(ExecutableGetter):
name = 'package_exe_getter'
priority = GetterPriority.package
def get(self, resource, **kwargs):
path = os.path.join(_get_owner_path(resource), 'bin',
resource.platform, resource.filename)
if os.path.isfile(path):
return path
class EnvironmentExecutableGetter(ExecutableGetter):
name = 'env_exe_getter'
def get(self, resource, **kwargs):
paths = [
os.path.join(resource.owner.dependencies_directory, 'bin',
resource.platform, resource.filename),
os.path.join(settings.environment_root, 'bin',
resource.platform, resource.filename),
]
for path in paths:
if os.path.isfile(path):
return path
class DependencyFileGetter(ResourceGetter):
name = 'filer'
description = """
Gets resources from the specified mount point. Copies them to the local
dependencies directory, and returns the path to the local copy.
"""
resource_type = 'file'
relative_path = '' # May be overridden by subclasses.
default_mount_point = '/'
priority = GetterPriority.remote
parameters = [
Parameter('mount_point', default='/', global_alias='filer_mount_point',
description='Local mount point for the remote filer.'),
]
def __init__(self, resolver, **kwargs):
super(DependencyFileGetter, self).__init__(resolver, **kwargs)
self.mount_point = settings.filer_mount_point or self.default_mount_point
def get(self, resource, **kwargs):
force = kwargs.get('force')
remote_path = os.path.join(
self.mount_point, self.relative_path, resource.path)
local_path = os.path.join(
resource.owner.dependencies_directory, os.path.basename(resource.path))
if not os.path.isfile(local_path) or force:
if not os.path.isfile(remote_path):
return None
self.logger.debug('Copying {} to {}'.format(
remote_path, local_path))
shutil.copy(remote_path, local_path)
return local_path
class PackageCommonDependencyGetter(ResourceGetter):
name = 'packaged_common_dependency'
resource_type = 'file'
priority = GetterPriority.package - 1 # check after owner-specific locations
def get(self, resource, **kwargs):
path = os.path.join(settings.package_directory,
'common', resource.path)
if os.path.exists(path):
return path
class EnvironmentCommonDependencyGetter(ResourceGetter):
name = 'environment_common_dependency'
resource_type = 'file'
# check after owner-specific locations
priority = GetterPriority.environment - 1
def get(self, resource, **kwargs):
path = os.path.join(settings.dependencies_directory,
os.path.basename(resource.path))
if os.path.exists(path):
return path
class PackageDependencyGetter(ResourceGetter):
name = 'packaged_dependency'
resource_type = 'file'
priority = GetterPriority.package
def get(self, resource, **kwargs):
owner_path = inspect.getfile(resource.owner.__class__)
path = os.path.join(os.path.dirname(owner_path), resource.path)
if os.path.exists(path):
return path
class EnvironmentDependencyGetter(ResourceGetter):
name = 'environment_dependency'
resource_type = 'file'
priority = GetterPriority.environment
def get(self, resource, **kwargs):
path = os.path.join(resource.owner.dependencies_directory,
os.path.basename(resource.path))
if os.path.exists(path):
return path
class ExtensionAssetGetter(DependencyFileGetter):
name = 'extension_asset'
resource_type = 'extension_asset'
relative_path = 'workload_automation/assets'
class RemoteFilerGetter(ResourceGetter):
name = 'filer_assets'
description = """
Finds resources on a (locally mounted) remote filer and caches them locally.
This assumes that the filer is mounted on the local machine (e.g. as a samba share).
"""
priority = GetterPriority.remote
resource_type = ['apk', 'file', 'jar', 'revent']
parameters = [
Parameter('remote_path', global_alias='remote_assets_path', default='',
description="""
Path, on the local system, where the assets are located.
"""),
Parameter('always_fetch', kind=boolean, default=False, global_alias='always_fetch_remote_assets',
description="""
If ``True``, will always attempt to fetch assets from the
remote, even if a local cached copy is available.
"""),
]
def get(self, resource, **kwargs):
version = kwargs.get('version')
if resource.owner:
remote_path = os.path.join(self.remote_path, resource.owner.name)
local_path = os.path.join(
settings.environment_root, resource.owner.dependencies_directory)
return self.try_get_resource(resource, version, remote_path, local_path)
else:
result = None
for entry in os.listdir(remote_path):
remote_path = os.path.join(self.remote_path, entry)
local_path = os.path.join(
settings.environment_root, settings.dependencies_directory, entry)
result = self.try_get_resource(
resource, version, remote_path, local_path)
if result:
break
return result
def try_get_resource(self, resource, version, remote_path, local_path):
if not self.always_fetch:
result = self.get_from(resource, version, local_path)
if result:
return result
if remote_path:
# Didn't find it cached locally; now check the remote
result = self.get_from(resource, version, remote_path)
if not result:
return result
else: # remote path is not set
return None
# Found it remotely, cache locally, then return it
local_full_path = os.path.join(
_d(local_path), os.path.basename(result))
self.logger.debug('cp {} {}'.format(result, local_full_path))
shutil.copy(result, local_full_path)
return local_full_path
def get_from(self, resource, version, location): # pylint: disable=no-self-use
if resource.name in ['apk', 'jar']:
return get_from_location_by_extension(resource, location, resource.name, version)
elif resource.name == 'file':
filepath = os.path.join(location, resource.path)
if os.path.exists(filepath):
return filepath
elif resource.name == 'revent':
filename = '.'.join(
[resource.owner.device.name, resource.stage, 'revent']).lower()
alternate_location = os.path.join(location, 'revent_files')
# There tends to be some confusion as to where revent files should
# be placed. This looks both in the extension's directory, and in
# 'revent_files' subdirectory under it, if it exists.
if os.path.isdir(alternate_location):
for candidate in os.listdir(alternate_location):
if candidate.lower() == filename.lower():
return os.path.join(alternate_location, candidate)
if os.path.isdir(location):
for candidate in os.listdir(location):
if candidate.lower() == filename.lower():
return os.path.join(location, candidate)
else:
message = 'Unexpected resource type: {}'.format(resource.name)
raise ValueError(message)
# Utility functions
def get_from_location_by_extension(resource, location, extension, version=None):

View File

@@ -0,0 +1,510 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains the standard set of resource getters used by Workload Automation.
"""
import os
import sys
import shutil
import inspect
import httplib
import logging
import json
import requests
from wa import Parameter, settings, __file__ as __base_filepath
from wa.framework.resource import ResourceGetter, GetterPriority, NO_ONE
from wa.framework.exception import ResourceError
from wa.utils.misc import (ensure_directory_exists as _d,
ensure_file_directory_exists as _f, sha256, urljoin)
from wa.utils.types import boolean
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
class PackageFileGetter(ResourceGetter):
name = 'package_file'
description = """
Looks for exactly one file with the extension specified by the ``plugin``
attribute in the owner's directory. If a version is specified on invocation
of get, it will filter the discovered file based on that version. Versions
are treated as case-insensitive.
"""
plugin = None
def register(self):
self.resolver.register(self, self.plugin, GetterPriority.package)
def get(self, resource, **kwargs):
resource_dir = os.path.dirname(sys.modules[resource.owner.__module__].__file__)
version = kwargs.get('version')
return get_from_location_by_plugin(resource, resource_dir, self.plugin, version)
class EnvironmentFileGetter(ResourceGetter):
name = 'environment_file'
description = """Looks for exactly one file with the specified plugin in the owner's directory. If a version
is specified on invocation of get, it will filter the discovered file based on that version.
Versions are treated as case-insensitive."""
plugin = None
def register(self):
self.resolver.register(self, self.plugin, GetterPriority.environment)
def get(self, resource, **kwargs):
resource_dir = resource.owner.dependencies_directory
version = kwargs.get('version')
return get_from_location_by_plugin(resource, resource_dir, self.plugin, version)
class ReventGetter(ResourceGetter):
"""Implements logic for identifying revent files."""
def get_base_location(self, resource):
raise NotImplementedError()
def register(self):
self.resolver.register(self, 'revent', GetterPriority.package)
def get(self, resource, **kwargs):
filename = '.'.join([resource.owner.device.model, resource.stage, 'revent']).lower()
location = _d(os.path.join(self.get_base_location(resource), 'revent_files'))
for candidate in os.listdir(location):
if candidate.lower() == filename.lower():
return os.path.join(location, candidate)
class PackageApkGetter(PackageFileGetter):
name = 'package_apk'
plugin = 'apk'
class PackageJarGetter(PackageFileGetter):
name = 'package_jar'
plugin = 'jar'
class PackageReventGetter(ReventGetter):
name = 'package_revent'
def get_base_location(self, resource):
return get_owner_path(resource)
class EnvironmentApkGetter(EnvironmentFileGetter):
name = 'environment_apk'
plugin = 'apk'
class EnvironmentJarGetter(EnvironmentFileGetter):
name = 'environment_jar'
plugin = 'jar'
class EnvironmentReventGetter(ReventGetter):
name = 'enviroment_revent'
def get_base_location(self, resource):
return resource.owner.dependencies_directory
class ExecutableGetter(ResourceGetter):
name = 'exe_getter'
resource_type = 'executable'
priority = GetterPriority.environment
def get(self, resource, **kwargs):
if settings.assets_repository:
path = os.path.join(settings.assets_repository, resource.platform, resource.filename)
if os.path.isfile(path):
return path
class PackageExecutableGetter(ExecutableGetter):
name = 'package_exe_getter'
priority = GetterPriority.package
def get(self, resource, **kwargs):
path = os.path.join(get_owner_path(resource), 'bin', resource.platform, resource.filename)
if os.path.isfile(path):
return path
class EnvironmentExecutableGetter(ExecutableGetter):
name = 'env_exe_getter'
def get(self, resource, **kwargs):
paths = [
os.path.join(resource.owner.dependencies_directory, 'bin',
resource.platform, resource.filename),
os.path.join(settings.user_directory, 'bin',
resource.platform, resource.filename),
]
for path in paths:
if os.path.isfile(path):
return path
class DependencyFileGetter(ResourceGetter):
name = 'filer'
description = """
Gets resources from the specified mount point. Copies them to the local
dependencies directory, and returns the path to the local copy.
"""
resource_type = 'file'
relative_path = '' # May be overridden by subclasses.
priority = GetterPriority.remote
parameters = [
Parameter('mount_point', default='/', global_alias='remote_assets_path',
description='Local mount point for the remote filer.'),
]
def __init__(self, resolver, **kwargs):
super(DependencyFileGetter, self).__init__(resolver, **kwargs)
def get(self, resource, **kwargs):
force = kwargs.get('force')
remote_path = os.path.join(self.mount_point, self.relative_path, resource.path)
local_path = os.path.join(resource.owner.dependencies_directory, os.path.basename(resource.path))
if not os.path.isfile(local_path) or force:
if not os.path.isfile(remote_path):
return None
self.logger.debug('Copying {} to {}'.format(remote_path, local_path))
shutil.copy(remote_path, local_path)
return local_path
class PackageCommonDependencyGetter(ResourceGetter):
name = 'packaged_common_dependency'
resource_type = 'file'
priority = GetterPriority.package - 1 # check after owner-specific locations
def get(self, resource, **kwargs):
path = os.path.join(settings.package_directory, 'common', resource.path)
if os.path.exists(path):
return path
class EnvironmentCommonDependencyGetter(ResourceGetter):
name = 'environment_common_dependency'
resource_type = 'file'
priority = GetterPriority.environment - 1 # check after owner-specific locations
def get(self, resource, **kwargs):
path = os.path.join(settings.dependencies_directory,
os.path.basename(resource.path))
if os.path.exists(path):
return path
class PackageDependencyGetter(ResourceGetter):
name = 'packaged_dependency'
resource_type = 'file'
priority = GetterPriority.package
def get(self, resource, **kwargs):
owner_path = inspect.getfile(resource.owner.__class__)
path = os.path.join(os.path.dirname(owner_path), resource.path)
if os.path.exists(path):
return path
class EnvironmentDependencyGetter(ResourceGetter):
name = 'environment_dependency'
resource_type = 'file'
priority = GetterPriority.environment
def get(self, resource, **kwargs):
path = os.path.join(resource.owner.dependencies_directory, os.path.basename(resource.path))
if os.path.exists(path):
return path
class PluginAssetGetter(DependencyFileGetter):
name = 'plugin_asset'
resource_type = 'plugin_asset'
class HttpGetter(ResourceGetter):
name = 'http_assets'
description = """
Downloads resources from a server based on an index fetched from the specified URL.
Given a URL, this will try to fetch ``<URL>/index.json``. The index file maps plugin
names to a list of corresponding asset descriptions. Each asset description contains a path
(relative to the base URL) of the resource and a SHA256 hash, so that this Getter can
verify whether the resource on the remote has changed.
For example, let's assume we want to get the APK file for workload "foo", and that
assets are hosted at ``http://example.com/assets``. This Getter will first try to
download ``http://example.com/assets/index.json``. The index file may contain
something like ::
{
"foo": [
{
"path": "foo-app.apk",
"sha256": "b14530bb47e04ed655ac5e80e69beaa61c2020450e18638f54384332dffebe86"
},
{
"path": "subdir/some-other-asset.file",
"sha256": "48d9050e9802246d820625717b72f1c2ba431904b8484ca39befd68d1dbedfff"
}
]
}
This Getter will look through the list of assets for "foo" (in this case, two) and
check the paths until it finds one matching the resource (in this case, "foo-app.apk").
Finally, it will try to download that file relative to the base URL and plugin name
(in this case, "http://example.com/assets/foo/foo-app.apk"). The downloaded version
will be cached locally, so that in the future, the getter will check the SHA256 hash
of the local file against the one advertised inside index.json, and provided that hasn't
changed, it won't try to download the file again.
"""
priority = GetterPriority.remote
resource_type = ['apk', 'file', 'jar', 'revent']
parameters = [
Parameter('url', global_alias='remote_assets_url',
description="""URL of the index file for assets on an HTTP server."""),
Parameter('username',
description="""User name for authenticating with assets URL"""),
Parameter('password',
description="""Password for authenticationg with assets URL"""),
Parameter('always_fetch', kind=boolean, default=False, global_alias='always_fetch_remote_assets',
description="""If ``True``, will always attempt to fetch assets from the remote, even if
a local cached copy is available."""),
Parameter('chunk_size', kind=int, default=1024,
description="""Chunk size for streaming large assets."""),
]
def __init__(self, resolver, **kwargs):
super(HttpGetter, self).__init__(resolver, **kwargs)
self.index = None
def get(self, resource, **kwargs):
if not resource.owner:
return # TODO: add support for unowned resources
if not self.index:
self.index = self.fetch_index()
asset = self.resolve_resource(resource)
if not asset:
return
return self.download_asset(asset, resource.owner.name)
def fetch_index(self):
if not self.url:
return {}
index_url = urljoin(self.url, 'index.json')
response = self.geturl(index_url)
if response.status_code != httplib.OK:
message = 'Could not fetch "{}"; received "{} {}"'
self.logger.error(message.format(index_url, response.status_code, response.reason))
return {}
return json.loads(response.content)
def download_asset(self, asset, owner_name):
url = urljoin(self.url, owner_name, asset['path'])
local_path = _f(os.path.join(settings.dependencies_directory, '__remote',
owner_name, asset['path'].replace('/', os.sep)))
if os.path.isfile(local_path) and not self.always_fetch:
local_sha = sha256(local_path)
if local_sha == asset['sha256']:
self.logger.debug('Local SHA256 matches; not re-downloading')
return local_path
self.logger.debug('Downloading {}'.format(url))
response = self.geturl(url, stream=True)
if response.status_code != httplib.OK:
message = 'Could not download asset "{}"; received "{} {}"'
self.logger.warning(message.format(url, response.status_code, response.reason))
return
with open(local_path, 'wb') as wfh:
for chunk in response.iter_content(chunk_size=self.chunk_size):
wfh.write(chunk)
return local_path
def geturl(self, url, stream=False):
if self.username:
auth = (self.username, self.password)
else:
auth = None
return requests.get(url, auth=auth, stream=stream)
def resolve_resource(self, resource):
assets = self.index.get(resource.owner.name, {})
if not assets:
return {}
if resource.name in ['apk', 'jar']:
paths = [a['path'] for a in assets]
version = getattr(resource, 'version', None)
found = get_from_list_by_plugin(resource, paths, resource.name, version)
if found:
for a in assets:
if a['path'] == found:
return a
elif resource.name == 'revent':
filename = '.'.join([resource.owner.device.name, resource.stage, 'revent']).lower()
for asset in assets:
pathname = os.path.basename(asset['path']).lower()
if pathname == filename:
return asset
else: # file
for asset in assets:
if asset['path'].lower() == resource.path.lower():
return asset
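A hedged sketch of exercising the index.json contract described above directly with ``requests``; the server URL, workload name, and file names are all hypothetical.

import hashlib
import requests

BASE = 'http://example.com/assets'  # hypothetical asset server

index = requests.get(BASE + '/index.json').json()
asset = index['foo'][0]  # first asset advertised for workload "foo"

response = requests.get('{}/foo/{}'.format(BASE, asset['path']), stream=True)
with open('foo-app.apk', 'wb') as wfh:
    for chunk in response.iter_content(chunk_size=1024):
        wfh.write(chunk)

# Verify the download against the advertised SHA256, as HttpGetter does.
with open('foo-app.apk', 'rb') as fh:
    assert hashlib.sha256(fh.read()).hexdigest() == asset['sha256']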
class RemoteFilerGetter(ResourceGetter):
name = 'filer_assets'
description = """
Finds resources on a (locally mounted) remote filer and caches them locally.
This assumes that the filer is mounted on the local machine (e.g. as a samba share).
"""
priority = GetterPriority.remote
resource_type = ['apk', 'file', 'jar', 'revent']
parameters = [
Parameter('remote_path', global_alias='remote_assets_path', default='',
description="""Path, on the local system, where the assets are located."""),
Parameter('always_fetch', kind=boolean, default=False, global_alias='always_fetch_remote_assets',
description="""If ``True``, will always attempt to fetch assets from the remote, even if
a local cached copy is available."""),
]
def get(self, resource, **kwargs):
version = kwargs.get('version')
if resource.owner:
remote_path = os.path.join(self.remote_path, resource.owner.name)
local_path = os.path.join(settings.user_directory, '__filer', resource.owner.dependencies_directory)
return self.try_get_resource(resource, version, remote_path, local_path)
else:
result = None
for entry in os.listdir(remote_path):
remote_path = os.path.join(self.remote_path, entry)
local_path = os.path.join(settings.user_directory, '__filer', settings.dependencies_directory, entry)
result = self.try_get_resource(resource, version, remote_path, local_path)
if result:
break
return result
def try_get_resource(self, resource, version, remote_path, local_path):
if not self.always_fetch:
result = self.get_from(resource, version, local_path)
if result:
return result
if remote_path:
# Didn't find it cached locally; now check the remote
result = self.get_from(resource, version, remote_path)
if not result:
return result
else: # remote path is not set
return None
# Found it remotely, cache locally, then return it
local_full_path = os.path.join(_d(local_path), os.path.basename(result))
self.logger.debug('cp {} {}'.format(result, local_full_path))
shutil.copy(result, local_full_path)
return local_full_path
def get_from(self, resource, version, location): # pylint: disable=no-self-use
if resource.name in ['apk', 'jar']:
return get_from_location_by_plugin(resource, location, resource.name, version)
elif resource.name == 'file':
filepath = os.path.join(location, resource.path)
if os.path.exists(filepath):
return filepath
elif resource.name == 'revent':
filename = '.'.join([resource.owner.device.model, resource.stage, 'revent']).lower()
alternate_location = os.path.join(location, 'revent_files')
# There tends to be some confusion as to where revent files should
# be placed. This looks both in the plugin's directory, and in
# 'revent_files' subdirectory under it, if it exists.
if os.path.isdir(alternate_location):
for candidate in os.listdir(alternate_location):
if candidate.lower() == filename.lower():
return os.path.join(alternate_location, candidate)
if os.path.isdir(location):
for candidate in os.listdir(location):
if candidate.lower() == filename.lower():
return os.path.join(location, candidate)
else:
raise ValueError('Unexpected resource type: {}'.format(resource.name))
# Utility functions
def get_from_location_by_plugin(resource, location, plugin, version=None):
try:
found_files = [os.path.join(location, f) for f in os.listdir(location)]
except OSError:
return None
try:
return get_from_list_by_plugin(resource, found_files, plugin, version)
except ResourceError:
raise ResourceError('More than one .{} found in {} for {}.'.format(plugin,
location,
resource.owner.name))
def get_from_list_by_plugin(resource, filelist, plugin, version=None):
filelist = [ff for ff in filelist
if os.path.splitext(ff)[1].lower().endswith(plugin)]
if version:
filelist = [ff for ff in filelist if version.lower() in os.path.basename(ff).lower()]
if len(filelist) == 1:
return filelist[0]
elif not filelist:
return None
else:
raise ResourceError('More than one .{} found in {} for {}.'.format(plugin,
filelist,
resource.owner.name))
def get_owner_path(resource):
if resource.owner is NO_ONE:
return os.path.join(os.path.dirname(__base_filepath), 'common')
else:
return os.path.dirname(sys.modules[resource.owner.__module__].__file__)
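A small usage sketch for the version-filtering helper above; the owner and file names are made up.

class _FakeOwner(object):
    name = 'foo'

class _FakeResource(object):
    owner = _FakeOwner()

files = ['/deps/foo/foo-4.2.apk', '/deps/foo/foo-5.0.apk', '/deps/foo/notes.txt']

# Exactly one .apk matches version "5.0", so its path is returned; with no
# version given, two candidates remain and ResourceError is raised instead.
print(get_from_list_by_plugin(_FakeResource(), files, 'apk', version='5.0'))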

View File

@@ -26,7 +26,7 @@ from wa.framework.exception import JobError
from wa.utils import counter
from wa.utils.serializer import json
from wa.utils.misc import ensure_directory_exists as _d
from wa.utils.types import TreeNode, caseless_string
from wa.utils.types import caseless_string

View File

@@ -45,11 +45,14 @@ class Signal(object):
:param name: name is the identifier of the Signal object. Signal instances with
the same name refer to the same execution state/stage.
:param invert_priority: boolean parameter that determines whether multiple
callbacks for the same signal should be ordered with
ascending or descending priorities. Typically this flag
should be set to True if the Signal is triggered AFTER an
a state/stage has been reached. That way callbacks with high
priorities will be called right after the event has occured.
callbacks for the same signal should be
ordered with ascending or descending
priorities. Typically this flag should be
set to True if the Signal is triggered
AFTER a state/stage has been reached.
That way callbacks with high priorities
will be called right after the event has
occurred.
"""
self.name = name
self.description = description
@@ -94,6 +97,10 @@ WARNING_LOGGED = Signal('warning-logged')
# even if there is an error, so you cannot assume in the handler that the
# device has booted successfully. In most cases, you should instead use the
# non-paired signals below.
BEFORE_RUN_INIT = Signal('before-run-init', invert_priority=True)
SUCCESSFUL_RUN_INIT = Signal('successful-run-init')
AFTER_RUN_INIT = Signal('after-run-init')
BEFORE_FLASHING = Signal('before-flashing', invert_priority=True)
SUCCESSFUL_FLASHING = Signal('successful-flashing')
AFTER_FLASHING = Signal('after-flashing')

80
wa/framework/target.py Normal file
View File

@@ -0,0 +1,80 @@
import string
from copy import copy
from devlib import Platform, AndroidTarget
from devlib.target import KernelConfig, KernelVersion, Cpuinfo
class TargetInfo(object):
@staticmethod
def from_pod(pod):
instance = TargetInfo()
instance.target = pod['target']
instance.abi = pod['abi']
instance.cpuinfo = Cpuinfo(pod['cpuinfo'])
instance.os = pod['os']
instance.os_version = pod['os_version']
instance.is_rooted = pod['is_rooted']
instance.kernel_version = KernelVersion(pod['kernel_release'],
pod['kernel_version'])
instance.kernel_config = KernelConfig(pod['kernel_config'])
if pod["target"] == "AndroidTarget":
instance.screen_resolution = pod['screen_resolution']
instance.prop = pod['prop']
instance.android_id = pod['android_id']
return instance
def __init__(self, target=None):
if target:
self.target = target.__class__.__name__
self.cpuinfo = target.cpuinfo
self.os = target.os
self.os_version = target.os_version
self.abi = target.abi
self.is_rooted = target.is_rooted
self.kernel_version = target.kernel_version
self.kernel_config = target.config
if isinstance(target, AndroidTarget):
self.screen_resolution = target.screen_resolution
self.prop = target.getprop()
self.android_id = target.android_id
else:
self.target = None
self.cpuinfo = None
self.os = None
self.os_version = None
self.abi = None
self.is_rooted = None
self.kernel_version = None
self.kernel_config = None
# target is None in this branch, so an isinstance() check can never
# be true; null the Android-specific attributes unconditionally.
self.screen_resolution = None
self.prop = None
self.android_id = None
def to_pod(self):
pod = {}
pod['target'] = self.target
pod['abi'] = self.abi
pod['cpuinfo'] = self.cpuinfo.sections
pod['os'] = self.os
pod['os_version'] = self.os_version
pod['is_rooted'] = self.is_rooted
pod['kernel_release'] = self.kernel_version.release
pod['kernel_version'] = self.kernel_version.version
pod['kernel_config'] = dict(self.kernel_config.iteritems())
if self.target == "AndroidTarget":
pod['screen_resolution'] = self.screen_resolution
pod['prop'] = self.prop
pod['android_id'] = self.android_id
return pod
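A round-trip usage sketch for the POD serialisation above; ``target`` is assumed to be an already-connected devlib target.

from wa.utils.serializer import json

info = TargetInfo(target)
with open('target.json', 'w') as wfh:
    json.dump(info.to_pod(), wfh)

with open('target.json') as fh:
    restored = TargetInfo.from_pod(json.load(fh))
assert restored.abi == info.abi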

View File

View File

@@ -0,0 +1,20 @@
from copy import copy
# Not going to be used for now.
class TargetConfig(dict):
"""
Represents a configuration for a target.
"""
def __init__(self, config=None):
if isinstance(config, TargetConfig):
self.__dict__ = copy(config.__dict__)
elif hasattr(config, 'iteritems'):
for k, v in config.iteritems():
self.set(k, v)
elif config:
raise ValueError(config)
def set(self, name, value):
setattr(self, name, value)

View File

@@ -0,0 +1,252 @@
from collections import OrderedDict
from copy import copy
from devlib import (LinuxTarget, AndroidTarget, LocalLinuxTarget,
Platform, Juno, TC2, Gem5SimulationPlatform)
from wa.framework import pluginloader
from wa.framework.exception import PluginLoaderError
from wa.framework.plugin import Plugin, Parameter
from wa.utils.types import list_of_strings, list_of_ints
from wa.utils.misc import isiterable
def get_target_descriptions(loader=pluginloader):
targets = {}
for cls in loader.list_target_descriptors():
descriptor = cls()
for desc in descriptor.get_descriptions():
if desc.name in targets:
msg = 'Duplicate target "{}" returned by {} and {}'
prev_dtor = targets[desc.name].source
raise PluginLoaderError(msg.format(desc.name, prev_dtor.name,
descriptor.name))
targets[desc.name] = desc
return targets.values()
class TargetDescription(object):
def __init__(self, name, source, description=None, target=None, platform=None,
conn=None, target_params=None, platform_params=None,
conn_params=None):
self.name = name
self.source = source
self.description = description
self.target = target
self.platform = platform
self.connection = conn
self._set('target_params', target_params)
self._set('platform_params', platform_params)
self._set('conn_params', conn_params)
def _set(self, attr, vals):
if vals is None:
vals = {}
elif isiterable(vals):
if not hasattr(vals, 'iteritems'):
vals = {v.name: v for v in vals}
else:
msg = '{} must be iterable; got "{}"'
raise ValueError(msg.format(attr, vals))
setattr(self, attr, vals)
class TargetDescriptor(Plugin):
kind = 'target_descriptor'
def get_descriptions(self):
return []
COMMON_TARGET_PARAMS = [
Parameter('working_directory', kind=str,
description='''
On-target working directory that will be used by WA. This
directory must be writable by the user WA logs in as without
the need for privilege elevation.
'''),
Parameter('executables_directory', kind=str,
description='''
On-target directory where WA will install its executable
binaries. This location must allow execution. This location does
*not* need to be writable by unprivileged users on rooted devices
(WA will install with elevated privileges as necessary).
'''),
Parameter('modules', kind=list_of_strings,
description='''
A list of additional modules to be installed for the target.
``devlib`` implements functionality for particular subsystems as
modules. A number of "default" modules (e.g. for cpufreq
subsystem) are loaded automatically, unless explicitly disabled.
If additional modules need to be loaded, they may be specified
using this parameter.
Please see ``devlib`` documentation for information on the available
modules.
'''),
]
COMMON_PLATFORM_PARAMS = [
Parameter('core_names', kind=list_of_strings,
description='''
List of names of CPU cores in the order that they appear to the
kernel. If not specified, it will be inferred from the platform.
'''),
Parameter('core_clusters', kind=list_of_ints,
description='''
Cluster mapping corresponding to the cores in ``core_names``.
Cluster indexing starts at ``0``. If not specified, this will be
inferred from ``core_names`` -- consecutive cores with the same
name will be assumed to share a cluster.
'''),
Parameter('big_core', kind=str,
description='''
The name of the big cores in a big.LITTLE system. If not
specified, this will be inferred, either from the name (if one of
the names in ``core_names`` matches known big cores), or by
assuming that the last cluster is big.
'''),
Parameter('model', kind=str,
description='''
Hardware model of the platform. If not specified, an attempt will
be made to read it from target.
'''),
Parameter('modules', kind=list_of_strings,
description='''
An additional list of modules to be loaded into the target.
'''),
]
VEXPRESS_PLATFORM_PARAMS = [
Parameter('serial_port', kind=str,
description='''
The serial device/port on the host for the initial connection to
the target (used for early boot, flashing, etc).
'''),
Parameter('baudrate', kind=int,
description='''
Baud rate for the serial connection.
'''),
Parameter('vemsd_mount', kind=str,
description='''
VExpress MicroSD card mount location. This is a MicroSD card in
the VExpress device that is mounted on the host via USB. The card
contains configuration files for the platform and firmware and
kernel images to be flashed.
'''),
Parameter('bootloader', kind=str,
allowed_values=['uefi', 'uefi-shell', 'u-boot', 'bootmon'],
description='''
Selects the bootloader mechanism used by the board. Depending on
firmware version, a number of possible boot mechanisms may be used.
Please see ``devlib`` documentation for descriptions.
'''),
Parameter('hard_reset_method', kind=str,
allowed_values=['dtr', 'reboottxt'],
description='''
There are a couple of ways to reset VersatileExpress board if the
software running on the board becomes unresponsive. Both require
configuration to be enabled (please see ``devlib`` documentation).
``dtr``: toggle the DTR line on the serial connection
``reboottxt``: create ``reboot.txt`` in the root of the VEMSD mount.
'''),
]
GEM5_PLATFORM_PARAMS = [
Parameter('host_output_dir', kind=str, mandatory=True,
description='''
Path on the host where gem5 output (e.g. stats file) will be placed.
'''),
Parameter('gem5_bin', kind=str, mandatory=True,
description='''
Path to the gem5 binary
'''),
Parameter('gem5_args', kind=str, mandatory=True,
description='''
Arguments to be passed to the gem5 binary
'''),
Parameter('gem5_virtio', kind=str, mandatory=True,
description='''
VirtIO device setup arguments to be passed to gem5. VirtIO is used
to transfer files between the simulation and the host.
'''),
]
# name --> (target_class, params_list, defaults)
TARGETS = {
'linux': (LinuxTarget, COMMON_TARGET_PARAMS, None),
'android': (AndroidTarget, COMMON_TARGET_PARAMS +
[Parameter('package_data_directory', kind=str, default='/data/data',
description='''
Directory containing Android data
'''),
], None),
'local': (LocalLinuxTarget, COMMON_TARGET_PARAMS, None),
}
# name --> (platform_class, params_list, defaults)
PLATFORMS = {
'generic': (Platform, COMMON_PLATFORM_PARAMS, None),
'juno': (Juno, COMMON_PLATFORM_PARAMS + VEXPRESS_PLATFORM_PARAMS,
{
'vemsd_mount': '/media/JUNO',
'baudrate': 115200,
'bootloader': 'u-boot',
'hard_reset_method': 'dtr',
}),
'tc2': (TC2, COMMON_PLATFORM_PARAMS + VEXPRESS_PLATFORM_PARAMS,
{
'vemsd_mount': '/media/VEMSD',
'baudrate': 38400,
'bootloader': 'bootmon',
'hard_reset_method': 'reboottxt',
}),
'gem5': (Gem5SimulationPlatform, GEM5_PLATFORM_PARAMS, None),
}
class DefaultTargetDescriptor(TargetDescriptor):
name = 'devlib_targets'
description = """
The default target descriptor that provides descriptions in the form
<platform>_<target>.
These map directly onto ``Target``\ s and ``Platform``\ s supplied by ``devlib``.
"""
def get_descriptions(self):
result = []
for target_name, target_tuple in TARGETS.iteritems():
target, target_params = self._get_item(target_tuple)
for platform_name, platform_tuple in PLATFORMS.iteritems():
platform, platform_params = self._get_item(platform_tuple)
name = '{}_{}'.format(platform_name, target_name)
td = TargetDescription(name, self)
td.target = target
td.platform = platform
td.target_params = target_params
td.platform_params = platform_params
result.append(td)
return result
def _get_item(self, item_tuple):
cls, params, defaults = item_tuple
if not defaults:
return cls, params
param_map = OrderedDict((p.name, copy(p)) for p in params)
for name, value in defaults.iteritems():
if name not in param_map:
raise ValueError('Unexpected default "{}"'.format(name))
param_map[name].default = value
return cls, param_map.values()
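A quick sketch of what ``DefaultTargetDescriptor`` yields: the cross product of the PLATFORMS and TARGETS tables above, twelve names in total.

for platform_name in PLATFORMS:
    for target_name in TARGETS:
        print('{}_{}'.format(platform_name, target_name))
# e.g. generic_android, juno_linux, gem5_local, ...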

View File

@@ -0,0 +1,78 @@
from devlib import AndroidTarget
from devlib.exception import TargetError
from devlib.target import KernelConfig, KernelVersion, Cpuinfo
class TargetInfo(object):
@staticmethod
def from_pod(pod):
instance = TargetInfo()
instance.target = pod['target']
instance.abi = pod['abi']
instance.cpuinfo = Cpuinfo(pod['cpuinfo'])
instance.os = pod['os']
instance.os_version = pod['os_version']
instance.is_rooted = pod['is_rooted']
instance.kernel_version = KernelVersion(pod['kernel_release'],
pod['kernel_version'])
instance.kernel_config = KernelConfig(pod['kernel_config'])
if pod["target"] == "AndroidTarget":
instance.screen_resolution = pod['screen_resolution']
instance.prop = pod['prop']
instance.android_id = pod['android_id']
return instance
def __init__(self, target=None):
if target:
self.target = target.__class__.__name__
self.cpuinfo = target.cpuinfo
self.os = target.os
self.os_version = target.os_version
self.abi = target.abi
self.is_rooted = target.is_rooted
self.kernel_version = target.kernel_version
self.kernel_config = target.config
if isinstance(target, AndroidTarget):
self.screen_resolution = target.screen_resolution
self.prop = target.getprop()
self.android_id = target.android_id
else:
self.target = None
self.cpuinfo = None
self.os = None
self.os_version = None
self.abi = None
self.is_rooted = None
self.kernel_version = None
self.kernel_config = None
# target is None in this branch, so an isinstance() check can never
# be true; null the Android-specific attributes unconditionally.
self.screen_resolution = None
self.prop = None
self.android_id = None
def to_pod(self):
pod = {}
pod['target'] = self.target
pod['abi'] = self.abi
pod['cpuinfo'] = self.cpuinfo.sections
pod['os'] = self.os
pod['os_version'] = self.os_version
pod['is_rooted'] = self.is_rooted
pod['kernel_release'] = self.kernel_version.release
pod['kernel_version'] = self.kernel_version.version
pod['kernel_config'] = dict(self.kernel_config.iteritems())
if self.target == "AndroidTarget":
pod['screen_resolution'] = self.screen_resolution
pod['prop'] = self.prop
pod['android_id'] = self.android_id
return pod

View File

@@ -0,0 +1,383 @@
import logging
import tempfile
import threading
import os
import time
import shutil
import sys
from wa.framework import signal
from wa.framework.exception import WorkerThreadError, ConfigError
from wa.framework.plugin import Parameter
from wa.framework.target.info import TargetInfo
from wa.framework.target.runtime_config import (SysfileValuesRuntimeConfig,
HotplugRuntimeConfig,
CpufreqRuntimeConfig,
CpuidleRuntimeConfig)
from wa.utils.misc import isiterable
from wa.utils.serializer import json
from devlib import LocalLinuxTarget, LinuxTarget, AndroidTarget
from devlib.utils.types import identifier
# from wa.target.manager import AndroidTargetManager, LinuxTargetManager
class TargetManager(object):
name = 'target-manager'
description = """
Instantiates the required target and performs configuration and validation
of the device.
"""
parameters = [
Parameter('disconnect', kind=bool, default=False,
description="""
Specifies whether the target should be disconnected from
at the end of the run.
"""),
]
DEVICE_MAPPING = {'test' : {'platform_name':'generic',
'target_name': 'android'},
'other': {'platform_name':'test',
'target_name': 'linux'},
}
runtime_config_cls = [
# order matters
SysfileValuesRuntimeConfig,
HotplugRuntimeConfig,
CpufreqRuntimeConfig,
CpuidleRuntimeConfig,
]
def __init__(self, name, parameters):
self.name = name
self.target = None
self.assistant = None
self.target_name = None
self.platform_name = None
self.parameters = parameters
self.disconnect = parameters.get('disconnect')
self.info = TargetInfo()
# Determine platform and target based on passed name
self._parse_name()
# Create target
self._get_target()
# Create an assistant to perform target specific configuration
self._get_assistant()
### HERE FOR TESTING, WILL BE CALLED EXTERNALLY ###
# Connect to device and retrieve details.
# self.initialize()
# self.add_parameters()
# self.validate_parameters()
# self.set_parameters()
def initialize(self):
self.runtime_configs = [cls(self.target) for cls in self.runtime_config_cls]
# if self.parameters:
# self.logger.info('Connecting to the device')
with signal.wrap('TARGET_CONNECT'):
self.target.connect()
# self.info.load(self.target)
# info_file = os.path.join(self.context.info_directory, 'target.json')
# with open(info_file, 'w') as wfh:
# json.dump(self.info.to_pod(), wfh)
def finalize(self):
# self.logger.info('Disconnecting from the device')
if self.disconnect:
with signal.wrap('TARGET_DISCONNECT'):
self.target.disconnect()
def add_parameters(self, parameters=None):
if parameters:
self.parameters = parameters
if not self.parameters:
raise ConfigError('No Configuration Provided')
for name in self.parameters.keys():
for cfg in self.runtime_configs:
# if name in cfg.supported_parameters:
if any(parameter in name for parameter in cfg.supported_parameters):
cfg.add(name, self.parameters.pop(name))
def validate_parameters(self):
for cfg in self.runtime_configs:
cfg.validate()
def set_parameters(self):
for cfg in self.runtime_configs:
cfg.set()
def clear_parameters(self):
for cfg in self.runtime_configs:
cfg.clear()
def _parse_name(self):
# Try and get platform and target
self.name = identifier(self.name.replace('-', '_'))
if '_' in self.name:
self.platform_name, self.target_name = self.name.split('_', 1)
elif self.name in self.DEVICE_MAPPING:
self.platform_name = self.DEVICE_MAPPING[self.name]['platform_name']
self.target_name = self.DEVICE_MAPPING[self.name]['target_name']
else:
raise ConfigError('Unknown Device Specified {}'.format(self.name))
def _get_target(self):
# Create a corresponding target and target-assistant
if self.target_name == 'android':
self.target = AndroidTarget()
elif self.target_name == 'linux':
self.target = LinuxTarget() # pylint: disable=redefined-variable-type
elif self.target_name == 'localLinux':
self.target = LocalLinuxTarget()
else:
raise ConfigError('Unknown Target Specified {}'.format(self.target_name))
def _get_assistant(self):
# Create a corresponding target-assistant to handle platform-specific functionality.
if self.target_name == 'android':
self.assistant = AndroidAssistant(self.target)
elif self.target_name in ['linux', 'localLinux']:
self.assistant = LinuxAssistant(self.target) # pylint: disable=redefined-variable-type
else:
raise ConfigError('Unknown Target Specified {}'.format(self.target_name))
# def validate_runtime_parameters(self, parameters):
# for name, value in parameters.iteritems():
# self.add_parameter(name, value)
# self.validate_parameters()
# def set_runtime_parameters(self, parameters):
# # self.clear()
# for name, value in parameters.iteritems():
# self.add_parameter(name, value)
# self.set_parameters()
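A hedged usage sketch of the runtime-parameter flow above; the device name and the sysfile path are illustrative only.

tm = TargetManager('generic_android', {'disconnect': True})
tm.initialize()  # connects to the target and builds the runtime configs
tm.add_parameters({'sysfile_values':
                   {'/proc/sys/kernel/hostname': 'wa-device'}})
tm.validate_parameters()
tm.set_parameters()
tm.finalize()    # disconnects, since disconnect=True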
class LinuxAssistant(object):
name = 'linux-assistant'
description = """
Performs configuration, instrumentation, etc. during runs on Linux targets.
"""
def __init__(self, target, **kwargs):
self.target = target
# parameters = [
# Parameter('disconnect', kind=bool, default=False,
# description="""
# Specifies whether the target should be disconnected from
# at the end of the run.
# """),
# ]
# runtime_config_cls = [
# # order matters
# SysfileValuesRuntimeConfig,
# HotplugRuntimeConfig,
# CpufreqRuntimeConfig,
# CpuidleRuntimeConfig,
# ]
# def __init__(self, target, context, **kwargs):
# # super(LinuxTargetManager, self).__init__(target, context, **kwargs)
# self.target = target
# self.context = context
# self.info = TargetInfo()
# self.runtime_configs = [cls(target) for cls in self.runtime_config_cls]
# def __init__(self):
# # super(LinuxTargetManager, self).__init__(target, context, **kwargs)
# self.target = target
# self.info = TargetInfo()
# self.parameters = parameters
# self.info = TargetInfo()
# self.runtime_configs = [cls(target) for cls in self.runtime_config_cls]
# def initialize(self):
# # self.runtime_configs = [cls(self.target) for cls in self.runtime_config_cls]
# # if self.parameters:
# self.logger.info('Connecting to the device')
# with signal.wrap('TARGET_CONNECT'):
# self.target.connect()
# self.info.load(self.target)
# # info_file = os.path.join(self.context.info_directory, 'target.json')
# # with open(info_file, 'w') as wfh:
# # json.dump(self.info.to_pod(), wfh)
# def finalize(self, runner):
# self.logger.info('Disconnecting from the device')
# if self.disconnect:
# with signal.wrap('TARGET_DISCONNECT'):
# self.target.disconnect()
# def _add_parameters(self):
# for name, value in self.parameters.iteritems():
# self.add_parameter(name, value)
# def validate_runtime_parameters(self, parameters):
# self.clear()
# for name, value in parameters.iteritems():
# self.add_parameter(name, value)
# self.validate_parameters()
# def set_runtime_parameters(self, parameters):
# self.clear()
# for name, value in parameters.iteritems():
# self.add_parameter(name, value)
# self.set_parameters()
# def clear_parameters(self):
# for cfg in self.runtime_configs:
# cfg.clear()
# def add_parameter(self, name, value):
# for cfg in self.runtime_configs:
# if name in cfg.supported_parameters:
# cfg.add(name, value)
# return
# raise ConfigError('Unexpected runtime parameter "{}".'.format(name))
# def validate_parameters(self):
# for cfg in self.runtime_configs:
# cfg.validate()
# def set_parameters(self):
# for cfg in self.runtime_configs:
# cfg.set()
class AndroidAssistant(LinuxAssistant):
name = 'android-assistant'
description = """
Extends ``LinuxAssistant`` with Android-specific operations.
"""
parameters = [
Parameter('logcat_poll_period', kind=int,
description="""
If specified, logcat will be cached in a temporary file on the
host every ``logcat_poll_period`` seconds. This is useful for
longer job executions, where the on-device logcat buffer may not be
big enough to capture output for the entire execution.
"""),
]
def __init__(self, target, **kwargs):
super(AndroidAssistant, self).__init__(target)
self.logcat_poll_period = kwargs.get('logcat_poll_period', None)
if self.logcat_poll_period:
self.logcat_poller = LogcatPoller(target, self.logcat_poll_period)
else:
self.logcat_poller = None
# def __init__(self, target, context, **kwargs):
# super(AndroidAssistant, self).__init__(target, context, **kwargs)
# self.logcat_poll_period = kwargs.get('logcat_poll_period', None)
# if self.logcat_poll_period:
# self.logcat_poller = LogcatPoller(target, self.logcat_poll_period)
# else:
# self.logcat_poller = None
# def next_job(self, job):
# super(AndroidAssistant, self).next_job(job)
# if self.logcat_poller:
# self.logcat_poller.start()
# def job_done(self, job):
# super(AndroidAssistant, self).job_done(job)
# if self.logcat_poller:
# self.logcat_poller.stop()
# outfile = os.path.join(self.context.output_directory, 'logcat.log')
# self.logger.debug('Dumping logcat to {}'.format(outfile))
# self.dump_logcat(outfile)
# self.clear()
def dump_logcat(self, outfile):
if self.logcat_poller:
self.logcat_poller.write_log(outfile)
else:
self.target.dump_logcat(outfile)
def clear_logcat(self):
if self.logcat_poller:
self.logcat_poller.clear_buffer()
class LogcatPoller(threading.Thread):
def __init__(self, target, period=60, timeout=30):
super(LogcatPoller, self).__init__()
self.target = target
self.logger = logging.getLogger('logcat')
self.period = period
self.timeout = timeout
self.stop_signal = threading.Event()
self.lock = threading.Lock()
self.buffer_file = tempfile.mktemp()
self.last_poll = 0
self.daemon = True
self.exc = None
def run(self):  # executed in the worker thread once Thread.start() is called
self.logger.debug('starting polling')
try:
while True:
if self.stop_signal.is_set():
break
with self.lock:
current_time = time.time()
if (current_time - self.last_poll) >= self.period:
self.poll()
time.sleep(0.5)
except Exception: # pylint: disable=W0703
self.exc = WorkerThreadError(self.name, sys.exc_info())
self.logger.debug('polling stopped')
def stop(self):
self.logger.debug('Stopping logcat polling')
self.stop_signal.set()
self.join(self.timeout)
if self.is_alive():
self.logger.error('Could not join logcat poller thread.')
if self.exc:
raise self.exc # pylint: disable=E0702
def clear_buffer(self):
self.logger.debug('clearing logcat buffer')
with self.lock:
self.target.clear_logcat()
with open(self.buffer_file, 'w') as _: # NOQA
pass
def write_log(self, outfile):
with self.lock:
self.poll()
if os.path.isfile(self.buffer_file):
shutil.copy(self.buffer_file, outfile)
else: # there was no logcat trace at this time
with open(outfile, 'w') as _: # NOQA
pass
def close(self):
self.logger.debug('closing poller')
if os.path.isfile(self.buffer_file):
os.remove(self.buffer_file)
def poll(self):
self.last_poll = time.time()
self.target.dump_logcat(self.buffer_file, append=True, timeout=self.timeout)
self.target.clear_logcat()
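A short usage sketch; ``target`` is assumed to be a connected AndroidTarget.

poller = LogcatPoller(target, period=30)
poller.start()                  # spawns the daemon polling thread
# ... run the workload ...
poller.write_log('logcat.log')  # flush buffered logcat to a host-side file
poller.stop()
poller.close()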

View File

@@ -0,0 +1,454 @@
from collections import defaultdict, OrderedDict
from wa.framework.plugin import Plugin
from wa.framework.exception import ConfigError
from devlib.exception import TargetError
from devlib.utils.misc import unique
from devlib.utils.types import integer
class RuntimeConfig(Plugin):
kind = 'runtime-config'
parameters = [
]
# class RuntimeConfig(object):
@property
def supported_parameters(self):
raise NotImplementedError()
@property
def core_names(self):
return unique(self.target.core_names)
def __init__(self, target):
super(RuntimeConfig, self).__init__()
self.target = target
def initialize(self, context):
pass
def add(self, name, value):
raise NotImplementedError()
def validate(self):
return True
def set(self):
raise NotImplementedError()
def clear(self):
raise NotImplementedError()
class HotplugRuntimeConfig(RuntimeConfig):
# NOTE: Currently, if initialized while cores are hotplugged out, this will fail when trying to hotplug them back in.
@property
def supported_parameters(self):
params = ['cores']
return params
def __init__(self, target):
super(HotplugRuntimeConfig, self).__init__(target)
self.num_cores = defaultdict(dict)
def add(self, name, value):
if not self.target.has('hotplug'):
raise TargetError('Target does not support hotplug.')
core, _ = split_parameter_name(name, self.supported_parameters)
# cpus = cpusFromPrefix(core, self.target)
# core = name.split('_')[0]
value = integer(value)
if core not in self.core_names:
raise ValueError(name)
max_cores = self.core_count(core)
if value > max_cores:
message = 'Cannot set number of {}\'s to {}; max is {}'
raise ValueError(message.format(core, value, max_cores))
self.num_cores[core] = value
if all(v == 0 for v in self.num_cores.values()):
raise ValueError('Cannot set number of all cores to 0')
def set(self):
for c, n in sorted(self.num_cores.iteritems(),
key=lambda x: x[1], reverse=True):
self.set_num_online_cpus(c, n)
def clear(self):
self.num_cores = {}
def set_num_online_cpus(self, core, number):
indexes = [i for i, c in enumerate(self.target.core_names) if c == core]
self.target.hotplug.online(*indexes[:number])
self.target.hotplug.offline(*indexes[number:])
def core_count(self, core):
return sum(1 for c in self.target.core_names if c == core)
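# Worked example of the index bookkeeping in set_num_online_cpus(),
# using a hypothetical 4+4 big.LITTLE layout (pure-Python illustration):
core_names = ['A53'] * 4 + ['A57'] * 4
indexes = [i for i, c in enumerate(core_names) if c == 'A57']
assert indexes[:2] == [4, 5]   # would be hotplugged in
assert indexes[2:] == [6, 7]   # would be hotplugged out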
class SysfileValuesRuntimeConfig(RuntimeConfig):
@property
def supported_parameters(self):
return ['sysfile_values']
def __init__(self, target):
super(SysfileValuesRuntimeConfig, self).__init__(target)
self.sysfile_values = OrderedDict()
def add(self, name, value):
for f, v in value.iteritems():
# A trailing '+' means the file is not required to exist yet, so
# the existence check is skipped; a trailing '!' (honoured in
# set() below) means the written value will not be verified.
if f.endswith('+'):
f = f[:-1]
elif f.endswith('+!'):
f = f[:-2] + '!'
else:
if f.endswith('!'):
self._check_exists(f[:-1])
else:
self._check_exists(f)
self.sysfile_values[f] = v
def set(self):
for f, v in self.sysfile_values.iteritems():
verify = True
if f.endswith('!'):
verify = False
f = f[:-1]
self.target.write_value(f, v, verify=verify)
def clear(self):
self.sysfile_values = OrderedDict()
def _check_exists(self, path):
if not self.target.file_exists(path):
raise ConfigError('Sysfile "{}" does not exist.'.format(path))
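# Illustrative sysfile_values entry, assuming a connected Linux devlib
# target; the paths and values are examples only:
config = SysfileValuesRuntimeConfig(target)
config.add('sysfile_values', {'/proc/sys/kernel/sched_latency_ns': 10000000,
                              '/sys/kernel/debug/tracing/tracing_on!': 1})  # '!': write unverified
config.set()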
class CpufreqRuntimeConfig(RuntimeConfig):
@property
def supported_parameters(self):
return ['frequency',
'max_frequency',
'min_frequency',
'governor',
'governor_tunables']
def __init__(self, target):
super(CpufreqRuntimeConfig, self).__init__(target)
self.config = defaultdict(dict)
self.supports_userspace = None
self.supported_freqs = {}
self.supported_governors = {}
self.min_supported_freq = {}
self.max_supported_freq = {}
for cpu in self.target.list_online_cpus():
self.supported_freqs[cpu] = self.target.cpufreq.list_frequencies(cpu) or []
self.supported_governors[cpu] = self.target.cpufreq.list_governors(cpu) or []
def add(self, name, value):
if not self.target.has('cpufreq'):
raise TargetError('Target does not support cpufreq.')
prefix, parameter = split_parameter_name(name, self.supported_parameters)
# Get list of valid cpus for a given prefix.
cpus = uniqueDomainCpusFromPrefix(prefix, self.target)
for cpu in cpus:
if parameter.endswith('frequency'):
try:
value = integer(value)
except ValueError:
if value.upper() == 'MAX':
value = self.supported_freqs[cpu][-1]
elif value.upper() == 'MIN':
value = self.supported_freqs[cpu][0]
else:
msg = 'Invalid value {} specified for {}'
raise ConfigError(msg.format(value, parameter))
self.config[cpu][parameter] = value
def set(self):
for cpu in self.config:
config = self.config[cpu]
if config.get('governor'):
self.configure_governor(cpu,
config.get('governor'),
config.get('governor_tunables'))
self.configure_frequency(cpu,
config.get('frequency'),
config.get('min_frequency'),
config.get('max_frequency'))
def clear(self):
self.config = defaultdict(dict)
def validate(self):
for cpu in self.config:
if cpu not in self.target.list_online_cpus():
message = 'Cannot configure frequencies for {} as no CPUs are online.'
raise TargetError(message.format(cpu))
config = self.config[cpu]
minf = config.get('min_frequency')
maxf = config.get('max_frequency')
freq = config.get('frequency')
governor = config.get('governor')
governor_tunables = config.get('governor_tunables')
if minf and maxf and minf > maxf:
message = '{}: min_frequency ({}) cannot be greater than max_frequency ({})'
raise ConfigError(message.format(cpu, minf, maxf))
if freq and maxf and freq > maxf:
message = '{}: cpu frequency ({}) cannot be greater than max_frequency ({})'
raise ConfigError(message.format(cpu, freq, maxf))
if minf and freq and minf > freq:
message = '{}: min_frequency ({}) cannot be greater than cpu frequency ({})'
raise ConfigError(message.format(cpu, minf, freq))
# Check that either the userspace governor is available, or that the
# requested min/max frequencies do not differ from the fixed frequency.
if 'userspace' not in self.supported_governors[cpu]:
self.supports_userspace = False
if minf and minf != freq:
message = '{}: "userspace" governor not available; min frequency ({}) cannot differ from frequency ({})'
raise ConfigError(message.format(cpu, minf, freq))
if maxf and maxf != freq:
message = '{}: "userspace" governor not available; max frequency ({}) cannot differ from frequency ({})'
raise ConfigError(message.format(cpu, maxf, freq))
else:
self.supports_userspace = True
# Check that the specified values are available on the cpu
if minf and minf not in self.supported_freqs[cpu]:
msg = '{}: Minimum frequency {} kHz not available. Must be in {}'.format(cpu, minf, self.supported_freqs[cpu])
raise TargetError(msg)
if maxf and maxf not in self.supported_freqs[cpu]:
msg = '{}: Maximum frequency {} kHz not available. Must be in {}'.format(cpu, maxf, self.supported_freqs[cpu])
raise TargetError(msg)
if freq and freq not in self.supported_freqs[cpu]:
msg = '{}: Frequency {} kHz not available. Must be in {}'.format(cpu, freq, self.supported_freqs[cpu])
raise TargetError(msg)
if governor and governor not in self.supported_governors[cpu]:
raise TargetError('{}: {} governor not available'.format(cpu, governor))
if governor_tunables and not governor:
raise TargetError('{}: governor tunables cannot be provided without a governor'.format(cpu))
def configure_frequency(self, cpu, freq=None, min_freq=None, max_freq=None):
if cpu not in self.target.list_online_cpus():
message = 'Cannot configure frequencies for {} as no CPUs are online.'
raise TargetError(message.format(cpu))
current_min_freq = self.target.cpufreq.get_min_frequency(cpu)
current_freq = self.target.cpufreq.get_frequency(cpu)
current_max_freq = self.target.cpufreq.get_max_frequency(cpu)
if freq:
# If the 'userspace' governor is not available, emulate a fixed
# frequency by pinning both limits to the requested value.
if not self.supports_userspace:
min_freq = max_freq = freq
# Default unspecified limits to the current ones so that the
# min/max writes below are always well-defined.
if min_freq is None:
min_freq = current_min_freq
if max_freq is None:
max_freq = current_max_freq
if freq < current_freq:
self.target.cpufreq.set_min_frequency(cpu, min_freq)
if self.supports_userspace:
self.target.cpufreq.set_frequency(cpu, freq)
self.target.cpufreq.set_max_frequency(cpu, max_freq)
else:
self.target.cpufreq.set_max_frequency(cpu, max_freq)
if self.supports_userspace:
self.target.cpufreq.set_frequency(cpu, freq)
self.target.cpufreq.set_min_frequency(cpu, min_freq)
return
min_freq_set = False
if max_freq:
if max_freq < current_min_freq:
if min_freq:
self.target.cpufreq.set_min_frequency(cpu, min_freq)
self.target.cpufreq.set_max_frequency(cpu, max_freq)
min_freq_set = True
else:
message = '{}: Cannot set max_frequency ({}) below current min frequency ({}).'
raise TargetError(message.format(cpu, max_freq, current_min_freq))
else:
self.target.cpufreq.set_max_frequency(cpu, max_freq)
if min_freq and not min_freq_set:
current_max_freq = max_freq or current_max_freq
if min_freq > current_max_freq:
message = '{}: Cannot set min_frequency ({}) above current max frequency ({}).'
raise TargetError(message.format(cpu, min_freq, current_max_freq))
self.target.cpufreq.set_min_frequency(cpu, min_freq)
def configure_governor(self, cpu, governor, governor_tunables=None):
if cpu not in self.target.list_online_cpus():
message = 'Cannot configure governor for {} as no CPUs are online.'
raise TargetError(message.format(cpu))
# for cpu in self.target.list_online_cpus(cpu): #All cpus or only online?
if governor not in self.supported_governors[cpu]:
raise TargetError('{}: {} governor not available'.format(cpu, governor))
if governor_tunables:
self.target.cpufreq.set_governor(cpu, governor, **governor_tunables)
else:
self.target.cpufreq.set_governor(cpu, governor)
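# Illustrative cpufreq configuration, assuming a connected big.LITTLE
# devlib target; parameter names follow the '<prefix>_<parameter>' form
# parsed by split_parameter_name() below, and all values are examples:
config = CpufreqRuntimeConfig(target)
config.add('big_governor', 'userspace')
config.add('big_frequency', 'MAX')           # resolved to the highest step
config.add('little_max_frequency', 1000000)  # kHz
config.validate()
config.set()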
class CpuidleRuntimeConfig(RuntimeConfig):
@property
def supported_parameters(self):
params = ['idle_states']
return params
def __init__(self, target):
super(CpuidleRuntimeConfig, self).__init__(target)
self.config = defaultdict(dict)
self.aliases = ['ENABLE_ALL', 'DISABLE_ALL']
self.available_states = {}
for cpu in self.target.list_online_cpus():
self.available_states[cpu] = self.target.cpuidle.get_states(cpu) or []
def add(self, name, values):
if not self.target.has('cpuidle'):
raise TargetError('Target does not support cpuidle.')
prefix, _ = split_parameter_name(name, self.supported_parameters)
cpus = uniqueDomainCpusFromPrefix(prefix, self.target)
for cpu in cpus:
if values in self.aliases:
self.config[cpu] = [values]
else:
self.config[cpu] = values
def validate(self):
for cpu in self.config:
if cpu not in self.target.list_online_cpus():
message = 'Cannot configure idle states for {} as no CPUs are online.'
raise TargetError(message.format(cpu))
for state in self.config[cpu]:
state = state[1:] if state.startswith('~') else state
if state not in self.available_states[cpu] + self.aliases:
message = 'Unexpected idle state "{}"; must be in {}'
raise ConfigError(message.format(state, self.available_states[cpu] + self.aliases))
def clear(self):
self.config = defaultdict(dict)
def set(self):
for cpu in self.config:
for state in self.config[cpu]:
self.configure_idle_state(state, cpu)
def configure_idle_state(self, state, cpu=None):
if cpu is not None:
if cpu not in self.target.list_online_cpus():
message = 'Cannot configure idle state for {} as it is not online (online cpus: {}).'
raise TargetError(message.format(self.target.core_names[cpu], self.target.list_online_cpus()))
else:
cpu = 0
# Check for aliases
if state == 'ENABLE_ALL':
self.target.cpuidle.enable_all(cpu)
elif state == 'DISABLE_ALL':
self.target.cpuidle.disable_all(cpu)
elif state.startswith('~'):
self.target.cpuidle.disable(state[1:], cpu)
else:
self.target.cpuidle.enable(state, cpu)
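# Illustrative idle-state configuration, assuming a connected devlib
# target; state names are target-specific and purely illustrative here
# ('~' marks a state to disable):
config = CpuidleRuntimeConfig(target)
config.add('big_idle_states', ['~C1'])          # disable C1 on big cpus
config.add('little_idle_states', 'ENABLE_ALL')  # alias: enable everything
config.validate()
config.set()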
# TO BE MOVED TO UTILS FILE
import re
# Return the cpu prefix (without the trailing underscore, if present)
# and the matching parameter from a given list of supported parameters.
def split_parameter_name(name, params):
for param in sorted(params, key=len, reverse=True):  # try longest match first
if len(name.split(param)) > 1:
prefix, _ = name.split(param)
return prefix[:-1], param
message = 'Cannot split "{}"; must be in the form [core_]parameter'
raise ConfigError(message.format(name))
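# Worked examples of the parsing above, using the cpufreq parameter list
# for illustration:
_params = ['frequency', 'max_frequency', 'min_frequency', 'governor']
assert split_parameter_name('big_max_frequency', _params) == ('big', 'max_frequency')
assert split_parameter_name('frequency', _params) == ('', 'frequency')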
def cpusFromPrefix(prefix, target):
# Deal with big little substitution
if prefix.lower() == 'big':
prefix = target.big_core
if not prefix:
raise ConfigError('big core name could not be retrieved')
elif prefix.lower() == 'little':
prefix = target.little_core
if not prefix:
raise ConfigError('little core name could not be retrieved')
cpu_list = target.list_online_cpus() + target.list_offline_cpus()
# Apply to all cpus
if not prefix:
cpus = cpu_list
# Return all cores with specified name
elif prefix in target.core_names:
cpus = target.core_cpus(prefix)
# Check if core number has been supplied.
else:
core_no = re.match('cpu([0-9]+)', prefix, re.IGNORECASE)
if core_no:
cpus = [int(core_no.group(1))]
if cpus[0] not in cpu_list:
message = 'CPU{} is not available, must be in {}'
raise ConfigError(message.format(cpus[0], cpu_list))
else:
message = 'Unexpected core name "{}"'
raise ConfigError(message.format(prefix))
return cpus
# Return at most one cpu per cpufreq frequency domain from the matched list.
def uniqueDomainCpusFromPrefix(prefix, target):
cpus = cpusFromPrefix(prefix, target)
# iterate over the original list while filtering the working copy
for cpu in cpus:
if cpu not in cpus:  # already removed as part of an earlier domain
continue
cpus = [c for c in cpus if (c == cpu) or
(c not in target.cpufreq.get_domain_cpus(cpu))]
return cpus
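# Worked example, assuming a connected 4+4 big.LITTLE devlib target where
# cpus 0-3 and 4-7 each share one cpufreq policy: 'big' resolves to
# [4, 5, 6, 7], and domain filtering keeps a single representative.
big_cpus = uniqueDomainCpusFromPrefix('big', target)
assert len(big_cpus) == 1   # one cpu per frequency domain (illustrative)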

View File

@@ -32,9 +32,10 @@ class Workload(TargetedPlugin):
def init_resources(self, context):
"""
This method may be used to perform early resource discovery and initialization. This is invoked
during the initial loading stage and before the device is ready, so cannot be used for any
device-dependent initialization. This method is invoked before the workload instance is
This method may be used to perform early resource discovery and
initialization. This is invoked during the initial loading stage and
before the device is ready, so cannot be used for any device-dependent
initialization. This method is invoked before the workload instance is
validated.
"""
@@ -59,7 +60,10 @@ class Workload(TargetedPlugin):
pass
def run(self, context):
"""Execute the workload. This is the method that performs the actual "work" of the"""
"""
Execute the workload. This is the method that performs the actual
"work" of the workload.
"""
pass
def update_result(self, context):
@@ -83,7 +87,8 @@ class Workload(TargetedPlugin):
class UiAutomatorGUI(object):
def __init__(self, target, package='', klass='UiAutomation', method='runUiAutoamtion'):
def __init__(self, target, package='', klass='UiAutomation',
method='runUiAutomation'):
self.target = target
self.uiauto_package = package
self.uiauto_class = klass