mirror of
				https://github.com/ARM-software/workload-automation.git
				synced 2025-10-31 15:12:25 +00:00 
			
		
		
		
	Initial commit of open source Workload Automation.
This commit is contained in:
		
							
								
								
									
										16
									
								
								wlauto/core/__init__.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										16
									
								
								wlauto/core/__init__.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,16 @@ | ||||
| #    Copyright 2013-2015 ARM Limited | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
| # | ||||
|  | ||||
|  | ||||
							
								
								
									
										244
									
								
								wlauto/core/agenda.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										244
									
								
								wlauto/core/agenda.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,244 @@ | ||||
| #    Copyright 2015 ARM Limited | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
| # | ||||
|  | ||||
| import os | ||||
| from copy import copy | ||||
| from collections import OrderedDict, defaultdict | ||||
|  | ||||
| from wlauto.exceptions import ConfigError | ||||
| from wlauto.utils.misc import load_struct_from_yaml, LoadSyntaxError | ||||
| from wlauto.utils.types import counter, reset_counter | ||||
|  | ||||
| import yaml | ||||
|  | ||||
|  | ||||
def get_aliased_param(d, aliases, default=None, pop=True):
    """
    Look up a value in ``d`` under any one of ``aliases``.

    At most one of the aliases may be present in ``d``; if several are,
    a ``ConfigError`` is raised. When ``pop`` is ``True`` (the default),
    the matched key is removed from ``d``. If no alias is present,
    ``default`` is returned.

    """
    present = [a for a in aliases if a in d]
    if len(present) > 1:
        message = 'Only one of {} may be specified in a single entry'
        raise ConfigError(message.format(aliases))
    if not present:
        return default
    key = present[0]
    return d.pop(key) if pop else d[key]
|  | ||||
|  | ||||
class AgendaEntry(object):
    """Base class for agenda entries; knows how to render itself as a dict."""

    def to_dict(self):
        # Return a shallow copy so that callers cannot mutate this entry's
        # attributes through the returned dict.
        return copy(self.__dict__)
|  | ||||
|  | ||||
class AgendaWorkloadEntry(AgendaEntry):
    """
    Describes a single workload to be executed: which workload, under what
    label, for how many iterations, and with what boot/runtime/workload
    parameter overrides, instrumentation and flash settings.

    """

    def __init__(self, **kwargs):
        super(AgendaWorkloadEntry, self).__init__()
        self.id = kwargs.pop('id')
        self.workload_name = get_aliased_param(kwargs, ['workload_name', 'name'])
        if not self.workload_name:
            raise ConfigError('No workload name specified in entry {}'.format(self.id))
        self.label = kwargs.pop('label', self.workload_name)
        self.number_of_iterations = kwargs.pop('iterations', None)
        # Each parameter group may be spelled under several aliases; resolve
        # them in a fixed order so duplicate-alias errors are deterministic.
        aliased = [
            ('boot_parameters', ['boot_parameters', 'boot_params']),
            ('runtime_parameters', ['runtime_parameters', 'runtime_params']),
            ('workload_parameters', ['workload_parameters', 'workload_params', 'params']),
        ]
        for attr, aliases in aliased:
            setattr(self, attr, get_aliased_param(kwargs, aliases, default=OrderedDict()))
        self.instrumentation = kwargs.pop('instrumentation', [])
        self.flash = kwargs.pop('flash', OrderedDict())
        if kwargs:
            raise ConfigError('Invalid entry(ies) in workload {}: {}'.format(self.id, ', '.join(kwargs.keys())))
|  | ||||
|  | ||||
class AgendaSectionEntry(AgendaEntry):
    """
    Specifies a section of an agenda: configuration (iterations, boot/runtime/
    workload parameters, instrumentation, flash settings) shared by the group
    of workload entries defined within the section.

    """

    def __init__(self, agenda, **kwargs):
        super(AgendaSectionEntry, self).__init__()
        self.id = kwargs.pop('id')
        self.number_of_iterations = kwargs.pop('iterations', None)
        self.boot_parameters = get_aliased_param(kwargs,
                                                 ['boot_parameters', 'boot_params'],
                                                 default=OrderedDict())
        # NOTE: in sections (unlike workload entries), the bare 'params'
        # alias maps onto runtime, not workload, parameters.
        self.runtime_parameters = get_aliased_param(kwargs,
                                                    ['runtime_parameters', 'runtime_params', 'params'],
                                                    default=OrderedDict())
        self.workload_parameters = get_aliased_param(kwargs,
                                                     ['workload_parameters', 'workload_params'],
                                                     default=OrderedDict())
        self.instrumentation = kwargs.pop('instrumentation', [])
        self.flash = kwargs.pop('flash', OrderedDict())
        self.workloads = []
        # Nested workload entries are resolved through the owning agenda so
        # their IDs are assigned from the agenda-wide 'workload' pool.
        for w in kwargs.pop('workloads', []):
            self.workloads.append(agenda.get_workload_entry(w))
        if kwargs:
            raise ConfigError('Invalid entry(ies) in section {}: {}'.format(self.id, ', '.join(kwargs.keys())))

    def to_dict(self):
        # Convert nested workload entries recursively.
        d = copy(self.__dict__)
        d['workloads'] = [w.to_dict() for w in self.workloads]
        return d
|  | ||||
|  | ||||
class AgendaGlobalEntry(AgendaEntry):
    """
    Workload configuration global to all workloads.

    """

    def __init__(self, **kwargs):
        super(AgendaGlobalEntry, self).__init__()
        self.number_of_iterations = kwargs.pop('iterations', None)
        # Resolve aliased parameter groups in a fixed order so that
        # duplicate-alias errors are reported deterministically.
        aliased = [
            ('boot_parameters', ['boot_parameters', 'boot_params']),
            ('runtime_parameters', ['runtime_parameters', 'runtime_params', 'params']),
            ('workload_parameters', ['workload_parameters', 'workload_params']),
        ]
        for attr, aliases in aliased:
            setattr(self, attr, get_aliased_param(kwargs, aliases, default=OrderedDict()))
        self.instrumentation = kwargs.pop('instrumentation', [])
        self.flash = kwargs.pop('flash', OrderedDict())
        if kwargs:
            raise ConfigError('Invalid entries in global section: {}'.format(kwargs))
|  | ||||
|  | ||||
class Agenda(object):
    """
    In-memory representation of an execution agenda: the description of which
    workloads to run, in what sections, and with what global configuration.

    :param source: a file path, an open file-like object, or a YAML string
                   from which the agenda is loaded. If ``None``, an empty
                   agenda is created.
    :raises ConfigError: if the source cannot be parsed or does not describe
                         a valid agenda.

    """

    def __init__(self, source=None):
        self.filepath = None
        self.config = None
        self.global_ = None
        self.sections = []
        self.workloads = []
        # IDs already in use, per pool ('section' / 'workload'), so that
        # auto-assigned IDs never clash with explicitly specified ones.
        self._seen_ids = defaultdict(set)
        if source:
            try:
                reset_counter('section')
                reset_counter('workload')
                self._load(source)
            # 'as e' replaces the deprecated 'except X, e' comma syntax
            # (works on Python 2.6+ and is required for Python 3).
            except (ConfigError, LoadSyntaxError, SyntaxError) as e:
                # Normalize all parse/config failures into ConfigError.
                raise ConfigError(str(e))

    def add_workload_entry(self, w):
        """Parse *w* and append the resulting entry to ``self.workloads``."""
        entry = self.get_workload_entry(w)
        self.workloads.append(entry)

    def get_workload_entry(self, w):
        """Convert *w* (a name string or a dict) into an AgendaWorkloadEntry."""
        if isinstance(w, basestring):
            # A bare string is shorthand for {'name': <string>}.
            w = {'name': w}
        if not isinstance(w, dict):
            raise ConfigError('Invalid workload entry: "{}" in {}'.format(w, self.filepath))
        self._assign_id_if_needed(w, 'workload')
        return AgendaWorkloadEntry(**w)

    def _load(self, source):
        """Parse *source* and populate config/global/sections/workloads."""
        raw = self._load_raw_from_source(source)
        if not isinstance(raw, dict):
            message = '{} does not contain a valid agenda structure; top level must be a dict.'
            raise ConfigError(message.format(self.filepath))
        for k, v in raw.iteritems():
            if k == 'config':
                self.config = v
            elif k == 'global':
                self.global_ = AgendaGlobalEntry(**v)
            elif k == 'sections':
                self._collect_existing_ids(v, 'section')
                # First pass: collect every explicit workload ID across all
                # sections, so auto-assignment cannot collide with any of them.
                for s in v:
                    if not isinstance(s, dict):
                        raise ConfigError('Invalid section entry: "{}" in {}'.format(s, self.filepath))
                    self._collect_existing_ids(s.get('workloads', []), 'workload')
                for s in v:
                    self._assign_id_if_needed(s, 'section')
                    self.sections.append(AgendaSectionEntry(self, **s))
            elif k == 'workloads':
                self._collect_existing_ids(v, 'workload')
                for w in v:
                    self.workloads.append(self.get_workload_entry(w))
            else:
                raise ConfigError('Unexpected agenda entry "{}" in {}'.format(k, self.filepath))

    def _load_raw_from_source(self, source):
        """Read YAML from a file object, a file path, or a literal string."""
        if hasattr(source, 'read') and hasattr(source, 'name'):  # file-like object
            self.filepath = source.name
            raw = load_struct_from_yaml(text=source.read())
        elif isinstance(source, basestring):
            if os.path.isfile(source):
                self.filepath = source
                raw = load_struct_from_yaml(filepath=self.filepath)
            else:  # assume YAML text
                raw = load_struct_from_yaml(text=source)
        else:
            raise ConfigError('Unknown agenda source: {}'.format(source))
        return raw

    def _collect_existing_ids(self, ds, pool):
        # Collection needs to take place first so that auto IDs can be
        # correctly assigned, e.g. if someone explicitly specified an ID
        # of '1' for one of the workloads.
        for d in ds:
            if isinstance(d, dict) and 'id' in d:
                did = str(d['id'])
                if did in self._seen_ids[pool]:
                    raise ConfigError('Duplicate {} ID: {}'.format(pool, did))
                self._seen_ids[pool].add(did)

    def _assign_id_if_needed(self, d, pool):
        # Also enforces string IDs
        if d.get('id') is None:
            did = str(counter(pool))
            # Skip auto-generated values that were already claimed explicitly.
            while did in self._seen_ids[pool]:
                did = str(counter(pool))
            d['id'] = did
            self._seen_ids[pool].add(did)
        else:
            d['id'] = str(d['id'])
|  | ||||
|  | ||||
# Modifying the yaml parser to use an OrderedDict, rather than the regular
# Python dict, for mappings. This preserves the order in which the items are
# specified. See
#   http://stackoverflow.com/a/21048064
|  | ||||
# The YAML tag under which mappings are (de)serialized; hooking this tag
# lets us swap in OrderedDict for plain dict.
_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG


def dict_representer(dumper, data):
    # Serialize an OrderedDict as an ordinary YAML mapping, preserving order.
    return dumper.represent_mapping(_mapping_tag, data.iteritems())


def dict_constructor(loader, node):
    # Deserialize YAML mappings into OrderedDicts so key order is retained.
    return OrderedDict(loader.construct_pairs(node))


# Register the hooks globally on the yaml module (affects all users of yaml
# in this process).
yaml.add_representer(OrderedDict, dict_representer)
yaml.add_constructor(_mapping_tag, dict_constructor)
							
								
								
									
										195
									
								
								wlauto/core/bootstrap.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										195
									
								
								wlauto/core/bootstrap.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,195 @@ | ||||
| #    Copyright 2013-2015 ARM Limited | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
| # | ||||
|  | ||||
|  | ||||
| import os | ||||
| import shutil | ||||
| import imp | ||||
| import sys | ||||
| import re | ||||
| from collections import namedtuple, OrderedDict | ||||
|  | ||||
| from wlauto.exceptions import ConfigError | ||||
| from wlauto.utils.misc import merge_dicts, normalize, unique | ||||
| from wlauto.utils.types import identifier | ||||
|  | ||||
|  | ||||
# Directory containing this module and the invoking user's home directory;
# used below to locate bundled resources and the per-user WA environment.
_this_dir = os.path.dirname(__file__)
_user_home = os.path.expanduser('~')

# loading our external packages over those from the environment
sys.path.insert(0, os.path.join(_this_dir, '..', 'external'))


# Defines extension points for the WA framework. This table is used by the
# ExtensionLoader (among other places) to identify extensions it should look
# for.
# Parameters that need to be specified in a tuple for each extension type:
#     name: The name of the extension type. This will be used to resolve get_
#           and list_methods in the extension loader.
#     class: The base class for the extension type. Extension loader will check
#            whether classes it discovers are subclassed from this.
#     default package: This is the package that will be searched for extensions
#                      of that type by default (if no other packages are
#                      specified when creating the extension loader). This
#                      package *must* exist.
#    default path: This is the subdirectory under the environment_root which
#                  will be searched for extensions of this type by default (if
#                  no other paths are specified when creating the extension
#                  loader). This directory will be automatically created if it
#                  does not exist.

#pylint: disable=C0326
_EXTENSION_TYPE_TABLE = [
    # name,               class,                                    default package,            default path
    ('command',           'wlauto.core.command.Command',            'wlauto.commands',          'commands'),
    ('device',            'wlauto.core.device.Device',              'wlauto.devices',           'devices'),
    ('instrument',        'wlauto.core.instrumentation.Instrument', 'wlauto.instrumentation',   'instruments'),
    ('module',            'wlauto.core.extension.Module',           'wlauto.modules',           'modules'),
    ('resource_getter',   'wlauto.core.resource.ResourceGetter',    'wlauto.resource_getters',  'resource_getters'),
    ('result_processor',  'wlauto.core.result.ResultProcessor',     'wlauto.result_processors', 'result_processors'),
    ('workload',          'wlauto.core.workload.Workload',          'wlauto.workloads',         'workloads'),
]
# Named-tuple view over the rows of the table above.
_Extension = namedtuple('_Extension', 'name, cls, default_package, default_path')
_extensions = [_Extension._make(ext) for ext in _EXTENSION_TYPE_TABLE]  # pylint: disable=W0212
|  | ||||
|  | ||||
class ConfigLoader(object):
    """
    This class is responsible for loading and validating config files.

    Config sources (Python files or plain dicts) are merged into a single
    internal dict; attribute access for anything not set directly on the
    instance falls through to that dict via ``__getattr__``.

    """

    def __init__(self):
        self._loaded = False
        self._config = {}
        # Number of file sources seen so far; used to generate a unique
        # module name for each imp.load_source() call.
        self.config_count = 0
        self._loaded_files = []
        self.environment_root = None
        self.output_directory = 'wa_output'
        self.reboot_after_each_iteration = True
        self.dependencies_directory = None
        self.agenda = None
        self.extension_packages = []
        self.extension_paths = []
        self.extensions = []
        self.verbosity = 0
        self.debug = False
        self.package_directory = os.path.dirname(_this_dir)
        self.commands = {}

    @property
    def meta_directory(self):
        # Run metadata lives alongside results in the output directory.
        return os.path.join(self.output_directory, '__meta')

    @property
    def log_file(self):
        return os.path.join(self.output_directory, 'run.log')

    def update(self, source):
        """Merge *source* (a dict, or a path to a Python config file) in."""
        if isinstance(source, dict):
            self.update_from_dict(source)
        else:
            self.config_count += 1
            self.update_from_file(source)

    def update_from_file(self, source):
        """Execute the Python config file at *source* and merge its globals."""
        try:
            new_config = imp.load_source('config_{}'.format(self.config_count), source)
        except SyntaxError as e:
            # Fix: previous message misspelled 'Syntax'; also use the
            # 'as e' form rather than the deprecated comma syntax.
            message = 'Syntax error in config: {}'.format(str(e))
            raise ConfigError(message)
        self._config = merge_dicts(self._config, vars(new_config),
                                   list_duplicates='first', match_types=False, dict_type=OrderedDict)
        self._loaded_files.append(source)
        self._loaded = True

    def update_from_dict(self, source):
        """Merge a plain dict of settings, normalizing keys to identifiers."""
        normalized_source = dict((identifier(k), v) for k, v in source.iteritems())
        self._config = merge_dicts(self._config, normalized_source, list_duplicates='first',
                                   match_types=False, dict_type=OrderedDict)
        self._loaded = True

    def get_config_paths(self):
        # Sources may have been recorded as compiled '.pyc' paths; strip the
        # trailing 'c' to report the corresponding '.py' files.
        return [lf.rstrip('c') for lf in self._loaded_files]

    def _check_loaded(self):
        if not self._loaded:
            raise ConfigError('Config file not loaded.')

    def __getattr__(self, name):
        # Fall back to the merged config dict for unknown attributes
        # (only reached when normal attribute lookup fails).
        self._check_loaded()
        return self._config.get(normalize(name))
|  | ||||
|  | ||||
def init_environment(env_root, dep_dir, extension_paths, overwrite_existing=False):  # pylint: disable=R0914
    """
    Initialise a fresh user environment for Workload Automation.

    Creates ``env_root`` (removing it first if ``overwrite_existing`` is set),
    writes a default ``config.py`` derived from the bundled example, and
    creates the dependencies and extension directories. If running under
    ``sudo``, ownership of the created tree is handed back to the real user.

    :raises ConfigError: if ``env_root`` exists and ``overwrite_existing``
                         is ``False``.
    """
    if os.path.exists(env_root):
        if not overwrite_existing:
            raise ConfigError('Environment {} already exists.'.format(env_root))
        shutil.rmtree(env_root)

    os.makedirs(env_root)
    with open(os.path.join(_this_dir, '..', 'config_example.py')) as rf:
        # Strip the example's leading module docstring before writing.
        text = re.sub(r'""".*?"""', '', rf.read(), 1, re.DOTALL)
        # Fix: write into the environment being created (the env_root
        # parameter), not the module-level _env_root, which may refer to a
        # different directory than the one this call is initialising.
        with open(os.path.join(env_root, 'config.py'), 'w') as wf:
            wf.write(text)

    os.makedirs(dep_dir)
    for path in extension_paths:
        os.makedirs(path)

    # If running with sudo on POSIX, change the ownership to the real user.
    real_user = os.getenv('SUDO_USER')
    if real_user:
        import pwd  # done here as module won't import on win32
        user_entry = pwd.getpwnam(real_user)
        uid, gid = user_entry.pw_uid, user_entry.pw_gid
        os.chown(env_root, uid, gid)
        # why, oh why isn't there a recursive=True option for os.chown?
        for root, dirs, files in os.walk(env_root):
            for d in dirs:
                os.chown(os.path.join(root, d), uid, gid)
            for f in files:  # pylint: disable=W0621
                os.chown(os.path.join(root, f), uid, gid)
|  | ||||
|  | ||||
# Per-user WA environment root; overridable via the WA_USER_DIRECTORY
# environment variable.
_env_root = os.getenv('WA_USER_DIRECTORY', os.path.join(_user_home, '.workload_automation'))
_dep_dir = os.path.join(_env_root, 'dependencies')
# One subdirectory per extension type, plus any extra paths supplied through
# WA_EXTENSION_PATHS (os.pathsep-separated).
_extension_paths = [os.path.join(_env_root, ext.default_path) for ext in _extensions]
_extension_paths.extend(os.getenv('WA_EXTENSION_PATHS', '').split(os.pathsep))

# NOTE: the code below runs at import time -- the user environment is
# created (or repaired) as a side effect of importing this module.
if not os.path.isdir(_env_root):
    init_environment(_env_root, _dep_dir, _extension_paths)
elif not os.path.isfile(os.path.join(_env_root, 'config.py')):
    # Environment exists but config.py is missing: regenerate it from the
    # bundled example, stripping the example's leading docstring.
    with open(os.path.join(_this_dir, '..', 'config_example.py')) as f:
        f_text = re.sub(r'""".*?"""', '', f.read(), 1, re.DOTALL)
        with open(os.path.join(_env_root, 'config.py'), 'w') as f:
            f.write(f_text)

# Global settings object shared throughout WA.
settings = ConfigLoader()
settings.environment_root = _env_root
settings.dependencies_directory = _dep_dir
settings.extension_paths = _extension_paths
settings.extensions = _extensions

# Optional extra extension packages, listed whitespace-separated in a
# 'packages' file inside the environment root.
_packages_file = os.path.join(_env_root, 'packages')
if os.path.isfile(_packages_file):
    with open(_packages_file) as fh:
        settings.extension_packages = unique(fh.read().split())

# Finally, load the user's config.py into the settings object.
_env_config = os.path.join(settings.environment_root, 'config.py')
settings.update(_env_config)
|  | ||||
							
								
								
									
										67
									
								
								wlauto/core/command.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										67
									
								
								wlauto/core/command.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,67 @@ | ||||
| #    Copyright 2014-2015 ARM Limited | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
| # | ||||
|  | ||||
| import textwrap | ||||
|  | ||||
| from wlauto.core.extension import Extension | ||||
| from wlauto.core.entry_point import init_argument_parser | ||||
| from wlauto.utils.doc import format_body | ||||
|  | ||||
|  | ||||
class Command(Extension):
    """
    Defines a Workload Automation command. This will be executed from the command line as
    ``wa <command> [args ...]``. This defines the name to be used when invoking wa, the
    code that will actually be executed on invocation and the argument parser to be used
    to parse the rest of the command line arguments.

    """

    # One-line help shown in the top-level command listing.
    help = None
    # Usage string passed to the argparse subparser.
    usage = None
    # Longer description; also used as help text when ``help`` is not set.
    description = None
    epilog = None
    # Optional argparse formatter class for this command's parser.
    formatter_class = None

    def __init__(self, subparsers):
        super(Command, self).__init__()
        self.group = subparsers
        parser_params = dict(help=(self.help or self.description), usage=self.usage,
                             description=format_body(textwrap.dedent(self.description), 80),
                             epilog=self.epilog)
        if self.formatter_class:
            parser_params['formatter_class'] = self.formatter_class
        self.parser = subparsers.add_parser(self.name, **parser_params)
        init_argument_parser(self.parser)  # propagate top-level options
        self.initialize()

    def initialize(self):
        """
        Perform command-specific initialisation (e.g. adding command-specific options to the command's
        parser).

        """
        pass

    def execute(self, args):
        """
        Execute this command.

        :args: An ``argparse.Namespace`` containing command line arguments (as returned by
               ``argparse.ArgumentParser.parse_args()``). This would usually be the result of
               invoking ``self.parser``.

        """
        raise NotImplementedError()
							
								
								
									
										756
									
								
								wlauto/core/configuration.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										756
									
								
								wlauto/core/configuration.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,756 @@ | ||||
| #    Copyright 2014-2015 ARM Limited | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
| # | ||||
|  | ||||
|  | ||||
| import os | ||||
| import json | ||||
| from copy import copy | ||||
| from collections import OrderedDict | ||||
|  | ||||
| from wlauto.exceptions import ConfigError | ||||
| from wlauto.utils.misc import merge_dicts, merge_lists, load_struct_from_file | ||||
| from wlauto.utils.types import regex_type, identifier | ||||
|  | ||||
|  | ||||
class SharedConfiguration(object):
    """Mutable holder for configuration values shared across workload specs."""

    def __init__(self):
        self.number_of_iterations = None
        self.workload_name = None
        self.label = None
        # Each parameter group starts out as an empty ordered mapping.
        for group in ('boot_parameters', 'runtime_parameters', 'workload_parameters'):
            setattr(self, group, OrderedDict())
        self.instrumentation = []
|  | ||||
|  | ||||
class ConfigurationJSONEncoder(json.JSONEncoder):
    """JSON encoder that knows how to serialize WA configuration objects."""

    def default(self, obj):  # pylint: disable=E0202
        # Run specs and run configurations serialize via their dict form.
        if isinstance(obj, (WorkloadRunSpec, RunConfiguration)):
            return obj.to_dict()
        if isinstance(obj, RebootPolicy):
            return obj.policy
        if isinstance(obj, regex_type):
            return obj.pattern
        # Anything else falls through to the base encoder (raises TypeError).
        return json.JSONEncoder.default(self, obj)
|  | ||||
|  | ||||
class WorkloadRunSpec(object):
    """
    Specifies execution of a workload, including things like the number of
    iterations, device runtime_parameters configuration, etc.

    """

    # These should be handled by the framework if not explicitly specified,
    # so it is a programming error if they're not.
    framework_mandatory_parameters = ['id', 'number_of_iterations']

    # These *must* be specified by the user (through one mechanism or another)
    # and it is a configuration error if they're not.
    mandatory_parameters = ['workload_name']

    def __init__(self,
                 id=None,  # pylint: disable=W0622
                 number_of_iterations=None,
                 workload_name=None,
                 boot_parameters=None,
                 label=None,
                 section_id=None,
                 workload_parameters=None,
                 runtime_parameters=None,
                 instrumentation=None,
                 flash=None,
                 ):
        self.id = id
        self.number_of_iterations = number_of_iterations
        self.workload_name = workload_name
        # The label defaults to the workload name when not given explicitly.
        self.label = label or self.workload_name
        self.section_id = section_id
        self.boot_parameters = boot_parameters or OrderedDict()
        self.runtime_parameters = runtime_parameters or OrderedDict()
        self.workload_parameters = workload_parameters or OrderedDict()
        self.instrumentation = instrumentation or []
        self.flash = flash or OrderedDict()
        self._workload = None  # populated by load()
        self._section = None
        self.enabled = True

    def set(self, param, value):
        """Set or merge the value of the specified spec parameter.

        Scalar parameters are only overwritten by non-None values; dict
        parameters are merged with later values winning; the
        ``instrumentation`` list is merged with later duplicates winning.
        Raises ``ValueError`` for an unrecognised parameter name.

        """
        if param in ['id', 'section_id', 'number_of_iterations', 'workload_name', 'label']:
            if value is not None:
                setattr(self, param, value)
        elif param in ['boot_parameters', 'runtime_parameters', 'workload_parameters', 'flash']:
            setattr(self, param, merge_dicts(getattr(self, param), value, list_duplicates='last',
                                             dict_type=OrderedDict, should_normalize=False))
        elif param in ['instrumentation']:
            setattr(self, param, merge_lists(getattr(self, param), value, duplicates='last'))
        else:
            raise ValueError('Unexpected workload spec parameter: {}'.format(param))

    def validate(self):
        """Check that all mandatory parameters have been set.

        Missing framework-mandatory parameters indicate a programming error
        and raise ``RuntimeError``; missing user-mandatory parameters are a
        configuration problem and raise ``ConfigError``.

        """
        for param_name in self.framework_mandatory_parameters:
            param = getattr(self, param_name)
            if param is None:
                msg = '{} not set for workload spec.'
                raise RuntimeError(msg.format(param_name))
        for param_name in self.mandatory_parameters:
            param = getattr(self, param_name)
            if param is None:
                msg = '{} not set for workload spec for workload {}'
                raise ConfigError(msg.format(param_name, self.id))

    def match_selectors(self, selectors):
        """
        Returns ``True`` if this spec matches the specified selectors, and
        ``False`` otherwise. ``selectors`` must be a dict-like object with
        attribute names mapping onto selector values. At the moment, only equality
        selection is supported; i.e. the value of the attribute of the spec must
        match exactly the corresponding value specified in the ``selectors`` dict.

        """
        if not selectors:
            return True
        for k, v in selectors.iteritems():
            if getattr(self, k, None) != v:
                return False
        return True

    @property
    def workload(self):
        # Guard against use before load() has been invoked.
        if self._workload is None:
            raise RuntimeError("Workload for {} has not been loaded".format(self))
        return self._workload

    @property
    def section(self):
        # Only an error if the spec belongs to a section that was never loaded.
        if self.section_id and self._section is None:
            raise RuntimeError("Section for {} has not been loaded".format(self))
        return self._section

    # Deprecated alias preserving the original misspelled property name for
    # backward compatibility with existing callers.
    secition = section

    def load(self, device, ext_loader):
        """Loads the workload for the specified device using the specified loader.
        This must be done before attempting to execute the spec."""
        self._workload = ext_loader.get_workload(self.workload_name, device, **self.workload_parameters)

    def to_dict(self):
        """Return a dict of this spec's public state (internal fields dropped)."""
        d = copy(self.__dict__)
        del d['_workload']
        del d['_section']
        return d

    def __str__(self):
        return '{} {}'.format(self.id, self.label)

    def __cmp__(self, other):
        # Order specs by id; non-spec objects sort by class name (py2 cmp).
        if not isinstance(other, WorkloadRunSpec):
            return cmp('WorkloadRunSpec', other.__class__.__name__)
        return cmp(self.id, other.id)
|  | ||||
|  | ||||
| class _SpecConfig(object): | ||||
|     # TODO: This is a bit of HACK for alias resolution. This formats Alias | ||||
|     #       params as if they came from config. | ||||
|  | ||||
|     def __init__(self, name, params=None): | ||||
|         setattr(self, name, params or {}) | ||||
|  | ||||
|  | ||||
class RebootPolicy(object):
    """
    Encapsulates when the target device gets rebooted over the course of a
    run. The policy is supplied as a string on construction (typically read
    from the user's settings) and normalised to lower case with underscores.

    Recognised policy values:

    :never: The device will never be rebooted.
    :as_needed: Only reboot the device if it becomes unresponsive, or needs to be flashed, etc.
    :initial: The device will be rebooted when the execution first starts, just before
              executing the first workload spec.
    :each_spec: The device will be rebooted before running a new workload spec.
    :each_iteration: The device will be rebooted before each new iteration.

    """

    valid_policies = ['never', 'as_needed', 'initial', 'each_spec', 'each_iteration']

    def __init__(self, policy):
        normalized = policy.strip().lower().replace(' ', '_')
        if normalized not in self.valid_policies:
            message = 'Invalid reboot policy {}; must be one of {}'.format(normalized, ', '.join(self.valid_policies))
            raise ConfigError(message)
        self.policy = normalized

    @property
    def can_reboot(self):
        # Every policy except "never" permits rebooting in some circumstance.
        return self.policy != 'never'

    @property
    def perform_initial_boot(self):
        return self.policy not in ('never', 'as_needed')

    @property
    def reboot_on_each_spec(self):
        return self.policy in ('each_spec', 'each_iteration')

    @property
    def reboot_on_each_iteration(self):
        return self.policy == 'each_iteration'

    def __str__(self):
        return self.policy

    __repr__ = __str__

    def __cmp__(self, other):
        # Compare by policy string; works against both RebootPolicy
        # instances and plain strings (py2 ordering protocol).
        if isinstance(other, RebootPolicy):
            other = other.policy
        return cmp(self.policy, other)
|  | ||||
|  | ||||
class RunConfigurationItem(object):
    """
    Describes a predetermined "configuration point" (an individual setting)
    and how multiple user-supplied values for it are combined when
    encountered.

    """

    # Maps each valid category onto the NULL value returned when no values
    # were supplied for the item.
    # NOTE(review): these NULL values are shared class-level objects; combine()
    # returns them directly for empty input.
    valid_categories = {
        'scalar': None,
        'list': [],
        'dict': {},
    }

    # In addition to these, any callable taking an arbitrary number of
    # positional arguments is also a valid method.
    valid_methods = ['keep', 'replace', 'merge']

    def __init__(self, name, category, method):
        """Validate and record the item's name, category and combine method."""
        if category not in self.valid_categories:
            raise ValueError('Invalid category: {}'.format(category))
        if not callable(method) and method not in self.valid_methods:
            raise ValueError('Invalid method: {}'.format(method))
        if category == 'scalar' and method == 'merge':
            raise ValueError('Method cannot be "merge" for a scalar')
        self.name = name
        self.category = category
        self.method = method

    def combine(self, *args):
        """
        Combine the provided values according to this item's method. Order
        matters -- values are assumed to be in the order the user specified
        them. ``None`` values are ignored; if nothing remains, the
        category's NULL value is returned.

        """
        values = [v for v in args if v is not None]
        if not values:
            return self.valid_categories[self.category]
        # A single surviving value short-circuits every method, including
        # callables.
        if self.method == 'keep' or len(values) == 1:
            return values[0]
        if self.method == 'replace':
            return values[-1]
        if self.method == 'merge':
            if self.category == 'list':
                return merge_lists(*values, duplicates='last', dict_type=OrderedDict)
            if self.category == 'dict':
                return merge_dicts(*values,
                                   should_merge_lists=True,
                                   should_normalize=False,
                                   list_duplicates='last',
                                   dict_type=OrderedDict)
            raise ValueError('Unexpected category for merge : "{}"'.format(self.category))
        if callable(self.method):
            return self.method(*values)
        raise ValueError('Unexpected method: "{}"'.format(self.method))
|  | ||||
|  | ||||
| def _combine_ids(*args): | ||||
|     return '_'.join(args) | ||||
|  | ||||
|  | ||||
| class RunConfiguration(object): | ||||
|     """ | ||||
|     Loads and maintains the unified configuration for this run. This includes configuration | ||||
|     for WA execution as a whole, and parameters for specific specs. | ||||
|  | ||||
|     WA configuration mechanism aims to be flexible and easy to use, while at the same | ||||
    time providing strong validation and early failure on error. To meet these requirements,
|     the implementation gets rather complicated. This is going to be a quick overview of | ||||
|     the underlying mechanics. | ||||
|  | ||||
|     .. note:: You don't need to know this to use WA, or to write extensions for it. From | ||||
|               the point of view of extension writers, configuration from various sources | ||||
|               "magically" appears as attributes of their classes. This explanation peels | ||||
|               back the curtain and is intended for those who, for one reason or another, | ||||
|               need to understand how the magic works. | ||||
|  | ||||
|     **terminology** | ||||
|  | ||||
|     run | ||||
|  | ||||
|         A single execution of a WA agenda. | ||||
|  | ||||
|     run config(uration) (object) | ||||
|  | ||||
|         An instance of this class. There is one per run. | ||||
|  | ||||
|     config(uration) item | ||||
|  | ||||
|         A single configuration entry or "setting", e.g. the device interface to use. These | ||||
|         can be for the run as a whole, or for a specific extension. | ||||
|  | ||||
|     (workload) spec | ||||
|  | ||||
|         A specification of a single workload execution. This combines workload configuration | ||||
|         with things like the number of iterations to run, which instruments to enable, etc. | ||||
|         More concretely, this is an instance of :class:`WorkloadRunSpec`. | ||||
|  | ||||
|     **overview** | ||||
|  | ||||
|     There are three types of WA configuration: | ||||
|  | ||||
|         1. "Meta" configuration that determines how the rest of the configuration is | ||||
|            processed (e.g. where extensions get loaded from). Since this does not pertain | ||||
|            to *run* configuration, it will not be covered further. | ||||
|         2. Global run configuration, e.g. which workloads, result processors and instruments | ||||
|            will be enabled for a run. | ||||
|         3. Per-workload specification configuration, that determines how a particular workload | ||||
|            instance will get executed (e.g. what workload parameters will be used, how many | ||||
           iterations).
|  | ||||
|     **run configuration** | ||||
|  | ||||
|     Run configuration may appear in a config file (usually ``~/.workload_automation/config.py``), | ||||
|     or in the ``config`` section of an agenda. Configuration is specified as a nested structure | ||||
|     of dictionaries (associative arrays, or maps) and lists in the syntax following the format | ||||
|     implied by the file extension (currently, YAML and Python are supported). If the same | ||||
|     configuration item appears in more than one source, they are merged with conflicting entries | ||||
|     taking the value from the last source that specified them. | ||||
|  | ||||
|     In addition to a fixed set of global configuration items, configuration for any WA | ||||
|     Extension (instrument, result processor, etc) may also be specified, namespaced under | ||||
|     the extension's name (i.e. the extensions name is a key in the global config with value | ||||
|     being a dict of parameters and their values). Some Extension parameters also specify a | ||||
|     "global alias" that may appear at the top-level of the config rather than under the | ||||
|     Extension's name. It is *not* an error to specify configuration for an Extension that has | ||||
|     not been enabled for a particular run; such configuration will be ignored. | ||||
|  | ||||
|  | ||||
|     **per-workload configuration** | ||||
|  | ||||
|     Per-workload configuration can be specified in three places in the agenda: the | ||||
|     workload entry in the ``workloads`` list, the ``global`` entry (configuration there | ||||
|     will be applied to every workload entry), and in a section entry in ``sections`` list | ||||
    (configuration in every section will be applied to every workload entry separately,
|     creating a "cross-product" of section and workload configurations; additionally, | ||||
|     sections may specify their own workload lists). | ||||
|  | ||||
    If the same configuration item appears in more than one of the above places, they will
|     be merged in the following order: ``global``, ``section``, ``workload``, with conflicting | ||||
|     scalar values in the later overriding those from previous locations. | ||||
|  | ||||
|  | ||||
|     **Global parameter aliases** | ||||
|  | ||||
|     As mentioned above, an Extension's parameter may define a global alias, which will be | ||||
|     specified and picked up from the top-level config, rather than config for that specific | ||||
|     extension. It is an error to specify the value for a parameter both through a global | ||||
|     alias and through extension config dict in the same configuration file. It is, however, | ||||
|     possible to use a global alias in one file, and specify extension configuration for the | ||||
|     same parameter in another file, in which case, the usual merging rules would apply. | ||||
|  | ||||
|     **Loading and validation of configuration** | ||||
|  | ||||
|     Validation of user-specified configuration happens at several stages of run initialisation, | ||||
|     to ensure that appropriate context for that particular type of validation is available and | ||||
|     that meaningful errors can be reported, as early as is feasible. | ||||
|  | ||||
|     - Syntactic validation is performed when configuration is first loaded. | ||||
|       This is done by the loading mechanism (e.g. YAML parser), rather than WA itself. WA | ||||
|       propagates any errors encountered as ``ConfigError``\ s. | ||||
|     - Once a config file is loaded into a Python structure, it scanned to | ||||
|       extract settings. Static configuration is validated and added to the config. Extension | ||||
|       configuration is collected into a collection of "raw" config, and merged as appropriate, but | ||||
|       is not processed further at this stage. | ||||
|     - Once all configuration sources have been processed, the configuration as a whole | ||||
|       is validated (to make sure there are no missing settings, etc). | ||||
|     - Extensions are loaded through the run config object, which instantiates | ||||
|       them with appropriate parameters based on the "raw" config collected earlier. When an | ||||
      Extension is instantiated in such a way, its config is "officially" added to run configuration
|       tracked by the run config object. Raw config is discarded at the end of the run, so | ||||
      that any config that wasn't loaded in this way is not recorded (as it was not actually used).
    - Extension parameters are validated individually (for type, value ranges, etc) as they are
|       loaded in the Extension's __init__. | ||||
|     - An extension's ``validate()`` method is invoked before it is used (exactly when this | ||||
|       happens depends on the extension's type) to perform any final validation *that does not | ||||
|       rely on the target being present* (i.e. this would happen before WA connects to the target). | ||||
      This can be used to perform inter-parameter validation for an extension (e.g. when valid range for
|       one parameter depends on another), and more general WA state assumptions (e.g. a result | ||||
|       processor can check that an instrument it depends on has been installed). | ||||
|     - Finally, it is the responsibility of individual extensions to validate any assumptions | ||||
|       they make about the target device (usually as part of their ``setup()``). | ||||
|  | ||||
|     **Handling of Extension aliases.** | ||||
|  | ||||
|     WA extensions can have zero or more aliases (not to be confused with global aliases for extension | ||||
|     *parameters*). An extension allows associating an alternative name for the extension with a set | ||||
|     of parameter values. In other words aliases associate common configurations for an extension with | ||||
|     a name, providing a shorthand for it. For example, "t-rex_offscreen" is an alias for "glbenchmark" | ||||
|     workload that specifies that "use_case" should be "t-rex" and "variant" should be "offscreen". | ||||
|  | ||||
|     **special loading rules** | ||||
|  | ||||
|     Note that as a consequence of being able to specify configuration for *any* Extension namespaced | ||||
    under the Extension's name in the top-level config, two distinct mechanisms exist for configuring
|     devices and workloads. This is valid, however due to their nature, they are handled in a special way. | ||||
|     This may be counter intuitive, so configuration of devices and workloads creating entries for their | ||||
|     names in the config is discouraged in favour of using the "normal" mechanisms of configuring them | ||||
|     (``device_config`` for devices and workload specs in the agenda for workloads). | ||||
|  | ||||
|     In both cases (devices and workloads), "normal" config will always override named extension config | ||||
|     *irrespective of which file it was specified in*. So a ``adb_name`` name specified in ``device_config`` | ||||
|     inside ``~/.workload_automation/config.py`` will override ``adb_name`` specified for ``juno`` in the | ||||
|     agenda (even when device is set to "juno"). | ||||
|  | ||||
|     Again, this ignores normal loading rules, so the use of named extension configuration for devices | ||||
|     and workloads is discouraged. There maybe some situations where this behaviour is useful however | ||||
|     (e.g. maintaining configuration for different devices in one config file). | ||||
|  | ||||
|     """ | ||||
|  | ||||
    # Fallbacks used when the corresponding setting is absent from all
    # configuration sources.
    default_reboot_policy = 'as_needed'
    default_execution_order = 'by_iteration'

    # This is generic top-level configuration; each item describes how
    # repeated occurrences of the setting across config sources combine.
    general_config = [
        RunConfigurationItem('run_name', 'scalar', 'replace'),
        RunConfigurationItem('project', 'scalar', 'replace'),
        RunConfigurationItem('project_stage', 'dict', 'replace'),
        RunConfigurationItem('execution_order', 'scalar', 'replace'),
        RunConfigurationItem('reboot_policy', 'scalar', 'replace'),
        RunConfigurationItem('device', 'scalar', 'replace'),
        RunConfigurationItem('flashing_config', 'dict', 'replace'),
    ]

    # Configuration specified for each workload spec. "workload_parameters"
    # aren't listed because they are handled separately.
    workload_config = [
        RunConfigurationItem('id', 'scalar', _combine_ids),
        RunConfigurationItem('number_of_iterations', 'scalar', 'replace'),
        RunConfigurationItem('workload_name', 'scalar', 'replace'),
        RunConfigurationItem('label', 'scalar', 'replace'),
        RunConfigurationItem('section_id', 'scalar', 'replace'),
        RunConfigurationItem('boot_parameters', 'dict', 'merge'),
        RunConfigurationItem('runtime_parameters', 'dict', 'merge'),
        RunConfigurationItem('instrumentation', 'list', 'merge'),
        RunConfigurationItem('flash', 'dict', 'merge'),
    ]

    # List of names that may be present in configuration (and it is valid for
    # them to be there) but are not handled by RunConfiguration.
    ignore_names = ['logging']
|  | ||||
|     def get_reboot_policy(self): | ||||
|         if not self._reboot_policy: | ||||
|             self._reboot_policy = RebootPolicy(self.default_reboot_policy) | ||||
|         return self._reboot_policy | ||||
|  | ||||
|     def set_reboot_policy(self, value): | ||||
|         if isinstance(value, RebootPolicy): | ||||
|             self._reboot_policy = value | ||||
|         else: | ||||
|             self._reboot_policy = RebootPolicy(value) | ||||
|  | ||||
|     reboot_policy = property(get_reboot_policy, set_reboot_policy) | ||||
|  | ||||
|     @property | ||||
|     def all_instrumentation(self): | ||||
|         result = set() | ||||
|         for spec in self.workload_specs: | ||||
|             result = result.union(set(spec.instrumentation)) | ||||
|         return result | ||||
|  | ||||
|     def __init__(self, ext_loader): | ||||
|         self.ext_loader = ext_loader | ||||
|         self.device = None | ||||
|         self.device_config = None | ||||
|         self.execution_order = None | ||||
|         self.project = None | ||||
|         self.project_stage = None | ||||
|         self.run_name = None | ||||
|         self.instrumentation = {} | ||||
|         self.result_processors = {} | ||||
|         self.workload_specs = [] | ||||
|         self.flashing_config = {} | ||||
|         self.other_config = {}  # keeps track of used config for extensions other than of the four main kinds. | ||||
|         self._used_config_items = [] | ||||
|         self._global_instrumentation = [] | ||||
|         self._reboot_policy = None | ||||
|         self._agenda = None | ||||
|         self._finalized = False | ||||
|         self._general_config_map = {i.name: i for i in self.general_config} | ||||
|         self._workload_config_map = {i.name: i for i in self.workload_config} | ||||
|         # Config files may contains static configuration for extensions that | ||||
|         # would not be part of this of this run (e.g. DB connection settings | ||||
|         # for a result processor that has not been enabled). Such settings | ||||
|         # should not be part of configuration for this run (as they will not | ||||
|         # be affecting it), but we still need to keep track it in case a later | ||||
|         # config (e.g. from the agenda) enables the extension. | ||||
|         # For this reason, all extension config is first loaded into the | ||||
|         # following dict and when an extension is identified as need for the | ||||
|         # run, its config is picked up from this "raw" dict and it becomes part | ||||
|         # of the run configuration. | ||||
|         self._raw_config = {'instrumentation': [], 'result_processors': []} | ||||
|  | ||||
|     def get_extension(self, ext_name, *args): | ||||
|         self._check_finalized() | ||||
|         self._load_default_config_if_necessary(ext_name) | ||||
|         ext_config = self._raw_config[ext_name] | ||||
|         ext_cls = self.ext_loader.get_extension_class(ext_name) | ||||
|         if ext_cls.kind not in ['workload', 'device', 'instrument', 'result_processor']: | ||||
|             self.other_config[ext_name] = ext_config | ||||
|         return self.ext_loader.get_extension(ext_name, *args, **ext_config) | ||||
|  | ||||
|     def to_dict(self): | ||||
|         d = copy(self.__dict__) | ||||
|         to_remove = ['ext_loader', 'workload_specs'] + [k for k in d.keys() if k.startswith('_')] | ||||
|         for attr in to_remove: | ||||
|             del d[attr] | ||||
|         d['workload_specs'] = [s.to_dict() for s in self.workload_specs] | ||||
|         d['reboot_policy'] = self.reboot_policy  # this is a property so not in __dict__ | ||||
|         return d | ||||
|  | ||||
|     def load_config(self, source): | ||||
|         """Load configuration from the specified source. The source must be | ||||
|         either a path to a valid config file or a dict-like object. Currently, | ||||
|         config files can be either python modules (.py extension) or YAML documents | ||||
|         (.yaml extension).""" | ||||
|         if self._finalized: | ||||
|             raise ValueError('Attempting to load a config file after run configuration has been finalized.') | ||||
|         try: | ||||
|             config_struct = _load_raw_struct(source) | ||||
|             self._merge_config(config_struct) | ||||
|         except ConfigError as e: | ||||
|             message = 'Error in {}:\n\t{}' | ||||
|             raise ConfigError(message.format(getattr(source, 'name', None), e.message)) | ||||
|  | ||||
|     def set_agenda(self, agenda, selectors=None): | ||||
|         """Set the agenda for this run; Unlike with config files, there can only be one agenda.""" | ||||
|         if self._agenda: | ||||
|             # note: this also guards against loading an agenda after finalized() has been called, | ||||
|             #       as that would have required an agenda to be set. | ||||
|             message = 'Attempting to set a second agenda {};\n\talready have agenda {} set' | ||||
|             raise ValueError(message.format(agenda.filepath, self._agenda.filepath)) | ||||
|         try: | ||||
|             self._merge_config(agenda.config or {}) | ||||
|             self._load_specs_from_agenda(agenda, selectors) | ||||
|             self._agenda = agenda | ||||
|         except ConfigError as e: | ||||
|             message = 'Error in {}:\n\t{}' | ||||
|             raise ConfigError(message.format(agenda.filepath, e.message)) | ||||
|  | ||||
|     def finalize(self): | ||||
|         """This must be invoked once all configuration sources have been loaded. This will | ||||
|         do the final processing, setting instrumentation and result processor configuration | ||||
|         for the run And making sure that all the mandatory config has been specified.""" | ||||
|         if self._finalized: | ||||
|             return | ||||
|         if not self._agenda: | ||||
|             raise ValueError('Attempting to finalize run configuration before an agenda is loaded.') | ||||
|         self._finalize_config_list('instrumentation') | ||||
|         self._finalize_config_list('result_processors') | ||||
|         if not self.device: | ||||
|             raise ConfigError('Device not specified in the config.') | ||||
|         self._finalize_device_config() | ||||
|         if not self.reboot_policy.reboot_on_each_spec: | ||||
|             for spec in self.workload_specs: | ||||
|                 if spec.boot_parameters: | ||||
|                     message = 'spec {} specifies boot_parameters; reboot policy must be at least "each_spec"' | ||||
|                     raise ConfigError(message.format(spec.id)) | ||||
|         for spec in self.workload_specs: | ||||
|             for globinst in self._global_instrumentation: | ||||
|                 if globinst not in spec.instrumentation: | ||||
|                     spec.instrumentation.append(globinst) | ||||
|             spec.validate() | ||||
|         self._finalized = True | ||||
|  | ||||
|     def serialize(self, wfh): | ||||
|         json.dump(self, wfh, cls=ConfigurationJSONEncoder, indent=4) | ||||
|  | ||||
|     def _merge_config(self, config): | ||||
|         """ | ||||
|         Merge the settings specified by the ``config`` dict-like object into current | ||||
|         configuration. | ||||
|  | ||||
|         """ | ||||
|         if not isinstance(config, dict): | ||||
|             raise ValueError('config must be a dict; found {}'.format(config.__class__.__name__)) | ||||
|  | ||||
|         for k, v in config.iteritems(): | ||||
|             k = identifier(k) | ||||
|             if k in self.ext_loader.global_param_aliases: | ||||
|                 self._resolve_global_alias(k, v) | ||||
|             elif k in self._general_config_map: | ||||
|                 self._set_run_config_item(k, v) | ||||
|             elif self.ext_loader.has_extension(k): | ||||
|                 self._set_extension_config(k, v) | ||||
|             elif k == 'device_config': | ||||
|                 self._set_raw_dict(k, v) | ||||
|             elif k in ['instrumentation', 'result_processors']: | ||||
|                 # Instrumentation can be enabled and disabled by individual | ||||
|                 # workloads, so we need to track it in two places: a list of | ||||
|                 # all instruments for the run (as they will all need to be | ||||
|                 # initialized and installed, and a list of only the "global" | ||||
|                 # instruments which can then be merged into instrumentation | ||||
|                 # lists of individual workload specs. | ||||
|                 self._set_raw_list('_global_{}'.format(k), v) | ||||
|                 self._set_raw_list(k, v) | ||||
|             elif k in self.ignore_names: | ||||
|                 pass | ||||
|             else: | ||||
|                 raise ConfigError('Unknown configuration option: {}'.format(k)) | ||||
|  | ||||
|     def _resolve_global_alias(self, name, value): | ||||
|         ga = self.ext_loader.global_param_aliases[name] | ||||
|         for param, ext in ga.iteritems(): | ||||
|             for name in [ext.name] + [a.name for a in ext.aliases]: | ||||
|                 self._load_default_config_if_necessary(name) | ||||
|                 self._raw_config[name][param.name] = value | ||||
|  | ||||
|     def _set_run_config_item(self, name, value): | ||||
|         item = self._general_config_map[name] | ||||
|         combined_value = item.combine(getattr(self, name, None), value) | ||||
|         setattr(self, name, combined_value) | ||||
|  | ||||
|     def _set_extension_config(self, name, value): | ||||
|         default_config = self.ext_loader.get_default_config(name) | ||||
|         self._set_raw_dict(name, value, default_config) | ||||
|  | ||||
|     def _set_raw_dict(self, name, value, default_config=None): | ||||
|         existing_config = self._raw_config.get(name, default_config or {}) | ||||
|         new_config = _merge_config_dicts(existing_config, value) | ||||
|         self._raw_config[name] = new_config | ||||
|  | ||||
|     def _set_raw_list(self, name, value): | ||||
|         old_value = self._raw_config.get(name, []) | ||||
|         new_value = merge_lists(old_value, value, duplicates='last') | ||||
|         self._raw_config[name] = new_value | ||||
|  | ||||
|     def _finalize_config_list(self, attr_name): | ||||
|         """Note: the name is somewhat misleading. This finalizes a list | ||||
|         form the specified configuration (e.g. "instrumentation"); internal | ||||
|         representation is actually a dict, not a list...""" | ||||
|         ext_config = {} | ||||
|         raw_list = self._raw_config.get(attr_name, []) | ||||
|         for extname in raw_list: | ||||
|             default_config = self.ext_loader.get_default_config(extname) | ||||
|             ext_config[extname] = self._raw_config.get(extname, default_config) | ||||
|         list_name = '_global_{}'.format(attr_name) | ||||
|         setattr(self, list_name, raw_list) | ||||
|         setattr(self, attr_name, ext_config) | ||||
|  | ||||
|     def _finalize_device_config(self): | ||||
|         self._load_default_config_if_necessary(self.device) | ||||
|         config = _merge_config_dicts(self._raw_config.get(self.device), | ||||
|                                      self._raw_config.get('device_config', {})) | ||||
|         self.device_config = config | ||||
|  | ||||
    def _load_default_config_if_necessary(self, name):
        """Seed the raw config entry for extension *name* with its default
        configuration, if nothing has been set for it yet."""
        if name not in self._raw_config:
            self._raw_config[name] = self.ext_loader.get_default_config(name)
|  | ||||
|     def _load_specs_from_agenda(self, agenda, selectors): | ||||
|         global_dict = agenda.global_.to_dict() if agenda.global_ else {} | ||||
|         if agenda.sections: | ||||
|             for section_entry in agenda.sections: | ||||
|                 section_dict = section_entry.to_dict() | ||||
|                 for workload_entry in agenda.workloads + section_entry.workloads: | ||||
|                     workload_dict = workload_entry.to_dict() | ||||
|                     self._load_workload_spec(global_dict, section_dict, workload_dict, selectors) | ||||
|         else:  # no sections were specified | ||||
|             for workload_entry in agenda.workloads: | ||||
|                 workload_dict = workload_entry.to_dict() | ||||
|                 self._load_workload_spec(global_dict, {}, workload_dict, selectors) | ||||
|  | ||||
    def _load_workload_spec(self, global_dict, section_dict, workload_dict, selectors):
        """Build a ``WorkloadRunSpec`` by combining global, section and
        workload level settings and, if it matches *selectors*, append it to
        ``self.workload_specs`` (registering any instruments it needs)."""
        spec = WorkloadRunSpec()
        for name, config in self._workload_config_map.iteritems():
            # Precedence: workload entry overrides section, which overrides global.
            value = config.combine(global_dict.get(name), section_dict.get(name), workload_dict.get(name))
            spec.set(name, value)
        if section_dict:
            spec.set('section_id', section_dict.get('id'))

        # The agenda may refer to the workload by an alias; resolve it to the
        # real extension name plus any parameters the alias implies.
        realname, alias_config = self.ext_loader.resolve_alias(spec.workload_name)
        if not spec.label:
            spec.label = spec.workload_name
        spec.workload_name = realname
        # Merge workload parameters from lowest to highest precedence (later
        # dicts win). NOTE(review): the raw-config lookup below uses
        # spec.workload_name *after* it has been rewritten to realname, so raw
        # config keyed on the alias name would not be picked up -- confirm
        # this is intentional.
        dicts = [self.ext_loader.get_default_config(realname),
                 alias_config,
                 self._raw_config.get(spec.workload_name),
                 global_dict.get('workload_parameters'),
                 section_dict.get('workload_parameters'),
                 workload_dict.get('workload_parameters')]
        dicts = [d for d in dicts if d is not None]
        value = _merge_config_dicts(*dicts)
        spec.set('workload_parameters', value)

        if not spec.number_of_iterations:
            spec.number_of_iterations = 1

        if spec.match_selectors(selectors):
            # Make sure every instrument this spec needs is also in the
            # run-wide instrumentation list.
            instrumentation_config = self._raw_config['instrumentation']
            for instname in spec.instrumentation:
                if instname not in instrumentation_config:
                    instrumentation_config.append(instname)
            self.workload_specs.append(spec)
|  | ||||
|     def _check_finalized(self): | ||||
|         if not self._finalized: | ||||
|             raise ValueError('Attempting to access configuration before it has been finalized.') | ||||
|  | ||||
|  | ||||
def _load_raw_struct(source):
    """Load a raw dict config structure from the specified source.

    :param source: either the path of a config file on the host, or an
                   already-parsed ``dict``.
    :raises ConfigError: if the path does not exist, or the source is of an
                         unsupported type.
    """
    if isinstance(source, basestring):
        if os.path.isfile(source):
            raw = load_struct_from_file(filepath=source)
        else:
            # Fixed error-message typo: "exit" -> "exist".
            raise ConfigError('File "{}" does not exist'.format(source))
    elif isinstance(source, dict):
        raw = source
    else:
        raise ConfigError('Unknown config source: {}'.format(source))
    return raw
|  | ||||
|  | ||||
def _merge_config_dicts(*args, **kwargs):
    """Invoke ``merge_dicts`` with defaults appropriate for merging
    configuration structures (no list merging or normalization, keep the
    last duplicate, preserve key order)."""
    merge_defaults = {
        'should_merge_lists': False,
        'should_normalize': False,
        'list_duplicates': 'last',
        'dict_type': OrderedDict,
    }
    options = dict((key, kwargs.get(key, default))
                   for key, default in merge_defaults.items())
    return merge_dicts(*args, **options)
							
								
								
									
										418
									
								
								wlauto/core/device.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										418
									
								
								wlauto/core/device.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,418 @@ | ||||
| #    Copyright 2013-2015 ARM Limited | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
| # | ||||
|  | ||||
| """ | ||||
| Base classes for device interfaces. | ||||
|  | ||||
|     :Device: The base class for all devices. This defines the interface that must be | ||||
|              implemented by all devices and therefore any workload and instrumentation | ||||
|              can always rely on. | ||||
|     :AndroidDevice: Implements most of the :class:`Device` interface, and extends it | ||||
|                     with a number of Android-specific methods. | ||||
|     :BigLittleDevice: Subclasses :class:`AndroidDevice` to implement big.LITTLE-specific | ||||
|                       runtime parameters. | ||||
|     :SimpleMulticoreDevice: Subclasses :class:`AndroidDevice` to implement runtime | ||||
|                             parameters for devices with homogeneous cores. | ||||
|  | ||||
| """ | ||||
|  | ||||
| import os | ||||
| import imp | ||||
| import string | ||||
| from collections import OrderedDict | ||||
| from contextlib import contextmanager | ||||
|  | ||||
| from wlauto.core.extension import Extension, ExtensionMeta, AttributeCollection, Parameter | ||||
| from wlauto.exceptions import DeviceError, ConfigError | ||||
| from wlauto.utils.types import list_of_strings, list_of_integers | ||||
|  | ||||
|  | ||||
| __all__ = ['RuntimeParameter', 'CoreParameter', 'Device', 'DeviceMeta'] | ||||
|  | ||||
|  | ||||
class RuntimeParameter(object):
    """
    A runtime parameter, bundling the names of the getter and setter
    methods used to read and write it.

    """

    def __init__(self, name, getter, setter,
                 getter_args=None, setter_args=None,
                 value_name='value', override=False):
        """
        :param name: the name of the parameter.
        :param getter: the getter method which returns the value of this parameter.
        :param setter: the setter method which sets the value of this parameter. The
                       setter always expects to be passed one argument when it is called.
        :param getter_args: keyword arguments to be used when invoking the getter.
        :param setter_args: keyword arguments to be used when invoking the setter.
        :param value_name: name of the keyword argument under which the value is
                           passed to the setter.
        :param override: A ``bool`` specifying whether a parameter of the same name
                         further up the hierarchy should be overridden. If this is
                         ``False`` (the default), an exception will be raised by the
                         ``AttributeCollection`` instead.

        """
        self.name = name
        self.getter = getter
        self.setter = setter
        self.getter_args = getter_args or {}
        self.setter_args = setter_args or {}
        self.value_name = value_name
        self.override = override

    def __repr__(self):
        return self.name

    __str__ = __repr__
|  | ||||
|  | ||||
class CoreParameter(RuntimeParameter):
    """A runtime parameter that will get expanded into a RuntimeParameter for each core type."""

    def get_runtime_parameters(self, core_names):
        """Return one ``RuntimeParameter`` per distinct core type in *core_names*.

        ``$core`` is substituted into the name, getter and setter templates,
        and the core name is injected into the getter/setter keyword
        arguments under the key ``'core'`` (overriding any existing entry).
        """
        params = []
        for core in set(core_names):
            name = string.Template(self.name).substitute(core=core)
            getter = string.Template(self.getter).substitute(core=core)
            setter = string.Template(self.setter).substitute(core=core)
            # dict(x.items() + [...]) only works on Python 2 (list-returning
            # items()); copy-and-update is behaviorally identical and portable.
            getargs = dict(self.getter_args, core=core)
            setargs = dict(self.setter_args, core=core)
            params.append(RuntimeParameter(name, getter, setter, getargs, setargs, self.value_name, self.override))
        return params
|  | ||||
|  | ||||
class DeviceMeta(ExtensionMeta):
    # Metaclass for Device classes: in addition to whatever ExtensionMeta
    # already propagates, declare that ``runtime_parameters`` entries should
    # be collected into an AttributeCollection (presumably merged down the
    # class hierarchy by ExtensionMeta -- see wlauto.core.extension; confirm).

    to_propagate = ExtensionMeta.to_propagate + [
        ('runtime_parameters', RuntimeParameter, AttributeCollection),
    ]
|  | ||||
|  | ||||
class Device(Extension):
    """
    Base class for all devices supported by Workload Automation. Defines
    the interface the rest of WA uses to interact with devices.

        :name: Unique name used to identify the device.
        :platform: The name of the device's platform (e.g. ``Android``) this may
                   be used by workloads and instrumentation to assess whether they
                   can run on the device.
        :working_directory: a string of the directory which is
                            going to be used by the workloads on the device.
        :binaries_directory: a string of the binary directory for
                             the device.
        :has_gpu:     Should be ``True`` if the device has a separate GPU, and
                    ``False`` if graphics processing is done on a CPU.

                    .. note:: Pretty much all devices currently on the market
                                have GPUs, however this may not be the case for some
                                development boards.

        :path_module: The name of one of the modules implementing the os.path
                      interface, e.g. ``posixpath`` or ``ntpath``. You can provide
                      your own implementation rather than relying on one of the
                      standard library modules, in which case you need to specify
                      the *full* path to your module. e.g. '/home/joebloggs/mypathimp.py'
        :parameters: A list of RuntimeParameter objects. The order of the objects
                     is very important as the setters and getters will be called
                     in the order the RuntimeParameter objects were inserted.
        :active_cores: This should be a list of all the currently active cpus in
                      the device in ``'/sys/devices/system/cpu/online'``. The
                      returned list should be read from the device at the time
                      of read request.

    """
    # Python 2 metaclass hook; collects runtime_parameters across subclasses.
    __metaclass__ = DeviceMeta

    parameters = [
        Parameter('core_names', kind=list_of_strings, mandatory=True, default=None,
                  description="""
                  This is a list of all cpu cores on the device with each
                  element being the core type, e.g. ``['a7', 'a7', 'a15']``. The
                  order of the cores must match the order they are listed in
                  ``'/sys/devices/system/cpu'``. So in this case, ``'cpu0'`` must
                  be an A7 core, and ``'cpu2'`` an A15.'
                  """),
        Parameter('core_clusters', kind=list_of_integers, mandatory=True, default=None,
                  description="""
                  This is a list indicating the cluster affinity of the CPU cores,
                  each element correponding to the cluster ID of the core coresponding
                  to it's index. E.g. ``[0, 0, 1]`` indicates that cpu0 and cpu1 are on
                  cluster 0, while cpu2 is on cluster 1.
                  """),
    ]

    # List of RuntimeParameter/CoreParameter objects; collected across the
    # class hierarchy by DeviceMeta.
    runtime_parameters = []

    # These must be overwritten by subclasses.
    name = None
    platform = None
    default_working_directory = None
    has_gpu = None
    path_module = None
    active_cores = None

    def __init__(self, **kwargs):  # pylint: disable=W0613
        """Load the configured ``path_module`` so that ``self.path`` provides
        an os.path-like interface appropriate for the device's file system.

        :raises NotImplementedError: if the subclass did not set ``path_module``.
        :raises DeviceError: if the path module could not be loaded.
        """
        super(Device, self).__init__(**kwargs)
        if not self.path_module:
            raise NotImplementedError('path_module must be specified by the deriving classes.')
        # Resolve path_module relative to the standard library directory, so
        # e.g. 'posixpath' picks up the stdlib module; os.path.join discards
        # the prefix for absolute paths, so a full path to a .py also works.
        libpath = os.path.dirname(os.__file__)
        modpath = os.path.join(libpath, self.path_module)
        if not modpath.lower().endswith('.py'):
            modpath += '.py'
        try:
            self.path = imp.load_source('device_path', modpath)
        except IOError:
            raise DeviceError('Unsupported path module: {}'.format(self.path_module))

    def reset(self):
        """
        Initiate rebooting of the device.

        Added in version 2.1.3.

        """
        raise NotImplementedError()

    def boot(self, *args, **kwargs):
        """
        Perform the steps necessary to boot the device to the point where it is ready
        to accept other commands.

        Changed in version 2.1.3: no longer expected to wait until boot completes.

        """
        raise NotImplementedError()

    def connect(self, *args, **kwargs):
        """
        Establish a connection to the device that will be used for subsequent commands.

        Added in version 2.1.3.
        """
        raise NotImplementedError()

    def disconnect(self):
        """ Close the established connection to the device. """
        raise NotImplementedError()

    def initialize(self, context, *args, **kwargs):
        """
        Default implementation just calls through to init(). May be overridden by
        specialised abstract sub-classes to implement platform-specific initialization
        without requiring concrete implementations to explicitly invoke parent's init().

        Added in version 2.1.3.

        """
        self.init(context, *args, **kwargs)

    def init(self, context, *args, **kwargs):
        """
        Initialize the device. This method *must* be called after a device reboot before
        any other commands can be issued, however it may also be called without rebooting.

        It is up to device-specific implementations to identify what initialisation needs
        to be performed on a particular invocation. Bear in mind that no assumptions can be
        made about the state of the device prior to the initiation of workload execution,
        so full initialisation must be performed at least once, even if no reboot has occurred.
        After that, the device-specific implementation may choose to skip initialization if
        the device has not been rebooted; it is up to the implementation to keep track of
        that, however.

        All arguments are device-specific (see the documentation for your device).

        """
        pass

    def ping(self):
        """
        This must return successfully if the device is able to receive commands, or must
        raise :class:`wlauto.exceptions.DeviceUnresponsiveError` if the device cannot respond.

        """
        raise NotImplementedError()

    def get_runtime_parameter_names(self):
        """Return the names of all (core-expanded) runtime parameters."""
        return [p.name for p in self._expand_runtime_parameters()]

    def get_runtime_parameters(self):
        """ returns the runtime parameters that have been set. """
        # pylint: disable=cell-var-from-loop
        runtime_parameters = OrderedDict()
        for rtp in self._expand_runtime_parameters():
            if not rtp.getter:
                # Write-only parameter; nothing to report.
                continue
            getter = getattr(self, rtp.getter)
            rtp_value = getter(**rtp.getter_args)
            runtime_parameters[rtp.name] = rtp_value
        return runtime_parameters

    def set_runtime_parameters(self, params):
        """
        The parameters are taken from the keyword arguments and are specific to
        a particular device. See the device documentation.

        :raises ConfigError: if any parameter name is not recognised
                             (matching is case-insensitive).

        """
        runtime_parameters = self._expand_runtime_parameters()
        rtp_map = {rtp.name.lower(): rtp for rtp in runtime_parameters}

        # Normalize to lower case for case-insensitive matching.
        params = OrderedDict((k.lower(), v) for k, v in params.iteritems())

        expected_keys = rtp_map.keys()
        if not set(params.keys()) <= set(expected_keys):
            unknown_params = list(set(params.keys()).difference(set(expected_keys)))
            raise ConfigError('Unknown runtime parameter(s): {}'.format(unknown_params))

        for param in params:
            rtp = rtp_map[param]
            setter = getattr(self, rtp.setter)
            # NOTE(review): dict(a.items() + [...]) relies on Python 2's
            # list-returning items(); this would raise TypeError on Python 3.
            args = dict(rtp.setter_args.items() + [(rtp.value_name, params[rtp.name.lower()])])
            setter(**args)

    def capture_screen(self, filepath):
        """Captures the current device screen into the specified file in a PNG format."""
        raise NotImplementedError()

    def get_properties(self, output_path):
        """Captures and saves the device configuration properties version and
         any other relevant information. Return them in a dict"""
        raise NotImplementedError()

    def listdir(self, path, **kwargs):
        """ List the contents of the specified directory. """
        raise NotImplementedError()

    def push_file(self, source, dest):
        """ Push a file from the host file system onto the device. """
        raise NotImplementedError()

    def pull_file(self, source, dest):
        """ Pull a file from device system onto the host file system. """
        raise NotImplementedError()

    def delete_file(self, filepath):
        """ Delete the specified file on the device. """
        raise NotImplementedError()

    def file_exists(self, filepath):
        """ Check if the specified file or directory exist on the device. """
        raise NotImplementedError()

    def get_pids_of(self, process_name):
        """ Returns a list of PIDs of the specified process name. """
        raise NotImplementedError()

    def kill(self, pid, as_root=False):
        """ Kill the process with the specified PID. """
        raise NotImplementedError()

    def killall(self, process_name, as_root=False):
        """ Kill all running processes with the specified name. """
        raise NotImplementedError()

    def install(self, filepath, **kwargs):
        """ Install the specified file on the device. What "install" means is device-specific
        and may possibly also depend on the type of file."""
        raise NotImplementedError()

    def uninstall(self, filepath):
        """ Uninstall the specified file on the device. What "uninstall" means is device-specific
        and may possibly also depend on the type of file."""
        raise NotImplementedError()

    def execute(self, command, timeout=None, **kwargs):
        """
        Execute the specified command on the device and return the output.

        :param command: Command to be executed on the device.
        :param timeout: If the command does not return after the specified time,
                        execute() will abort with an error. If there is no timeout for
                        the command, this should be set to 0 or None.

        Other device-specific keyword arguments may also be specified.

        :returns: The stdout output from the command.

        """
        raise NotImplementedError()

    def set_sysfile_value(self, filepath, value, verify=True):
        """
        Write the specified value to the specified file on the device
        and verify that the value has actually been written.

        :param file: The file to be modified.
        :param value: The value to be written to the file. Must be
                      an int or a string convertable to an int.
        :param verify: Specifies whether the value should be verified, once written.

        Should raise DeviceError if it could not write the value.

        """
        raise NotImplementedError()

    def get_sysfile_value(self, sysfile, kind=None):
        """
        Get the contents of the specified sysfile.

        :param sysfile: The file whose contents will be returned.

        :param kind: The type of value to be expected in the sysfile. This can
                     be any Python callable that takes a single str argument.
                     If not specified or is None, the contents will be returned
                     as a string.

        """
        raise NotImplementedError()

    def start(self):
        """
        This gets invoked before an iteration is started and is intended to help the
        device manage any internal supporting functions.

        """
        pass

    def stop(self):
        """
        This gets invoked after iteration execution has completed and is intended to help
        the device manage any internal supporting functions.

        """
        pass

    def __str__(self):
        return 'Device<{}>'.format(self.name)

    __repr__ = __str__

    def _expand_runtime_parameters(self):
        """Expand CoreParameters into per-core RuntimeParameters; plain
        RuntimeParameters are passed through unchanged."""
        expanded_params = []
        for param in self.runtime_parameters:
            if isinstance(param, CoreParameter):
                expanded_params.extend(param.get_runtime_parameters(self.core_names))  # pylint: disable=no-member
            else:
                expanded_params.append(param)
        return expanded_params

    @contextmanager
    def _check_alive(self):
        # Wrap a device interaction; on failure, ping() first so that an
        # unresponsive device surfaces as DeviceUnresponsiveError rather
        # than the original error. NOTE(review): ``raise e`` resets the
        # original traceback on Python 2; a bare ``raise`` would preserve it.
        try:
            yield
        except Exception as e:
            self.ping()
            raise e
|  | ||||
							
								
								
									
										75
									
								
								wlauto/core/entry_point.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										75
									
								
								wlauto/core/entry_point.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,75 @@ | ||||
| #    Copyright 2013-2015 ARM Limited | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
| # | ||||
|  | ||||
|  | ||||
| import sys | ||||
| import argparse | ||||
| import logging | ||||
|  | ||||
| from wlauto.core.bootstrap import settings | ||||
| from wlauto.core.extension_loader import ExtensionLoader | ||||
| from wlauto.exceptions import WAError | ||||
| from wlauto.utils.misc import get_traceback | ||||
| from wlauto.utils.log import init_logging | ||||
| from wlauto.utils.cli import init_argument_parser | ||||
| from wlauto.utils.doc import format_body | ||||
|  | ||||
|  | ||||
| import warnings | ||||
| warnings.filterwarnings(action='ignore', category=UserWarning, module='zope') | ||||
|  | ||||
|  | ||||
| logger = logging.getLogger('command_line') | ||||
|  | ||||
|  | ||||
def load_commands(subparsers):
    """Discover all available WA commands and register each one, letting it
    add its own subparser to *subparsers*."""
    loader = ExtensionLoader(paths=settings.extension_paths)
    for cmd in loader.list_commands():
        settings.commands[cmd.name] = loader.get_command(cmd.name, subparsers=subparsers)
|  | ||||
|  | ||||
def main():
    """Entry point for the ``wa`` command line tool.

    Parses arguments, dispatches to the selected subcommand and translates
    outcomes into process exit codes: the subcommand's own code on success,
    1 for a WA error, 2 for an unexpected error, 3 on CTRL-C.
    """
    try:
        description = ("Execute automated workloads on a remote device and process "
                       "the resulting output.\n\nUse \"wa <subcommand> -h\" to see "
                       "help for individual subcommands.")
        parser = argparse.ArgumentParser(description=format_body(description, 80),
                                         prog='wa',
                                         formatter_class=argparse.RawDescriptionHelpFormatter,
                                         )
        init_argument_parser(parser)
        load_commands(parser.add_subparsers(dest='command'))  # each command will add its own subparser
        args = parser.parse_args()
        settings.verbosity = args.verbose
        settings.debug = args.debug
        if args.config:
            settings.update(args.config)
        init_logging(settings.verbosity)

        command = settings.commands[args.command]
        sys.exit(command.execute(args))

    except KeyboardInterrupt:
        logging.info('Got CTRL-C. Aborting.')
        sys.exit(3)
    # 'except X as e' (valid on Python 2.6+) replaces the removed-in-Python-3
    # 'except X, e' form, matching the style used elsewhere in this package.
    except WAError as e:
        logging.critical(e)
        sys.exit(1)
    except Exception as e:  # pylint: disable=broad-except
        tb = get_traceback()
        logging.critical(tb)
        logging.critical('{}({})'.format(e.__class__.__name__, e))
        sys.exit(2)
|  | ||||
							
								
								
									
										798
									
								
								wlauto/core/execution.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										798
									
								
								wlauto/core/execution.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,798 @@ | ||||
| #    Copyright 2013-2015 ARM Limited | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
| # | ||||
|  | ||||
| # pylint: disable=no-member | ||||
|  | ||||
| """ | ||||
| This module contains the execution logic for Workload Automation. It defines the | ||||
| following actors: | ||||
|  | ||||
|     WorkloadSpec: Identifies the workload to be run and defines parameters under | ||||
|                   which it should be executed. | ||||
|  | ||||
|     Executor: Responsible for the overall execution process. It instantiates | ||||
|               and/or initialises the other actors, does any necessary validation | ||||
|               and kicks off the whole process. | ||||
|  | ||||
|     Execution Context: Provides information about the current state of run | ||||
|                        execution to instrumentation. | ||||
|  | ||||
|     RunInfo: Information about the current run. | ||||
|  | ||||
|     Runner: This executes workload specs that are passed to it. It goes through | ||||
|             stages of execution, emitting an appropriate signal at each step to | ||||
|             allow instrumentation to do its stuff. | ||||
|  | ||||
| """ | ||||
| import os | ||||
| import uuid | ||||
| import logging | ||||
| import subprocess | ||||
| import random | ||||
| from copy import copy | ||||
| from datetime import datetime | ||||
| from contextlib import contextmanager | ||||
| from collections import Counter, defaultdict, OrderedDict | ||||
| from itertools import izip_longest | ||||
|  | ||||
| import wlauto.core.signal as signal | ||||
| from wlauto.core import instrumentation | ||||
| from wlauto.core.bootstrap import settings | ||||
| from wlauto.core.extension import Artifact | ||||
| from wlauto.core.configuration import RunConfiguration | ||||
| from wlauto.core.extension_loader import ExtensionLoader | ||||
| from wlauto.core.resolver import ResourceResolver | ||||
| from wlauto.core.result import ResultManager, IterationResult, RunResult | ||||
| from wlauto.exceptions import (WAError, ConfigError, TimeoutError, InstrumentError, | ||||
|                                DeviceError, DeviceNotRespondingError) | ||||
| from wlauto.utils.misc import ensure_directory_exists as _d, get_traceback, merge_dicts, format_duration | ||||
|  | ||||
|  | ||||
| # The maximum number of reboot attempts for an iteration. | ||||
| MAX_REBOOT_ATTEMPTS = 3 | ||||
|  | ||||
| # If something went wrong during device initialization, wait this | ||||
| # long (in seconds) before retrying. This is necessary, as retrying | ||||
| # immediately may not give the device enough time to recover to be able | ||||
| # to reboot. | ||||
| REBOOT_DELAY = 3 | ||||
|  | ||||
|  | ||||
class RunInfo(object):
    """
    Metadata about a single WA run: its unique ID, start/end timing,
    project and run naming, free-form notes, and the properties of the
    device it executed on.

    """

    def __init__(self, config):
        self.config = config
        self.uuid = uuid.uuid4()  # unique identifier for this run
        self.start_time = None    # set when the run actually starts
        self.end_time = None
        self.duration = None
        self.project = config.project
        self.project_stage = config.project_stage
        self.run_name = config.run_name
        self.notes = None
        self.device_properties = {}

    def to_dict(self):
        """Serialize the run info, flattening the config into the result."""
        d = copy(self.__dict__)
        d['uuid'] = str(d['uuid'])
        del d['config']
        return merge_dicts(d, self.config.to_dict())
|  | ||||
|  | ||||
class ExecutionContext(object):
    """
    Provides a context for instrumentation. Keeps track of things like
    current workload and iteration.

    This class also provides two status members that can be used by workloads
    and instrumentation to keep track of arbitrary state. ``result``
    is reset on each new iteration of a workload; run_status is maintained
    throughout a Workload Automation run.

    """

    # These are the artifacts generated by the core framework.
    default_run_artifacts = [
        Artifact('runlog', 'run.log', 'log', mandatory=True,
                 description='The log for the entire run.'),
    ]

    @property
    def current_iteration(self):
        """1-based iteration count for the current job's spec; None between jobs."""
        if self.current_job:
            spec_id = self.current_job.spec.id
            return self.job_iteration_counts[spec_id]
        else:
            return None

    @property
    def workload(self):
        """The workload of the currently executing spec (None between jobs)."""
        return getattr(self.spec, 'workload', None)

    @property
    def spec(self):
        """The spec of the currently executing job (None between jobs)."""
        return getattr(self.current_job, 'spec', None)

    @property
    def result(self):
        """The result object of the currently executing job (None between jobs)."""
        return getattr(self.current_job, 'result', None)

    def __init__(self, device, config):
        self.device = device
        self.config = config
        self.reboot_policy = config.reboot_policy
        self.output_directory = None
        self.current_job = None
        self.resolver = None
        self.last_error = None
        self.run_info = None
        self.run_result = None
        self.run_output_directory = settings.output_directory
        self.host_working_directory = settings.meta_directory
        self.iteration_artifacts = None
        self.run_artifacts = copy(self.default_run_artifacts)
        self.job_iteration_counts = defaultdict(int)
        self.aborted = False
        # Record the agenda and any config files as run-level meta artifacts
        # so the inputs of the run are preserved with its output.
        if settings.agenda:
            self.run_artifacts.append(Artifact('agenda',
                                               os.path.join(self.host_working_directory,
                                                            os.path.basename(settings.agenda)),
                                               'meta',
                                               mandatory=True,
                                               description='Agenda for this run.'))
        for i in xrange(1, settings.config_count + 1):
            self.run_artifacts.append(Artifact('config_{}'.format(i),
                                               os.path.join(self.host_working_directory,
                                                            'config_{}.py'.format(i)),
                                               kind='meta',
                                               mandatory=True,
                                               description='Config file used for the run.'))

    def initialize(self):
        """Create the run output directory and the run-level info/result objects."""
        if not os.path.isdir(self.run_output_directory):
            os.makedirs(self.run_output_directory)
        self.output_directory = self.run_output_directory
        self.resolver = ResourceResolver(self.config)
        self.run_info = RunInfo(self.config)
        self.run_result = RunResult(self.run_info)

    def next_job(self, job):
        """Invoked by the runner when starting a new iteration of workload execution."""
        self.current_job = job
        self.job_iteration_counts[self.spec.id] += 1
        self.current_job.result.iteration = self.current_iteration
        if not self.aborted:
            outdir_name = '_'.join(map(str, [self.spec.label, self.spec.id, self.current_iteration]))
            self.output_directory = _d(os.path.join(self.run_output_directory, outdir_name))
            self.iteration_artifacts = [wa for wa in self.workload.artifacts]

    def end_job(self):
        """Invoked by the runner when a job finishes; resets to run-level state."""
        if self.current_job.result.status == IterationResult.ABORTED:
            self.aborted = True
        self.current_job = None
        self.output_directory = self.run_output_directory

    def add_artifact(self, name, path, kind, *args, **kwargs):
        """Add an artifact at the appropriate scope for the current state:
        iteration-scoped while a job is running, run-scoped otherwise."""
        if self.current_job is None:
            self.add_run_artifact(name, path, kind, *args, **kwargs)
        else:
            self.add_iteration_artifact(name, path, kind, *args, **kwargs)

    def add_run_artifact(self, name, path, kind, *args, **kwargs):
        """Register an artifact that belongs to the run as a whole."""
        path = _check_artifact_path(path, self.run_output_directory)
        # Fixed: run-scoped artifacts must carry the RUN level (this previously
        # passed Artifact.ITERATION, swapped with add_iteration_artifact below).
        self.run_artifacts.append(Artifact(name, path, kind, Artifact.RUN, *args, **kwargs))

    def add_iteration_artifact(self, name, path, kind, *args, **kwargs):
        """Register an artifact that belongs to the current iteration."""
        path = _check_artifact_path(path, self.output_directory)
        self.iteration_artifacts.append(Artifact(name, path, kind, Artifact.ITERATION, *args, **kwargs))

    def get_artifact(self, name):
        """Look up an artifact by name, checking iteration artifacts first;
        returns None if no artifact with that name exists."""
        if self.iteration_artifacts:
            for art in self.iteration_artifacts:
                if art.name == name:
                    return art
        for art in self.run_artifacts:
            if art.name == name:
                return art
        return None
|  | ||||
|  | ||||
| def _check_artifact_path(path, rootpath): | ||||
|     if path.startswith(rootpath): | ||||
|         return os.path.abspath(path) | ||||
|     rootpath = os.path.abspath(rootpath) | ||||
|     full_path = os.path.join(rootpath, path) | ||||
|     if not os.path.isfile(full_path): | ||||
|         raise ValueError('Cannot add artifact because {} does not exist.'.format(full_path)) | ||||
|     return full_path | ||||
|  | ||||
|  | ||||
class Executor(object):
    """
    The ``Executor``'s job is to set up the execution context and pass to a ``Runner``
    along with a loaded run specification. Once the ``Runner`` has done its thing,
    the ``Executor`` performs some final reporting before returning.

    The initial context set up involves combining configuration from various sources,
    loading of required workloads, loading and installation of instruments and result
    processors, etc. Static validation of the combined configuration is also performed.

    """
    # pylint: disable=R0915

    def __init__(self):
        self.logger = logging.getLogger('Executor')
        # Flipped to True by the signal callbacks below the first time an
        # error/warning is logged anywhere; used for the final summary.
        self.error_logged = False
        self.warning_logged = False
        self.config = None
        self.ext_loader = None
        self.device = None
        self.context = None

    def execute(self, agenda, selectors=None):  # NOQA
        """
        Execute the run specified by an agenda. Optionally, selectors may be used to only
        execute a subset of the specified agenda.

        Params::

            :agenda: an ``Agenda`` instance to be executed.
            :selectors: A dict mapping selector name to the corresponding values.

        **Selectors**

        Currently, the following selectors are supported:

        ids
            The value must be a sequence of workload specification IDs to be executed. Note
            that if sections are specified in the agenda, the workload specification ID will
            be a combination of the section and workload IDs.

        """
        signal.connect(self._error_signalled_callback, signal.ERROR_LOGGED)
        signal.connect(self._warning_signalled_callback, signal.WARNING_LOGGED)

        self.logger.info('Initializing')
        self.ext_loader = ExtensionLoader(packages=settings.extension_packages,
                                          paths=settings.extension_paths)

        self.logger.debug('Loading run configuration.')
        self.config = RunConfiguration(self.ext_loader)
        for filepath in settings.get_config_paths():
            self.config.load_config(filepath)
        self.config.set_agenda(agenda, selectors)
        self.config.finalize()
        # Preserve the fully-resolved configuration alongside the results.
        config_outfile = os.path.join(settings.meta_directory, 'run_config.json')
        with open(config_outfile, 'w') as wfh:
            self.config.serialize(wfh)

        self.logger.debug('Initialising device configuration.')
        if not self.config.device:
            raise ConfigError('Make sure a device is specified in the config.')
        self.device = self.ext_loader.get_device(self.config.device, **self.config.device_config)
        self.device.validate()

        self.context = ExecutionContext(self.device, self.config)

        self.logger.debug('Loading resource discoverers.')
        self.context.initialize()
        self.context.resolver.load()
        self.context.add_artifact('run_config', config_outfile, 'meta')

        self.logger.debug('Installing instrumentation')
        for name, params in self.config.instrumentation.iteritems():
            instrument = self.ext_loader.get_instrument(name, self.device, **params)
            instrumentation.install(instrument)
        instrumentation.validate()

        self.logger.debug('Installing result processors')
        result_manager = ResultManager()
        for name, params in self.config.result_processors.iteritems():
            processor = self.ext_loader.get_result_processor(name, **params)
            result_manager.install(processor)
        result_manager.validate()

        self.logger.debug('Loading workload specs')
        for workload_spec in self.config.workload_specs:
            workload_spec.load(self.device, self.ext_loader)
            workload_spec.workload.init_resources(self.context)
            workload_spec.workload.validate()

        # Flash the device first if the configuration asks for it.
        if self.config.flashing_config:
            if not self.device.flasher:
                msg = 'flashing_config specified for {} device that does not support flashing.'
                raise ConfigError(msg.format(self.device.name))
            self.logger.debug('Flashing the device')
            self.device.flasher.flash(self.device)

        self.logger.info('Running workloads')
        runner = self._get_runner(result_manager)
        runner.init_queue(self.config.workload_specs)
        runner.run()
        self.execute_postamble()

    def execute_postamble(self):
        """
        This happens after the run has completed. The overall results of the run are
        summarised to the user.

        """
        result = self.context.run_result
        counter = Counter()
        for ir in result.iteration_results:
            counter[ir.status] += 1
        self.logger.info('Done.')
        self.logger.info('Run duration: {}'.format(format_duration(self.context.run_info.duration)))
        status_summary = 'Ran a total of {} iterations: '.format(sum(self.context.job_iteration_counts.values()))
        parts = []
        for status in IterationResult.values:
            if status in counter:
                parts.append('{} {}'.format(counter[status], status))
        self.logger.info(status_summary + ', '.join(parts))
        self.logger.info('Results can be found in {}'.format(settings.output_directory))

        if self.error_logged:
            self.logger.warn('There were errors during execution.')
            self.logger.warn('Please see {}'.format(settings.log_file))
        elif self.warning_logged:
            self.logger.warn('There were warnings during execution.')
            self.logger.warn('Please see {}'.format(settings.log_file))

    def _get_runner(self, result_manager):
        """Select and instantiate the Runner subclass matching the configured
        execution order; raises ConfigError for an unrecognized order."""
        if not self.config.execution_order or self.config.execution_order == 'by_iteration':
            if self.config.reboot_policy == 'each_spec':
                self.logger.info('each_spec reboot policy with the default by_iteration execution order is '
                                 'equivalent to each_iteration policy.')
            runnercls = ByIterationRunner
        elif self.config.execution_order in ['classic', 'by_spec']:
            runnercls = BySpecRunner
        elif self.config.execution_order == 'by_section':
            runnercls = BySectionRunner
        elif self.config.execution_order == 'random':
            runnercls = RandomRunner
        else:
            raise ConfigError('Unexpected execution order: {}'.format(self.config.execution_order))
        return runnercls(self.device, self.context, result_manager)

    def _error_signalled_callback(self):
        # Only the first error matters for the summary, so disconnect after it.
        self.error_logged = True
        signal.disconnect(self._error_signalled_callback, signal.ERROR_LOGGED)

    def _warning_signalled_callback(self):
        # Only the first warning matters for the summary, so disconnect after it.
        self.warning_logged = True
        signal.disconnect(self._warning_signalled_callback, signal.WARNING_LOGGED)
|  | ||||
|  | ||||
class RunnerJob(object):
    """
    A single scheduled execution of a workload spec. One instance is created
    for each of the iterations requested by the spec
    (``RunnerJobDescription.number_of_iterations``).

    """

    def __init__(self, spec):
        self.spec = spec
        self.result = IterationResult(spec)
        self.iteration = None  # assigned by the Runner when the job completes
|  | ||||
|  | ||||
| class Runner(object): | ||||
|     """ | ||||
|     This class is responsible for actually performing a workload automation | ||||
|     run. The main responsibility of this class is to emit appropriate signals | ||||
|     at the various stages of the run to allow things like traces an other | ||||
|     instrumentation to hook into the process. | ||||
|  | ||||
|     This is an abstract base class that defines each step of the run, but not | ||||
|     the order in which those steps are executed, which is left to the concrete | ||||
|     derived classes. | ||||
|  | ||||
|     """ | ||||
    class _RunnerError(Exception):
        """Internal runner error (private to Runner and its subclasses)."""
        pass
|  | ||||
|     @property | ||||
|     def current_job(self): | ||||
|         if self.job_queue: | ||||
|             return self.job_queue[0] | ||||
|         return None | ||||
|  | ||||
|     @property | ||||
|     def previous_job(self): | ||||
|         if self.completed_jobs: | ||||
|             return self.completed_jobs[-1] | ||||
|         return None | ||||
|  | ||||
|     @property | ||||
|     def next_job(self): | ||||
|         if self.job_queue: | ||||
|             if len(self.job_queue) > 1: | ||||
|                 return self.job_queue[1] | ||||
|         return None | ||||
|  | ||||
|     @property | ||||
|     def spec_changed(self): | ||||
|         if self.previous_job is None and self.current_job is not None:  # Start of run | ||||
|             return True | ||||
|         if self.previous_job is not None and self.current_job is None:  # End of run | ||||
|             return True | ||||
|         return self.current_job.spec.id != self.previous_job.spec.id | ||||
|  | ||||
|     @property | ||||
|     def spec_will_change(self): | ||||
|         if self.current_job is None and self.next_job is not None:  # Start of run | ||||
|             return True | ||||
|         if self.current_job is not None and self.next_job is None:  # End of run | ||||
|             return True | ||||
|         return self.current_job.spec.id != self.next_job.spec.id | ||||
|  | ||||
    def __init__(self, device, context, result_manager):
        """
        :param device: the device the workloads will execute on.
        :param context: the ExecutionContext for this run.
        :param result_manager: the ResultManager collecting iteration results.
        """
        self.device = device
        self.context = context
        self.result_manager = result_manager
        self.logger = logging.getLogger('Runner')
        self.job_queue = []       # jobs still to run; head is the current job
        self.completed_jobs = []  # finished jobs; last entry is the most recent
        self._initial_reset = True
|  | ||||
    def init_queue(self, specs):
        """Populate ``job_queue`` from the workload specs; the ordering is
        defined by concrete Runner subclasses."""
        raise NotImplementedError()
|  | ||||
|     def run(self):  # pylint: disable=too-many-branches | ||||
|         self._send(signal.RUN_START) | ||||
|         self._initialize_run() | ||||
|  | ||||
|         try: | ||||
|             while self.job_queue: | ||||
|                 try: | ||||
|                     self._init_job() | ||||
|                     self._run_job() | ||||
|                 except KeyboardInterrupt: | ||||
|                     self.current_job.result.status = IterationResult.ABORTED | ||||
|                     raise | ||||
|                 except Exception, e:  # pylint: disable=broad-except | ||||
|                     self.current_job.result.status = IterationResult.FAILED | ||||
|                     self.current_job.result.add_event(e.message) | ||||
|                     if isinstance(e, DeviceNotRespondingError): | ||||
|                         self.logger.info('Device appears to be unresponsive.') | ||||
|                         if self.context.reboot_policy.can_reboot and self.device.can('reset_power'): | ||||
|                             self.logger.info('Attempting to hard-reset the device...') | ||||
|                             try: | ||||
|                                 self.device.hard_reset() | ||||
|                                 self.device.connect() | ||||
|                             except DeviceError:  # hard_boot not implemented for the device. | ||||
|                                 raise e | ||||
|                         else: | ||||
|                             raise e | ||||
|                     else:  # not a DeviceNotRespondingError | ||||
|                         self.logger.error(e) | ||||
|                 finally: | ||||
|                     self._finalize_job() | ||||
|         except KeyboardInterrupt: | ||||
|             self.logger.info('Got CTRL-C. Finalizing run... (CTRL-C again to abort).') | ||||
|             # Skip through the remaining jobs. | ||||
|             while self.job_queue: | ||||
|                 self.context.next_job(self.current_job) | ||||
|                 self.current_job.result.status = IterationResult.ABORTED | ||||
|                 self._finalize_job() | ||||
|         except DeviceNotRespondingError: | ||||
|             self.logger.info('Device unresponsive and recovery not possible. Skipping the rest of the run.') | ||||
|             self.context.aborted = True | ||||
|             while self.job_queue: | ||||
|                 self.context.next_job(self.current_job) | ||||
|                 self.current_job.result.status = IterationResult.SKIPPED | ||||
|                 self._finalize_job() | ||||
|  | ||||
|         instrumentation.enable_all() | ||||
|         self._finalize_run() | ||||
|         self._process_results() | ||||
|  | ||||
|         self.result_manager.finalize(self.context) | ||||
|         self._send(signal.RUN_END) | ||||
|  | ||||
    def _initialize_run(self):
        """
        Boot (or just connect to) the device per the reboot policy, initialize
        it and the result manager, and emit RUN_INIT. Raises InstrumentError
        if any instrument reported a failure during initialization.

        """
        self.context.run_info.start_time = datetime.utcnow()
        if self.context.reboot_policy.perform_initial_boot:
            self.logger.info('\tBooting device')
            with self._signal_wrap('INITIAL_BOOT'):
                self._reboot_device()
        else:
            self.logger.info('Connecting to device')
            self.device.connect()
        self.logger.info('Initializing device')
        self.device.initialize(self.context)

        # Capture the device's properties as part of the run metadata.
        props = self.device.get_properties(self.context)
        self.context.run_info.device_properties = props
        self.result_manager.initialize(self.context)
        self._send(signal.RUN_INIT)

        if instrumentation.check_failures():
            raise InstrumentError('Detected failure(s) during instrumentation initialization.')
|  | ||||
    def _init_job(self):
        """Mark the head-of-queue job as running and make it current in the context."""
        self.current_job.result.status = IterationResult.RUNNING
        self.context.next_job(self.current_job)
|  | ||||
    def _run_job(self):   # pylint: disable=too-many-branches
        """
        Execute the current job: honour the flashing/reboot policy, enable the
        spec's instrumentation, and run one workload iteration bracketed by
        spec- and iteration-level signals. Disabled specs are skipped outright.

        """
        spec = self.current_job.spec
        if not spec.enabled:
            # Specs get disabled when an earlier iteration failed during setup.
            self.logger.info('Skipping workload %s (iteration %s)', spec, self.context.current_iteration)
            self.current_job.result.status = IterationResult.SKIPPED
            return

        self.logger.info('Running workload %s (iteration %s)', spec, self.context.current_iteration)
        if spec.flash:
            if not self.context.reboot_policy.can_reboot:
                raise ConfigError('Cannot flash as reboot_policy does not permit rebooting.')
            if not self.device.can('flash'):
                raise DeviceError('Device does not support flashing.')
            self._flash_device(spec.flash)
        elif not self.completed_jobs:
            # Never reboot on the very first job of a run, as we would have done
            # the initial reboot if a reboot was needed.
            pass
        elif self.context.reboot_policy.reboot_on_each_spec and self.spec_changed:
            self.logger.debug('Rebooting on spec change.')
            self._reboot_device()
        elif self.context.reboot_policy.reboot_on_each_iteration:
            self.logger.debug('Rebooting on iteration.')
            self._reboot_device()

        # Only the instrumentation named by this spec is active for the job.
        instrumentation.disable_all()
        instrumentation.enable(spec.instrumentation)
        self.device.start()

        if self.spec_changed:
            self._send(signal.WORKLOAD_SPEC_START)
        self._send(signal.ITERATION_START)

        try:
            setup_ok = False
            with self._handle_errors('Setting up device parameters'):
                self.device.set_runtime_parameters(spec.runtime_parameters)
                setup_ok = True

            if setup_ok:
                with self._handle_errors('running {}'.format(spec.workload.name)):
                    self.current_job.result.status = IterationResult.RUNNING
                    self._run_workload_iteration(spec.workload)
            else:
                # _handle_errors swallowed a setup failure; give up on this spec.
                self.logger.info('\tSkipping the rest of the iterations for this spec.')
                spec.enabled = False
        except KeyboardInterrupt:
            self._send(signal.ITERATION_END)
            self._send(signal.WORKLOAD_SPEC_END)
            raise
        else:
            self._send(signal.ITERATION_END)
            if self.spec_will_change or not spec.enabled:
                self._send(signal.WORKLOAD_SPEC_END)
        finally:
            self.device.stop()
|  | ||||
|     def _finalize_job(self): | ||||
|         self.context.run_result.iteration_results.append(self.current_job.result) | ||||
|         self.job_queue[0].iteration = self.context.current_iteration | ||||
|         self.completed_jobs.append(self.job_queue.pop(0)) | ||||
|         self.context.end_job() | ||||
|  | ||||
    def _finalize_run(self):
        """Emit RUN_FIN, disconnect from the device, and record the run's
        end time and total duration."""
        self.logger.info('Finalizing.')
        self._send(signal.RUN_FIN)

        with self._handle_errors('Disconnecting from the device'):
            self.device.disconnect()

        info = self.context.run_info
        info.end_time = datetime.utcnow()
        info.duration = info.end_time - info.start_time
|  | ||||
    def _process_results(self):
        """Run overall (run-level) result processing, flagging any
        instrumentation failures on the run result first."""
        self.logger.info('Processing overall results')
        with self._signal_wrap('OVERALL_RESULTS_PROCESSING'):
            if instrumentation.check_failures():
                self.context.run_result.non_iteration_errors = True
            self.result_manager.process_run_result(self.context.run_result, self.context)
|  | ||||
    def _run_workload_iteration(self, workload):
        """
        Drive a single iteration through its lifecycle -- setup, run, result
        update, teardown -- with the appropriate signals around each stage.
        Teardown always executes, and the result is always handed to the
        result manager, regardless of earlier failures.

        """
        self.logger.info('\tSetting up')
        with self._signal_wrap('WORKLOAD_SETUP'):
            try:
                workload.setup(self.context)
            except:
                # A setup failure invalidates the remaining iterations of this
                # spec, so disable it before re-raising (bare except is
                # deliberate: whatever the error, the spec must be disabled).
                self.logger.info('\tSkipping the rest of the iterations for this spec.')
                self.current_job.spec.enabled = False
                raise
        try:

            self.logger.info('\tExecuting')
            with self._handle_errors('Running workload'):
                with self._signal_wrap('WORKLOAD_EXECUTION'):
                    workload.run(self.context)

            self.logger.info('\tProcessing result')
            self._send(signal.BEFORE_WORKLOAD_RESULT_UPDATE)
            try:
                if self.current_job.result.status != IterationResult.FAILED:
                    with self._handle_errors('Processing workload result',
                                             on_error_status=IterationResult.PARTIAL):
                        workload.update_result(self.context)
                        self._send(signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE)

                # Still RUNNING means nothing failed along the way.
                if self.current_job.result.status == IterationResult.RUNNING:
                    self.current_job.result.status = IterationResult.OK
            finally:
                self._send(signal.AFTER_WORKLOAD_RESULT_UPDATE)

        finally:
            self.logger.info('\tTearing down')
            with self._handle_errors('Tearing down workload',
                                     on_error_status=IterationResult.NONCRITICAL):
                with self._signal_wrap('WORKLOAD_TEARDOWN'):
                    workload.teardown(self.context)
            self.result_manager.add_result(self.current_job.result, self.context)
|  | ||||
    def _flash_device(self, flashing_params):
        """Flash the device with the given parameters and reconnect to it,
        bracketed by BEFORE/AFTER FLASHING signals."""
        with self._signal_wrap('FLASHING'):
            self.device.flash(**flashing_params)
            self.device.connect()
|  | ||||
    def _reboot_device(self):
        """
        Reboot and reconnect to the device, retrying up to MAX_REBOOT_ATTEMPTS
        times. Note: _handle_errors suppresses boot failures, so on failure the
        ``break`` is never reached and the loop retries; the for/else raises
        only once every attempt has been exhausted.

        """
        with self._signal_wrap('BOOT'):
            for reboot_attempts in xrange(MAX_REBOOT_ATTEMPTS):
                if reboot_attempts:
                    self.logger.info('\tRetrying...')
                with self._handle_errors('Rebooting device'):
                    self.device.boot(**self.current_job.spec.boot_parameters)
                    break
            else:
                raise DeviceError('Could not reboot device; max reboot attempts exceeded.')
            self.device.connect()
|  | ||||
    def _send(self, s):
        """Dispatch signal ``s`` with this runner as the sender and the
        execution context as the payload."""
        signal.send(s, self, self.context)
|  | ||||
|     def _take_screenshot(self, filename): | ||||
|         if self.context.output_directory: | ||||
|             filepath = os.path.join(self.context.output_directory, filename) | ||||
|         else: | ||||
|             filepath = os.path.join(settings.output_directory, filename) | ||||
|         self.device.capture_screen(filepath) | ||||
|  | ||||
    @contextmanager
    def _handle_errors(self, action, on_error_status=IterationResult.FAILED):
        """
        Wrap a block of work, converting failures into job-result events
        instead of propagating them. KeyboardInterrupt and
        DeviceNotRespondingError are always re-raised; WA/timeout errors and
        generic exceptions are logged, recorded on the current job's result
        with status ``on_error_status``, and then suppressed.

        :param action: human-readable description of the work for log
                       messages; may be None.
        :param on_error_status: status assigned to the current job's result on
                                failure (defaults to FAILED).

        """
        try:
            if action is not None:
                self.logger.debug(action)
            yield
        except (KeyboardInterrupt, DeviceNotRespondingError):
            raise
        except (WAError, TimeoutError), we:
            # Verify the device is still responsive before continuing.
            self.device.ping()
            if self.current_job:
                self.current_job.result.status = on_error_status
                self.current_job.result.add_event(str(we))
            try:
                self._take_screenshot('error.png')
            except Exception, e:  # pylint: disable=W0703
                # We're already in error state, so the fact that taking a
                # screenshot failed is not surprising...
                pass
            if action:
                # De-capitalize so the message reads naturally mid-sentence.
                action = action[0].lower() + action[1:]
            self.logger.error('Error while {}:\n\t{}'.format(action, we))
        except Exception, e:  # pylint: disable=W0703
            error_text = '{}("{}")'.format(e.__class__.__name__, e)
            if self.current_job:
                self.current_job.result.status = on_error_status
                self.current_job.result.add_event(error_text)
            self.logger.error('Error while {}'.format(action))
            self.logger.error(error_text)
            if isinstance(e, subprocess.CalledProcessError):
                self.logger.error('Got:')
                self.logger.error(e.output)
            tb = get_traceback()
            self.logger.error(tb)
|  | ||||
|     @contextmanager | ||||
|     def _signal_wrap(self, signal_name): | ||||
|         """Wraps the suite in before/after signals, ensuring | ||||
|         that after signal is always sent.""" | ||||
|         before_signal = getattr(signal, 'BEFORE_' + signal_name) | ||||
|         success_signal = getattr(signal, 'SUCCESSFUL_' + signal_name) | ||||
|         after_signal = getattr(signal, 'AFTER_' + signal_name) | ||||
|         try: | ||||
|             self._send(before_signal) | ||||
|             yield | ||||
|             self._send(success_signal) | ||||
|         finally: | ||||
|             self._send(after_signal) | ||||
|  | ||||
|  | ||||
class BySpecRunner(Runner):
    """
    The "classic" scheduling policy: every iteration of a workload spec is
    executed before moving on to the next spec.

    """

    def init_queue(self, specs):
        """Queue jobs spec-by-spec: all iterations of a spec are adjacent."""
        queue = []
        for spec in specs:
            queue.extend(RunnerJob(spec) for _ in xrange(spec.number_of_iterations))
        self.job_queue = queue
|  | ||||
|  | ||||
class BySectionRunner(Runner):
    """
    Runs the first iteration for all benchmarks first, before proceeding to the next iteration,
    i.e. A1, B1, C1, A2, B2, C2...  instead of  A1, A2, B1, B2, C1, C2...

    If multiple sections were specified in the agenda, this will run all specs for the first section
    followed by all specs for the second section, etc.

    e.g. given sections X and Y, and global specs A and B, with 2 iterations, this will run

    X.A1, X.B1, Y.A1, Y.B1, X.A2, X.B2, Y.A2, Y.B2

    """

    def init_queue(self, specs):
        """Queue jobs so that the first iteration of every spec runs before
        any spec's second iteration."""
        # One job list per spec, then interleave them. izip_longest pads the
        # shorter lists with None; the truthiness filter drops that padding.
        jobs = [[RunnerJob(s) for _ in xrange(s.number_of_iterations)] for s in specs]
        self.job_queue = [j for spec_jobs in izip_longest(*jobs) for j in spec_jobs if j]
|  | ||||
|  | ||||
class ByIterationRunner(Runner):
    """
    Runs the first iteration for all benchmarks first, before proceeding to the next iteration,
    i.e. A1, B1, C1, A2, B2, C2...  instead of  A1, A2, B1, B2, C1, C2...

    If multiple sections were specified in the agenda, this will run all sections for the first global
    spec first, followed by all sections for the second spec, etc.

    e.g. given sections X and Y, and global specs A and B, with 2 iterations, this will run

    X.A1, Y.A1, X.B1, Y.B1, X.A2, Y.A2, X.B2, Y.B2

    """

    def init_queue(self, specs):
        """Queue jobs iteration-by-iteration, with sections interleaved
        within each global spec."""
        # Group specs by section (preserving first-seen section order), then
        # interleave the sections so each global spec's variants are adjacent.
        sections = OrderedDict()
        for s in specs:
            if s.section_id not in sections:
                sections[s.section_id] = []
            sections[s.section_id].append(s)
        # izip_longest pads with None; the truthiness filters drop the padding.
        specs = [s for section_specs in izip_longest(*sections.values()) for s in section_specs if s]
        jobs = [[RunnerJob(s) for _ in xrange(s.number_of_iterations)] for s in specs]
        self.job_queue = [j for spec_jobs in izip_longest(*jobs) for j in spec_jobs if j]
|  | ||||
|  | ||||
class RandomRunner(Runner):
    """
    Executes the queued jobs in a random order.

    """

    def init_queue(self, specs):
        """Build one job per requested iteration of each spec, then shuffle."""
        shuffled = []
        for spec in specs:
            for _ in xrange(spec.number_of_iterations):
                shuffled.append(RunnerJob(spec))
        random.shuffle(shuffled)
        self.job_queue = shuffled
							
								
								
									
										652
									
								
								wlauto/core/extension.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										652
									
								
								wlauto/core/extension.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,652 @@ | ||||
| #    Copyright 2013-2015 ARM Limited | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
| # | ||||
|  | ||||
|  | ||||
| # pylint: disable=E1101 | ||||
| import os | ||||
| import logging | ||||
| import inspect | ||||
| from copy import copy | ||||
| from collections import OrderedDict | ||||
|  | ||||
| from wlauto.core.bootstrap import settings | ||||
| from wlauto.exceptions import ValidationError, ConfigError | ||||
| from wlauto.utils.misc import isiterable, ensure_directory_exists as _d, get_article | ||||
| from wlauto.utils.types import identifier | ||||
|  | ||||
|  | ||||
class AttributeCollection(object):
    """
    Accumulator for extension attribute objects (such as Parameters or Artifacts). This will
    replace any class member list accumulating such attributes through the magic of
    metaprogramming\ [*]_.

    .. [*] which is totally safe and not going backfire in any way...

    """

    @property
    def values(self):
        # Attributes in insertion order (backed by an OrderedDict).
        return self._attrs.values()

    def __init__(self, attrcls):
        # attrcls: the class every added item is converted to (e.g. Parameter).
        self._attrcls = attrcls
        self._attrs = OrderedDict()

    def add(self, p):
        """Add *p* (converted to the attribute class) to the collection.
        If an attribute of the same name exists and *p* has override set,
        *p*'s non-None fields are merged over a copy of the existing one."""
        p = self._to_attrcls(p)
        if p.name in self._attrs:
            if p.override:
                # Merge onto a copy so the original (possibly shared with a
                # base class) is not mutated.
                newp = copy(self._attrs[p.name])
                for a, v in p.__dict__.iteritems():
                    if v is not None:
                        setattr(newp, a, v)
                self._attrs[p.name] = newp
            else:
                # Duplicate attribute condition is checked elsewhere.
                pass
        else:
            self._attrs[p.name] = p

    # Alias so the collection can be used like a list by existing code.
    append = add

    def __str__(self):
        return 'AC({})'.format(map(str, self._attrs.values()))

    __repr__ = __str__

    def _to_attrcls(self, p):
        # Accept a bare name, positional tuple/list, keyword dict, or an
        # already-constructed attribute instance.
        if isinstance(p, basestring):
            p = self._attrcls(p)
        elif isinstance(p, tuple) or isinstance(p, list):
            p = self._attrcls(*p)
        elif isinstance(p, dict):
            p = self._attrcls(**p)
        elif not isinstance(p, self._attrcls):
            raise ValueError('Invalid parameter value: {}'.format(p))
        if (p.name in self._attrs and not p.override and
                p.name != 'modules'):  # TODO: HACK due to "diamond dependecy" in workloads...
            raise ValueError('Attribute {} has already been defined.'.format(p.name))
        return p

    def __iadd__(self, other):
        for p in other:
            self.add(p)
        return self

    def __iter__(self):
        return iter(self.values)

    def __contains__(self, p):
        # Membership is tested by attribute *name*, not by instance.
        return p in self._attrs

    def __getitem__(self, i):
        return self._attrs[i]

    def __len__(self):
        return len(self._attrs)
|  | ||||
|  | ||||
class AliasCollection(AttributeCollection):
    """AttributeCollection specialized for Alias objects; duplicate alias
    names are always an error (no override support)."""

    def __init__(self):
        super(AliasCollection, self).__init__(Alias)

    def _to_attrcls(self, p):
        """
        Convert *p* to an Alias. Tuples/lists must be in the form
        ``(name, {param: value, ...})``.

        :raises ValueError: if *p* cannot be converted, or an alias with the
                            same name has already been added.
        """
        if isinstance(p, tuple) or isinstance(p, list):
            # must be in the form (name, {param: value, ...})
            # Fix: the alias name is the first element; previously p[1] (the
            # params dict) was incorrectly passed as the name as well.
            p = self._attrcls(p[0], **p[1])
        elif not isinstance(p, self._attrcls):
            raise ValueError('Invalid parameter value: {}'.format(p))
        if p.name in self._attrs:
            raise ValueError('Attribute {} has already been defined.'.format(p.name))
        return p
|  | ||||
|  | ||||
class ListCollection(list):
    """A plain-list stand-in for AttributeCollection: accumulates items
    verbatim, ignoring the attribute class it is constructed with."""

    def __init__(self, attrcls):  # pylint: disable=unused-argument
        # attrcls is accepted only for interface compatibility with
        # AttributeCollection; an empty list is all that is needed.
        list.__init__(self)
|  | ||||
|  | ||||
class Param(object):
    """
    This is a generic parameter for an extension. Extensions instantiate this to declare which parameters
    are supported.

    """

    def __init__(self, name, kind=None, mandatory=None, default=None, override=False,
                 allowed_values=None, description=None, constraint=None, global_alias=None):
        """
        Create a new Parameter object.

        :param name: The name of the parameter. This will become an instance member of the
                     extension object to which the parameter is applied, so it must be a valid
                     python  identifier. This is the only mandatory parameter.
        :param kind: The type of parameter this is. This must be a callable that takes an arbitrary
                     object and converts it to the expected type, or raised ``ValueError`` if such
                     conversion is not possible. Most Python standard types -- ``str``, ``int``, ``bool``, etc. --
                     can be used here (though for ``bool``, ``wlauto.utils.misc.as_bool`` is preferred
                     as it intuitively handles strings like ``'false'``). This defaults to ``str`` if
                     not specified.
        :param mandatory: If set to ``True``, then a non-``None`` value for this parameter *must* be
                          provided on extension object construction, otherwise ``ConfigError`` will be
                          raised.
        :param default: The default value for this parameter. If no value is specified on extension
                        construction, this value will be used instead. (Note: if this is specified and
                        is not ``None``, then ``mandatory`` parameter will be ignored).
        :param override: A ``bool`` that specifies whether a parameter of the same name further up the
                         hierarchy should be overridden. If this is ``False`` (the default), an exception
                         will be raised by the ``AttributeCollection`` instead.
        :param allowed_values: This should be the complete list of allowed values for this parameter.
                               Note: ``None`` value will always be allowed, even if it is not in this list.
                               If you want to disallow ``None``, set ``mandatory`` to ``True``.
        :param description: A free-form human-readable description of what this parameter is for.
        :param constraint: If specified, this must be a callable that takes the parameter value
                           as an argument and return a boolean indicating whether the constraint
                           has been satisfied. Alternatively, can be a two-tuple with said callable as
                           the first element and a string describing the constraint as the second.
        :param global_alias: This is an alternative alias for this parameter, unlike the name, this
                             alias will not be namespaced under the owning extension's name (hence the
                             global part). This is introduced primarily for backward compatibility -- so
                             that old extension settings names still work. This should not be used for
                             new parameters.

        """
        self.name = identifier(name)
        if kind is not None and not callable(kind):
            raise ValueError('Kind must be callable.')
        self.kind = kind
        self.mandatory = mandatory
        self.default = default
        self.override = override
        self.allowed_values = allowed_values
        self.description = description
        # Apply the str default only when not overriding: an overriding
        # parameter left with kind=None must not clobber the inherited kind
        # when it is merged in AttributeCollection.add().
        if self.kind is None and not self.override:
            self.kind = str
        if constraint is not None and not callable(constraint) and not isinstance(constraint, tuple):
            raise ValueError('Constraint must be callable or a (callable, str) tuple.')
        self.constraint = constraint
        self.global_alias = global_alias

    def set_value(self, obj, value=None):
        """
        Set this parameter on *obj* as an instance attribute named after the
        parameter. ``None`` values fall back to the default (raising
        ConfigError for mandatory parameters); other values are converted via
        ``self.kind``. If the attribute already holds an iterable, the new
        value is appended rather than replacing it.

        :raises ConfigError: if the value is missing-but-mandatory or cannot
                             be converted by ``self.kind``.
        """
        if value is None:
            if self.default is not None:
                value = self.default
            elif self.mandatory:
                msg = 'No values specified for mandatory parameter {} in {}'
                raise ConfigError(msg.format(self.name, obj.name))
        else:
            try:
                value = self.kind(value)
            except (ValueError, TypeError):
                typename = self.get_type_name()
                msg = 'Bad value "{}" for {}; must be {} {}'
                article = get_article(typename)
                raise ConfigError(msg.format(value, self.name, article, typename))
        current_value = getattr(obj, self.name, None)
        if current_value is None:
            setattr(obj, self.name, value)
        elif not isiterable(current_value):
            setattr(obj, self.name, value)
        else:
            # Accumulate onto an existing iterable (e.g. list-kind params).
            new_value = current_value + [value]
            setattr(obj, self.name, new_value)

    def validate(self, obj):
        """
        Validate the value currently set on *obj* against allowed_values,
        the constraint, and the mandatory flag.

        :raises ConfigError: on any violation.
        """
        value = getattr(obj, self.name, None)
        if value is not None:
            if self.allowed_values:
                self._validate_allowed_values(obj, value)
            if self.constraint:
                self._validate_constraint(obj, value)
        else:
            if self.mandatory:
                msg = 'No value specified for mandatory parameter {} in {}.'
                raise ConfigError(msg.format(self.name, obj.name))

    def get_type_name(self):
        """Return a human-readable name for ``self.kind``, extracted from its
        repr (handles both classes and plain functions)."""
        typename = str(self.kind)
        if '\'' in typename:
            typename = typename.split('\'')[1]
        elif typename.startswith('<function'):
            typename = typename.split()[1]
        return typename

    def _validate_allowed_values(self, obj, value):
        # For list-like kinds, every element must be allowed; otherwise the
        # value itself must be. (Detection by kind's repr is a heuristic.)
        if 'list' in str(self.kind):
            for v in value:
                if v not in self.allowed_values:
                    msg = 'Invalid value {} for {} in {}; must be in {}'
                    raise ConfigError(msg.format(v, self.name, obj.name, self.allowed_values))
        else:
            if value not in self.allowed_values:
                msg = 'Invalid value {} for {} in {}; must be in {}'
                raise ConfigError(msg.format(value, self.name, obj.name, self.allowed_values))

    def _validate_constraint(self, obj, value):
        msg_vals = {'value': value, 'param': self.name, 'extension': obj.name}
        if isinstance(self.constraint, tuple) and len(self.constraint) == 2:
            constraint, msg = self.constraint  # pylint: disable=unpacking-non-sequence
        elif callable(self.constraint):
            constraint = self.constraint
            msg = '"{value}" failed constraint validation for {param} in {extension}.'
        else:
            raise ValueError('Invalid constraint for {}: must be callable or a 2-tuple'.format(self.name))
        if not constraint(value):
            # Fix: raise with just the formatted message, consistent with
            # every other ConfigError raise in this class; previously the raw
            # value was passed as the first argument, clobbering the message.
            raise ConfigError(msg.format(**msg_vals))

    def __repr__(self):
        d = copy(self.__dict__)
        del d['description']
        return 'Param({})'.format(d)

    __str__ = __repr__
|  | ||||
|  | ||||
# Alias: the rest of this file (e.g. ExtensionMeta.to_propagate and
# Extension.parameters) refers to this class as ``Parameter``.
Parameter = Param
|  | ||||
|  | ||||
class Artifact(object):
    """
    This is an artifact generated during execution/post-processing of a workload.
    Unlike metrics, this represents an actual artifact, such as a file, generated.
    This may be "result", such as trace, or it could be "meta data" such as logs.
    These are distinguished using the ``kind`` attribute, which also helps WA decide
    how it should be handled. Currently supported kinds are:

        :log: A log file. Not part of "results" as such but contains information about the
              run/workload execution that be useful for diagnostics/meta analysis.
        :meta: A file containing metadata. This is not part of "results", but contains
               information that may be necessary to reproduce the results (contrast with
               ``log`` artifacts which are *not* necessary).
        :data: This file contains new data, not available otherwise and should be considered
               part of the "results" generated by WA. Most traces would fall into this category.
        :export: Exported version of results or some other artifact. This signifies that
                 this artifact does not contain any new data that is not available
                 elsewhere and that it may be safely discarded without losing information.
        :raw: Signifies that this is a raw dump/log that is normally processed to extract
              useful information and is then discarded. In a sense, it is the opposite of
              ``export``, but in general may also be discarded.

              .. note:: whether a file is marked as ``log``/``data`` or ``raw`` depends on
                        how important it is to preserve this file, e.g. when archiving, vs
                        how much space it takes up. Unlike ``export`` artifacts which are
                        (almost) always ignored by other exporters as that would never result
                        in data loss, ``raw`` files *may* be processed by exporters if they
                        decided that the risk of losing potentially (though unlikely) useful
                        data is greater than the time/space cost of handling the artifact (e.g.
                        a database uploader may choose to ignore ``raw`` artifacts, where as a
                        network filer archiver may choose to archive them).

        .. note:: The kind parameter is intended to represent the logical function of a particular
                  artifact, not its intended means of processing -- this is left entirely up to the
                  result processors.

    """

    # Artifact levels: generated once per run, or once per iteration.
    RUN = 'run'
    ITERATION = 'iteration'

    valid_kinds = ['log', 'meta', 'data', 'export', 'raw']

    def __init__(self, name, path, kind, level=RUN, mandatory=False, description=None):
        """
        :param name: Name that uniquely identifies this artifact.
        :param path: The *relative* path of the artifact. Depending on the ``level``
                     must be either relative to the run or iteration output directory.
                     Note: this path *must* be delimited using ``/`` irrespective of the
                     operating system.
        :param kind: The type of the artifact this is (e.g. log file, result, etc.) this
                     will be used as a hint to result processors. This must be one of ``'log'``,
                     ``'meta'``, ``'data'``, ``'export'``, ``'raw'``.
        :param level: The level at which the artifact will be generated. Must be either
                      ``'iteration'`` or ``'run'``.
        :param mandatory: Boolean value indicating whether this artifact must be present
                          at the end of result processing for its level.
        :param description: A free-form description of what this artifact is.

        :raises ValueError: if *kind* is not one of ``valid_kinds``.
        """
        if kind not in self.valid_kinds:
            raise ValueError('Invalid Artifact kind: {}; must be in {}'.format(kind, self.valid_kinds))
        self.name = name
        # Normalize the '/'-delimited path to the host OS's separator.
        self.path = path.replace('/', os.sep) if path is not None else path
        self.kind = kind
        self.level = level
        self.mandatory = mandatory
        self.description = description

    def exists(self, context):
        """Returns ``True`` if artifact exists within the specified context, and
        ``False`` otherwise."""
        fullpath = os.path.join(context.output_directory, self.path)
        return os.path.exists(fullpath)

    def to_dict(self):
        # Shallow copy so callers may mutate the dict without affecting us.
        return copy(self.__dict__)
|  | ||||
|  | ||||
class Alias(object):
    """
    A configuration alias for an extension: maps an alternative name onto a set
    of parameter values, effectively providing an alternative set of defaults.

    """

    def __init__(self, name, **kwargs):
        self.name = name
        self.params = kwargs
        self.extension_name = None  # populated later by the MetaClass

    def validate(self, ext):
        """Check that every aliased parameter exists on *ext*."""
        valid_names = set(p.name for p in ext.parameters)
        for param in self.params:
            if param not in valid_names:
                # Raising config error because aliases might have come through
                # the config.
                msg = 'Parameter {} (defined in alias {}) is invalid for {}'
                raise ConfigError(msg.format(param, self.name, ext.name))
|  | ||||
|  | ||||
class ExtensionMeta(type):
    """
    This basically adds some magic to extensions to make implementing new extensions, such as
    workloads less complicated.

    It ensures that certain class attributes (specified by the ``to_propagate``
    attribute of the metaclass) get propagated down the inheritance hierarchy. The assumption
    is that the values of the attributes specified in the class are iterable; if that is not met,
    Bad Things (tm) will happen.

    This also provides virtual method implementation, similar to those in C-derived OO languages,
    and alias specifications.

    """

    # (attribute name, element class, collection class) triples; each named
    # class attribute is accumulated across the inheritance hierarchy.
    to_propagate = [
        ('parameters', Parameter, AttributeCollection),
        ('artifacts', Artifact, AttributeCollection),
        ('core_modules', str, ListCollection),
    ]

    # Methods for which every implementation in the hierarchy is invoked,
    # base-first, on a single call (see _implement_virtual).
    virtual_methods = ['validate']

    def __new__(mcs, clsname, bases, attrs):
        # Attributes must be merged *before* the class object is created so
        # the collections end up as the class's own attributes.
        mcs._propagate_attributes(bases, attrs)
        cls = type.__new__(mcs, clsname, bases, attrs)
        mcs._setup_aliases(cls)
        mcs._implement_virtual(cls, bases)
        return cls

    @classmethod
    def _propagate_attributes(mcs, bases, attrs):
        """
        For attributes specified by to_propagate, their values will be a union of
        that specified for cls and it's bases (cls values overriding those of bases
        in case of conflicts).

        """
        for prop_attr, attr_cls, attr_collector_cls in mcs.to_propagate:
            should_propagate = False
            propagated = attr_collector_cls(attr_cls)
            # Base-class entries first, so the class's own entries (added
            # last) win on name conflicts.
            for base in bases:
                if hasattr(base, prop_attr):
                    propagated += getattr(base, prop_attr) or []
                    should_propagate = True
            if prop_attr in attrs:
                propagated += attrs[prop_attr] or []
                should_propagate = True
            if should_propagate:
                attrs[prop_attr] = propagated

    @classmethod
    def _setup_aliases(mcs, cls):
        # Replace the declared ``aliases`` list with an AliasCollection,
        # validating each alias against the class's parameters.
        if hasattr(cls, 'aliases'):
            aliases, cls.aliases = cls.aliases, AliasCollection()
            for alias in aliases:
                if isinstance(alias, basestring):
                    # A bare string declares an alias with no parameter overrides.
                    alias = Alias(alias)
                alias.validate(cls)
                alias.extension_name = cls.name
                cls.aliases.add(alias)

    @classmethod
    def _implement_virtual(mcs, cls, bases):
        """
        This implements automatic method propagation to the bases, so
        that you don't have to do something like

            super(cls, self).vmname()

        .. note:: current implementation imposes a restriction in that
                  parameters into the function *must* be passed as keyword
                  arguments. There *must not* be positional arguments on
                  virutal method invocation.

        """
        # ``methods`` is captured by every wrapper closure below; it maps a
        # virtual method name to the list of implementations to invoke.
        methods = {}
        for vmname in mcs.virtual_methods:
            clsmethod = getattr(cls, vmname, None)
            if clsmethod:
                basemethods = [getattr(b, vmname) for b in bases if hasattr(b, vmname)]
                # Exclude base entries identical to the class's own method
                # (i.e. inherited, not overridden) to avoid double invocation.
                methods[vmname] = [bm for bm in basemethods if bm != clsmethod]
                methods[vmname].append(clsmethod)

                # ``__name=vmname`` binds the current loop value as a default
                # argument, avoiding the late-binding closure pitfall.
                def wrapper(self, __name=vmname, **kwargs):
                    for dm in methods[__name]:
                        dm(self, **kwargs)

                setattr(cls, vmname, wrapper)
|  | ||||
|  | ||||
| class Extension(object): | ||||
|     """ | ||||
|     Base class for all WA extensions. An extension is basically a plug-in. | ||||
|     It extends the functionality of WA in some way. Extensions are discovered | ||||
|     and loaded dynamically by the extension loader upon invocation of WA scripts. | ||||
|     Adding an extension is a matter of placing a class that implements an appropriate | ||||
|     interface somewhere it would be discovered by the loader. That "somewhere" is | ||||
|     typically one of the extension subdirectories under ``~/.workload_automation/``. | ||||
|  | ||||
|     """ | ||||
|     __metaclass__ = ExtensionMeta | ||||
|  | ||||
|     kind = None | ||||
|     name = None | ||||
|     parameters = [ | ||||
|         Parameter('modules', kind=list, | ||||
|                   description=""" | ||||
|                   Lists the modules to be loaded by this extension. A module is a plug-in that | ||||
|                   further extends functionality of an extension. | ||||
|                   """), | ||||
|     ] | ||||
|     artifacts = [] | ||||
|     aliases = [] | ||||
|     core_modules = [] | ||||
|  | ||||
|     @classmethod | ||||
|     def get_default_config(cls): | ||||
|         return {p.name: p.default for p in cls.parameters} | ||||
|  | ||||
|     @property | ||||
|     def dependencies_directory(self): | ||||
|         return _d(os.path.join(settings.dependencies_directory, self.name)) | ||||
|  | ||||
|     @property | ||||
|     def _classname(self): | ||||
|         return self.__class__.__name__ | ||||
|  | ||||
    def __init__(self, **kwargs):
        """
        Initialize the extension, setting each declared Parameter from *kwargs*.

        :raises ConfigError: if a keyword does not match a declared parameter,
                             or a parameter value fails conversion/defaulting.
        """
        # NOTE(review): name-mangled method defined outside this view --
        # presumably enforces construction via the extension loader; confirm.
        self.__check_from_loader()
        self.logger = logging.getLogger(self._classname)
        self._modules = []
        self.capabilities = getattr(self.__class__, 'capabilities', [])
        # Apply every declared parameter (defaults/conversion via set_value).
        for param in self.parameters:
            param.set_value(self, kwargs.get(param.name))
        # Reject any keyword that does not correspond to a declared parameter.
        for key in kwargs:
            if key not in self.parameters:
                message = 'Unexpected parameter "{}" for {}'
                raise ConfigError(message.format(key, self.name))
|  | ||||
|     def get_config(self): | ||||
|         """ | ||||
|         Returns current configuration (i.e. parameter values) of this extension. | ||||
|  | ||||
|         """ | ||||
|         config = {} | ||||
|         for param in self.parameters: | ||||
|             config[param.name] = getattr(self, param.name, None) | ||||
|         return config | ||||
|  | ||||
    def validate(self):
        """
        Perform basic validation to ensure that this extension is capable of running.
        This is intended as an early check to ensure the extension has not been mis-configured,
        rather than a comprehensive check (that may, e.g., require access to the execution
        context).

        This method may also be used to enforce (i.e. set as well as check) inter-parameter
        constraints for the extension (e.g. if valid values for parameter A depend on the value
        of parameter B -- something that is not possible to enforce using ``Parameter``\ 's
        ``constraint`` attribute).

        :raises ValidationError: if the extension has no name, or if any
            parameter fails its own validation.

        """
        if self.name is None:
            raise ValidationError('Name not set for {}'.format(self._classname))
        for param in self.parameters:
            param.validate(self)
|  | ||||
|     def check_artifacts(self, context, level): | ||||
|         """ | ||||
|         Make sure that all mandatory artifacts have been generated. | ||||
|  | ||||
|         """ | ||||
|         for artifact in self.artifacts: | ||||
|             if artifact.level != level or not artifact.mandatory: | ||||
|                 continue | ||||
|             fullpath = os.path.join(context.output_directory, artifact.path) | ||||
|             if not os.path.exists(fullpath): | ||||
|                 message = 'Mandatory "{}" has not been generated for {}.' | ||||
|                 raise ValidationError(message.format(artifact.path, self.name)) | ||||
|  | ||||
    def __getattr__(self, name):
        # Attribute fall-back: delegate unknown attribute lookups to loaded
        # modules, so an extension transparently exposes its modules' API.
        if name == '_modules':
            # Guard against infinite recursion: if _modules itself is missing,
            # __init__ has not run (far enough) yet.
            raise ValueError('_modules accessed too early!')
        # First module (in load order) that provides the attribute wins.
        for module in self._modules:
            if hasattr(module, name):
                return getattr(module, name)
        raise AttributeError(name)
|  | ||||
|     def load_modules(self, loader): | ||||
|         """ | ||||
|         Load the modules specified by the "modules" Parameter using the provided loader. A loader | ||||
|         can be any object that has an atribute called "get_module" that implements the following | ||||
|         signature:: | ||||
|  | ||||
|             get_module(name, owner, **kwargs) | ||||
|  | ||||
|         and returns an instance of :class:`wlauto.core.extension.Module`. If the module with the | ||||
|         specified name is not found, the loader must raise an appropriate exception. | ||||
|  | ||||
|         """ | ||||
|         modules = list(reversed(self.core_modules)) + list(reversed(self.modules or [])) | ||||
|         if not modules: | ||||
|             return | ||||
|         for module_spec in modules: | ||||
|             if not module_spec: | ||||
|                 continue | ||||
|             if isinstance(module_spec, basestring): | ||||
|                 name = module_spec | ||||
|                 params = {} | ||||
|             elif isinstance(module_spec, dict): | ||||
|                 if len(module_spec) != 1: | ||||
|                     message = 'Invalid module spec: {}; dict must have exctly one key -- the module name.' | ||||
|                     raise ValueError(message.format(module_spec)) | ||||
|                 name, params = module_spec.items()[0] | ||||
|             else: | ||||
|                 message = 'Invalid module spec: {}; must be a string or a one-key dict.' | ||||
|                 raise ValueError(message.format(module_spec)) | ||||
|  | ||||
|             if not isinstance(params, dict): | ||||
|                 message = 'Invalid module spec: {}; dict value must also be a dict.' | ||||
|                 raise ValueError(message.format(module_spec)) | ||||
|  | ||||
|             module = loader.get_module(name, owner=self, **params) | ||||
|             module.initialize() | ||||
|             for capability in module.capabilities: | ||||
|                 if capability not in self.capabilities: | ||||
|                     self.capabilities.append(capability) | ||||
|             self._modules.append(module) | ||||
|  | ||||
|     def has(self, capability): | ||||
|         """Check if this extension has the specified capability. The alternative method ``can`` is | ||||
|         identical to this. Which to use is up to the caller depending on what makes semantic sense | ||||
|         in the context of the capability, e.g. ``can('hard_reset')`` vs  ``has('active_cooling')``.""" | ||||
|         return capability in self.capabilities | ||||
|  | ||||
|     can = has | ||||
|  | ||||
    def __check_from_loader(self):
        """
        There are a few things that need to happen in order to get a valid extension instance.
        Not all of them are currently done through standard Python initialisation mechanisms
        (specifically, the loading of modules and alias resolution). In order to avoid potential
        problems with not fully loaded extensions, make sure that an extension is *only* instantiated
        by the loader.

        :raises RuntimeError: if the caller is not an ExtensionLoader's
            ``_instantiate`` (possibly via a chain of ``__init__`` calls).

        """
        stack = inspect.stack()
        stack.pop(0)  # current frame
        frame = stack.pop(0)
        # skip through the __init__ call chain (subclass __init__s delegating
        # up to Extension.__init__); frame[3] is the frame's function name
        while stack and frame[3] == '__init__':
            frame = stack.pop(0)
        if frame[3] != '_instantiate':
            message = 'Attempting to instantiate {} directly (must be done through an ExtensionLoader)'
            raise RuntimeError(message.format(self.__class__.__name__))
|  | ||||
|  | ||||
class Module(Extension):
    """
    This is a "plugin" for an extension intended to capture functionality that may be optional
    for an extension, and so may or may not be present in a particular setup; or, conversely,
    functionality that may be reusable between multiple devices, even if they are not within
    the same inheritance hierarchy.

    In other words, a Module is roughly equivalent to a kernel module and its primary purpose is to
    implement WA "drivers" for various peripherals that may or may not be present in a particular setup.

    .. note:: A module is itself an Extension and can therefore have its own modules.

    """

    capabilities = []

    @property
    def root_owner(self):
        """The top-most owner of this module: the first owner up the chain
        that is not itself a Module."""
        owner = self.owner
        while isinstance(owner, Module) and owner is not self:
            owner = owner.owner
        return owner

    def __init__(self, owner, **kwargs):
        super(Module, self).__init__(**kwargs)
        self.owner = owner
        # Walk up the ownership chain to detect a module (directly or
        # indirectly) owned by another module with the same name, which would
        # indicate a circular module dependency.
        while isinstance(owner, Module):
            if owner.name == self.name:
                raise ValueError('Circular module import for {}'.format(self.name))
            # BUGFIX: advance up the chain; the original never updated
            # 'owner', looping forever whenever the owner was itself a Module.
            owner = owner.owner

    def initialize(self):
        # Deliberate no-op hook: subclasses override this to perform
        # post-construction setup (invoked by Extension.load_modules).
        pass
|  | ||||
							
								
								
									
										400
									
								
								wlauto/core/extension_loader.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										400
									
								
								wlauto/core/extension_loader.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,400 @@ | ||||
| #    Copyright 2013-2015 ARM Limited | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
| # | ||||
|  | ||||
|  | ||||
| import os | ||||
| import sys | ||||
| import inspect | ||||
| import imp | ||||
| import string | ||||
| import logging | ||||
| from functools import partial | ||||
| from collections import OrderedDict | ||||
|  | ||||
| from wlauto.core.bootstrap import settings | ||||
| from wlauto.core.extension import Extension | ||||
| from wlauto.exceptions import NotFoundError, LoaderError | ||||
| from wlauto.utils.misc import walk_modules, load_class, merge_lists, merge_dicts, get_article | ||||
| from wlauto.utils.types import identifier | ||||
|  | ||||
|  | ||||
# Translation table used to turn a file path into a valid (underscore-only)
# Python module name (see ExtensionLoader._load_from_paths).
MODNAME_TRANS = string.maketrans(':/\\.', '____')
|  | ||||
|  | ||||
class ExtensionLoaderItem(object):
    """Describes one loadable extension kind, built from an entry of
    ``settings.extensions``."""

    def __init__(self, ext_tuple):
        self.name = ext_tuple.name
        self.default_package = ext_tuple.default_package
        self.default_path = ext_tuple.default_path
        # Resolve the dotted class-path string into the actual class object.
        self.cls = load_class(ext_tuple.cls)
|  | ||||
|  | ||||
class GlobalParameterAlias(object):
    """
    Represents a "global alias" for an extension parameter. A global alias
    is specified at the top-level of config rather than namespaced under an
    extension name.

    Multiple extensions may have parameters with the same global_alias if they are
    part of the same inheritance hierarchy and one parameter is an override of the
    other. This class keeps track of all such cases in its extensions dict.

    """

    def __init__(self, name):
        self.name = name
        # Maps extension name -> extension class for every extension that
        # declares a parameter carrying this global alias.
        self.extensions = {}

    def iteritems(self):
        # Yields (parameter, extension) pairs for all registered extensions.
        for ext in self.extensions.itervalues():
            yield (self.get_param(ext), ext)

    def get_param(self, ext):
        """Return the parameter of ``ext`` that carries this global alias.

        :raises ValueError: if ``ext`` has no parameter with this alias.
        """
        for param in ext.parameters:
            if param.global_alias == self.name:
                return param
        message = 'Extension {} does not have a parameter with global alias {}'
        raise ValueError(message.format(ext.name, self.name))

    def update(self, other_ext):
        """Register ``other_ext``, validating it against already-known extensions."""
        self._validate_ext(other_ext)
        self.extensions[other_ext.name] = other_ext

    def _validate_ext(self, other_ext):
        # Check the new extension's aliased parameter against every extension
        # already registered under this alias.
        other_param = self.get_param(other_ext)
        for param, ext in self.iteritems():
            # NOTE(review): this only rejects when the extensions are
            # *unrelated* AND the parameter kinds differ; unrelated extensions
            # with the same kind are accepted -- confirm this matches the
            # intent described in the class docstring.
            if ((not (issubclass(ext, other_ext) or issubclass(other_ext, ext))) and
                    other_param.kind != param.kind):
                message = 'Duplicate global alias {} declared in {} and {} extensions with different types'
                raise LoaderError(message.format(self.name, ext.name, other_ext.name))
            if not param.name == other_param.name:
                message = 'Two params {} in {} and {} in {} both declare global alias {}'
                raise LoaderError(message.format(param.name, ext.name,
                                                 other_param.name, other_ext.name, self.name))

    def __str__(self):
        text = 'GlobalAlias({} => {})'
        extlist = ', '.join(['{}.{}'.format(e.name, p.name) for p, e in self.iteritems()])
        return text.format(self.name, extlist)
|  | ||||
|  | ||||
class ExtensionLoader(object):
    """
    Discovers, enumerates and loads available devices, configs, etc.
    The loader will attempt to discover things on construction by looking
    in predetermined set of locations defined by default_paths. Optionally,
    additional locations may specified through paths parameter that must
    be a list of additional Python module paths (i.e. dot-delimited).

    """

    # Singleton instance; see __new__.
    _instance = None

    # Singleton
    def __new__(cls, *args, **kwargs):
        if not cls._instance:
            # NOTE(review): forwarding *args/**kwargs to object.__new__ is
            # deprecated (and the values are re-applied in __init__ anyway) --
            # confirm whether this should be a plain __new__(cls) call.
            cls._instance = super(ExtensionLoader, cls).__new__(cls, *args, **kwargs)
        else:
            # Re-construction updates the existing instance's settings rather
            # than creating a new loader; unknown keywords are rejected.
            for k, v in kwargs.iteritems():
                if not hasattr(cls._instance, k):
                    raise ValueError('Invalid parameter for ExtensionLoader: {}'.format(k))
                setattr(cls._instance, k, v)
        return cls._instance

    def set_load_defaults(self, value):
        # Enabling defaults merges the built-in packages into the search list
        # (later entries win on duplicates).
        self._load_defaults = value
        if value:
            self.packages = merge_lists(self.default_packages, self.packages, duplicates='last')

    def get_load_defaults(self):
        return self._load_defaults

    # Property so that assigning load_defaults triggers the package merge above.
    load_defaults = property(get_load_defaults, set_load_defaults)

    def __init__(self, packages=None, paths=None, ignore_paths=None, keep_going=False, load_defaults=True):
        """
        params::

            :packages: List of packages to load extensions from.
            :paths: List of paths to be searched for Python modules containing
                    WA extensions.
            :ignore_paths: List of paths to ignore when search for WA extensions (these would
                           typically be subdirectories of one or more locations listed in
                           ``paths`` parameter.
            :keep_going: Specifies whether to keep going if an error occurs while loading
                         extensions.
            :load_defaults: Specifies whether extension should be loaded from default locations
                            (WA package, and user's WA directory) as well as the packages/paths
                            specified explicitly in ``packages`` and ``paths`` parameters.

        """
        self._load_defaults = None
        self.logger = logging.getLogger('ExtensionLoader')
        self.keep_going = keep_going
        # One ExtensionLoaderItem per extension kind declared in settings.
        self.extension_kinds = {ext_tuple.name: ExtensionLoaderItem(ext_tuple)
                                for ext_tuple in settings.extensions}
        self.default_packages = [ext.default_package for ext in self.extension_kinds.values()]

        self.packages = packages or []
        # This assignment goes through the load_defaults property and may
        # merge default_packages into self.packages.
        self.load_defaults = load_defaults
        self.paths = paths or []
        self.ignore_paths = ignore_paths or []
        self.extensions = {}
        self.aliases = {}
        self.global_param_aliases = {}
        # create an empty dict for each extension type to store discovered
        # extensions.
        for ext in self.extension_kinds.values():
            setattr(self, '_' + ext.name, {})
        self._load_from_packages(self.packages)
        self._load_from_paths(self.paths, self.ignore_paths)

    def update(self, packages=None, paths=None, ignore_paths=None):
        """ Load extensions from the specified paths/packages
        without clearing or reloading existing extension. """
        if packages:
            self.packages.extend(packages)
            self._load_from_packages(packages)
        if paths:
            self.paths.extend(paths)
            self.ignore_paths.extend(ignore_paths or [])
            self._load_from_paths(paths, ignore_paths or [])

    def clear(self):
        """ Clear all discovered items. """
        self.extensions.clear()
        for ext in self.extension_kinds.values():
            self._get_store(ext).clear()

    def reload(self):
        """ Clear all discovered items and re-run the discovery. """
        self.clear()
        self._load_from_packages(self.packages)
        self._load_from_paths(self.paths, self.ignore_paths)

    def get_extension_class(self, name, kind=None):
        """
        Return the class for the specified extension if found or raises ``ValueError``.

        """
        name, _ = self.resolve_alias(name)
        if kind is None:
            return self.extensions[name]
        ext = self.extension_kinds.get(kind)
        if ext is None:
            raise ValueError('Unknown extension type: {}'.format(kind))
        store = self._get_store(ext)
        if name not in store:
            raise NotFoundError('Extensions {} is not {} {}.'.format(name, get_article(kind), kind))
        return store[name]

    def get_extension(self, name, *args, **kwargs):
        """
        Return extension of the specified kind with the specified name. Any additional
        parameters will be passed to the extension's __init__.

        """
        name, base_kwargs = self.resolve_alias(name)
        kind = kwargs.pop('kind', None)
        # Alias-supplied parameters are overridden by explicitly passed ones.
        kwargs = merge_dicts(base_kwargs, kwargs, list_duplicates='last', dict_type=OrderedDict)
        cls = self.get_extension_class(name, kind)
        extension = _instantiate(cls, args, kwargs)
        extension.load_modules(self)
        return extension

    def get_default_config(self, ext_name):
        """
        Returns the default configuration for the specified extension name. The name may be an alias,
        in which case, the returned config will be augmented with appropriate alias overrides.

        """
        real_name, alias_config = self.resolve_alias(ext_name)
        base_default_config = self.get_extension_class(real_name).get_default_config()
        return merge_dicts(base_default_config, alias_config, list_duplicates='last', dict_type=OrderedDict)

    def list_extensions(self, kind=None):
        """
        List discovered extension classes. Optionally, only list extensions of a
        particular type.

        """
        if kind is None:
            return self.extensions.values()
        if kind not in self.extension_kinds:
            raise ValueError('Unknown extension type: {}'.format(kind))
        return self._get_store(self.extension_kinds[kind]).values()

    def has_extension(self, name, kind=None):
        """
        Returns ``True`` if an extensions with the specified ``name`` has been
        discovered by the loader. If ``kind`` was specified, only returns ``True``
        if the extension has been found, *and* it is of the specified kind.

        """
        # Note: an unknown *kind* still raises ValueError (not caught here);
        # only a missing extension name yields False.
        try:
            self.get_extension_class(name, kind)
            return True
        except NotFoundError:
            return False

    def resolve_alias(self, alias_name):
        """
        Try to resolve the specified name as an extension alias. Returns a
        two-tuple, the first value of which is actual extension name, and the
        second is a dict of parameter values for this alias. If the name passed
        is already an extension name, then the result is ``(alias_name, {})``.

        """
        alias_name = identifier(alias_name.lower())
        if alias_name in self.extensions:
            return (alias_name, {})
        if alias_name in self.aliases:
            alias = self.aliases[alias_name]
            return (alias.extension_name, alias.params)
        raise NotFoundError('Could not find extension or alias "{}"'.format(alias_name))

    # Internal methods.

    def __getattr__(self, name):
        """
        This resolves methods for specific extensions types based on corresponding
        generic extension methods. So it's possible to say things like ::

            loader.get_device('foo')

        instead of ::

            loader.get_extension('foo', kind='device')

        """
        if name.startswith('get_'):
            name = name.replace('get_', '', 1)
            if name in self.extension_kinds:
                return partial(self.get_extension, kind=name)
        if name.startswith('list_'):
            # Strip a trailing plural 's' (e.g. list_devices -> device).
            name = name.replace('list_', '', 1).rstrip('s')
            if name in self.extension_kinds:
                return partial(self.list_extensions, kind=name)
        if name.startswith('has_'):
            name = name.replace('has_', '', 1)
            if name in self.extension_kinds:
                return partial(self.has_extension, kind=name)
        raise AttributeError(name)

    def _get_store(self, ext):
        # Accepts either an ExtensionLoaderItem or a plain kind-name string.
        name = getattr(ext, 'name', ext)
        return getattr(self, '_' + name)

    def _load_from_packages(self, packages):
        try:
            for package in packages:
                for module in walk_modules(package):
                    self._load_module(module)
        except ImportError as e:
            message = 'Problem loading extensions from extra packages: {}'
            raise LoaderError(message.format(e.message))

    def _load_from_paths(self, paths, ignore_paths):
        self.logger.debug('Loading from paths.')
        for path in paths:
            self.logger.debug('Checking path %s', path)
            for root, _, files in os.walk(path):
                # Skip any directory that falls under one of the ignore paths
                # (prefix match on the full path).
                should_skip = False
                for igpath in ignore_paths:
                    if root.startswith(igpath):
                        should_skip = True
                        break
                if should_skip:
                    continue
                for fname in files:
                    if not os.path.splitext(fname)[1].lower() == '.py':
                        continue
                    filepath = os.path.join(root, fname)
                    try:
                        # Build a unique module name from the path by mapping
                        # separators/dots to underscores (MODNAME_TRANS).
                        # NOTE(review): filepath[1:] drops the path's first
                        # character (presumably a leading '/'); confirm the
                        # behaviour for relative paths.
                        modname = os.path.splitext(filepath[1:])[0].translate(MODNAME_TRANS)
                        module = imp.load_source(modname, filepath)
                        self._load_module(module)
                    except (SystemExit, ImportError), e:
                        if self.keep_going:
                            self.logger.warn('Failed to load {}'.format(filepath))
                            self.logger.warn('Got: {}'.format(e))
                        else:
                            raise LoaderError('Failed to load {}'.format(filepath), sys.exc_info())

    def _load_module(self, module):  # NOQA pylint: disable=too-many-branches
        # Register every Extension subclass in the module that has a name set.
        self.logger.debug('Checking module %s', module.__name__)
        for obj in vars(module).itervalues():
            if inspect.isclass(obj):
                if not issubclass(obj, Extension) or not hasattr(obj, 'name') or not obj.name:
                    continue
                try:
                    for ext in self.extension_kinds.values():
                        if issubclass(obj, ext.cls):
                            self._add_found_extension(obj, ext)
                            break
                    else:  # did not find a matching Extension type
                        message = 'Unknown extension type for {} (type: {})'
                        # NOTE(review): obj is a class, so obj.__class__.__name__
                        # names its *metaclass*; obj.__name__ was probably
                        # intended here.
                        raise LoaderError(message.format(obj.name, obj.__class__.__name__))
                except LoaderError as e:
                    if self.keep_going:
                        self.logger.warning(e)
                    else:
                        raise e

    def _add_found_extension(self, obj, ext):
        """
            :obj: Found extension class
            :ext: matching extension item.
        """
        self.logger.debug('\tAdding %s %s', ext.name, obj.name)
        key = identifier(obj.name.lower())
        obj.kind = ext.name
        if key in self.extensions or key in self.aliases:
            raise LoaderError('{} {} already exists.'.format(ext.name, obj.name))
        # Extensions are tracked both, in a common extensions
        # dict, and in per-extension kind dict (as retrieving
        # extensions by kind is a common use case.
        self.extensions[key] = obj
        store = self._get_store(ext)
        store[key] = obj
        for alias in obj.aliases:
            # NOTE(review): 'alias' is an alias object but the dicts are keyed
            # by strings, so this membership test can never match -- the check
            # (and the error message) likely meant to use alias.name; confirm.
            if alias in self.extensions or alias in self.aliases:
                raise LoaderError('{} {} already exists.'.format(ext.name, obj.name))
            self.aliases[alias.name] = alias

        # Update global aliases list. If a global alias is already in the list,
        # then make sure this extension is in the same parent/child hierarchy
        # as the one already found.
        for param in obj.parameters:
            if param.global_alias:
                if param.global_alias not in self.global_param_aliases:
                    ga = GlobalParameterAlias(param.global_alias)
                    ga.update(obj)
                    self.global_param_aliases[ga.name] = ga
                else:  # global alias already exists.
                    self.global_param_aliases[param.global_alias].update(obj)
|  | ||||
|  | ||||
| # Utility functions. | ||||
|  | ||||
| def _instantiate(cls, args=None, kwargs=None): | ||||
|     args = [] if args is None else args | ||||
|     kwargs = {} if kwargs is None else kwargs | ||||
|     try: | ||||
|         return cls(*args, **kwargs) | ||||
|     except Exception: | ||||
|         raise LoaderError('Could not load {}'.format(cls), sys.exc_info()) | ||||
|  | ||||
							
								
								
									
										35
									
								
								wlauto/core/exttype.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										35
									
								
								wlauto/core/exttype.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,35 @@ | ||||
| #    Copyright 2014-2015 ARM Limited | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
| # | ||||
|  | ||||
|  | ||||
| # Separate module to avoid circular dependencies | ||||
| from wlauto.core.bootstrap import settings | ||||
| from wlauto.core.extension import Extension | ||||
| from wlauto.utils.misc import load_class | ||||
|  | ||||
|  | ||||
# Maps each extension kind name (e.g. 'workload') to its resolved base class.
_extension_bases = {ext.name: load_class(ext.cls) for ext in settings.extensions}
|  | ||||
|  | ||||
def get_extension_type(ext):
    """Given an instance of ``wlauto.core.Extension``, return a string representing
    the type of the extension (e.g. ``'workload'`` for a Workload subclass instance).

    :raises ValueError: if ``ext`` is not an Extension instance, or matches
        none of the known extension base classes.
    """
    if not isinstance(ext, Extension):
        raise ValueError('{} is not an instance of Extension'.format(ext))
    # NOTE(review): dict iteration order is arbitrary here, so if an instance
    # matched more than one base the result would be unpredictable -- assumes
    # the base classes are mutually exclusive.
    for name, cls in _extension_bases.iteritems():
        if isinstance(ext, cls):
            return name
    raise ValueError('Unknown extension type: {}'.format(ext.__class__.__name__))
|  | ||||
							
								
								
									
										374
									
								
								wlauto/core/instrumentation.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										374
									
								
								wlauto/core/instrumentation.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,374 @@ | ||||
| #    Copyright 2013-2015 ARM Limited | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
| # | ||||
|  | ||||
|  | ||||
| """ | ||||
| Adding New Instrument | ||||
| ===================== | ||||
|  | ||||
| Any new instrument should be a subclass of Instrument and it must have a name. | ||||
| When a new instrument is added to Workload Automation, the methods of the new | ||||
| instrument will be found automatically and hooked up to the supported signals. | ||||
| Once a signal is broadcasted, the corresponding registered method is invoked. | ||||
|  | ||||
| Each method in Instrument must take two arguments, which are self and context. | ||||
| Supported signals can be found in [... link to signals ...] To make | ||||
| implementations easier and common, the basic steps to add new instrument is | ||||
| similar to the steps to add new workload. | ||||
|  | ||||
| Hence, the following methods are sufficient to implement to add new instrument: | ||||
|  | ||||
|     - setup: This method is invoked after the workload is setup. All the | ||||
|        necessary setups should go inside this method. Setup includes operations | ||||
|        like pushing the files to the target device, installing them, clearing logs, | ||||
|        etc. | ||||
|     - start: It is invoked just before the workload start execution. Here is | ||||
|        where instrument measures start being registered/taken. | ||||
|     - stop: It is invoked just after the workload execution stops. The measures | ||||
|        should stop being taken/registered. | ||||
|     - update_result: It is invoked after the workload updated its result. | ||||
|        update_result is where the taken measures are added to the result so it | ||||
|        can be processed by Workload Automation. | ||||
|     - teardown: It is invoked after the workload is torn down. It is a good place | ||||
|        to clean any logs generated by the instrument. | ||||
|  | ||||
| For example, to add an instrument which will trace device errors, we subclass | ||||
| Instrument and overwrite the variable name.:: | ||||
|  | ||||
|         #BINARY_FILE = os.path.join(os.path.dirname(__file__), 'trace') | ||||
|         class TraceErrorsInstrument(Instrument): | ||||
|  | ||||
|             name = 'trace-errors' | ||||
|  | ||||
|             def __init__(self, device): | ||||
|                 super(TraceErrorsInstrument, self).__init__(device) | ||||
|                 self.trace_on_device = os.path.join(self.device.working_directory, 'trace') | ||||
|  | ||||
| We then declare and implement the aforementioned methods. For the setup method, | ||||
| we want to push the file to the target device and then change the file mode to | ||||
| 755 :: | ||||
|  | ||||
|     def setup(self, context): | ||||
|         self.device.push_file(BINARY_FILE, self.device.working_directory) | ||||
|         self.device.execute('chmod 755 {}'.format(self.trace_on_device)) | ||||
|  | ||||
| Then we implemented the start method, which will simply run the file to start | ||||
| tracing. :: | ||||
|  | ||||
|     def start(self, context): | ||||
|         self.device.execute('{} start'.format(self.trace_on_device)) | ||||
|  | ||||
| Lastly, we need to stop tracing once the workload stops and this happens in the | ||||
| stop method:: | ||||
|  | ||||
|     def stop(self, context): | ||||
|         self.device.execute('{} stop'.format(self.trace_on_device)) | ||||
|  | ||||
| The generated result can be updated inside update_result, or if it is trace, we | ||||
| just pull the file to the host device. context has a result variable which | ||||
| has add_metric method. It can be used to add the instrumentation results metrics | ||||
| to the final result for the workload. The method can be passed 4 params, which | ||||
| are metric key, value, unit and lower_is_better, which is a boolean. :: | ||||
|  | ||||
|     def update_result(self, context): | ||||
|         # pull the trace file to the device | ||||
|         result = os.path.join(self.device.working_directory, 'trace.txt') | ||||
|         self.device.pull_file(result, context.working_directory) | ||||
|  | ||||
|         # parse the file if needs to be parsed, or add result to | ||||
|         # context.result | ||||
|  | ||||
| At the end, we might want to delete any files generated by the instrumentation | ||||
| and the code to clear these file goes in teardown method. :: | ||||
|  | ||||
|     def teardown(self, context): | ||||
|         self.device.delete_file(os.path.join(self.device.working_directory, 'trace.txt')) | ||||
|  | ||||
| """ | ||||
|  | ||||
| import logging | ||||
| import inspect | ||||
| from collections import OrderedDict | ||||
|  | ||||
| import wlauto.core.signal as signal | ||||
| from wlauto.core.extension import Extension | ||||
| from wlauto.exceptions import WAError, DeviceNotRespondingError, TimeoutError | ||||
| from wlauto.utils.misc import get_traceback, isiterable | ||||
|  | ||||
|  | ||||
| logger = logging.getLogger('instrumentation') | ||||
|  | ||||
|  | ||||
# Maps method names onto the signals they should be registered to.
# Note: the begin/end signals are paired -- if a begin_ signal is sent,
#       then the corresponding end_ signal is guaranteed to also be sent.
# Note: using OrderedDict to preserve logical ordering for the table generated
#       in the documentation
SIGNAL_MAP = OrderedDict([
    # Below are "aliases" for some of the more common signals to allow
    # instrumentation to have similar structure to workloads
    ('initialize', signal.RUN_INIT),
    ('setup', signal.SUCCESSFUL_WORKLOAD_SETUP),
    ('start', signal.BEFORE_WORKLOAD_EXECUTION),
    ('stop', signal.AFTER_WORKLOAD_EXECUTION),
    ('process_workload_result', signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE),
    ('update_result', signal.AFTER_WORKLOAD_RESULT_UPDATE),
    ('teardown', signal.AFTER_WORKLOAD_TEARDOWN),
    ('finalize', signal.RUN_FIN),

    ('on_run_start', signal.RUN_START),
    ('on_run_end', signal.RUN_END),
    ('on_workload_spec_start', signal.WORKLOAD_SPEC_START),
    ('on_workload_spec_end', signal.WORKLOAD_SPEC_END),
    ('on_iteration_start', signal.ITERATION_START),
    ('on_iteration_end', signal.ITERATION_END),

    ('before_initial_boot', signal.BEFORE_INITIAL_BOOT),
    ('on_successful_initial_boot', signal.SUCCESSFUL_INITIAL_BOOT),
    ('after_initial_boot', signal.AFTER_INITIAL_BOOT),
    ('before_first_iteration_boot', signal.BEFORE_FIRST_ITERATION_BOOT),
    ('on_successful_first_iteration_boot', signal.SUCCESSFUL_FIRST_ITERATION_BOOT),
    ('after_first_iteration_boot', signal.AFTER_FIRST_ITERATION_BOOT),
    ('before_boot', signal.BEFORE_BOOT),
    ('on_successful_boot', signal.SUCCESSFUL_BOOT),
    ('after_boot', signal.AFTER_BOOT),

    # Note: 'initialize' above also maps to RUN_INIT, so both spellings are
    #       dispatched on the same signal.
    ('on_spec_init', signal.SPEC_INIT),
    ('on_run_init', signal.RUN_INIT),
    ('on_iteration_init', signal.ITERATION_INIT),

    ('before_workload_setup', signal.BEFORE_WORKLOAD_SETUP),
    ('on_successful_workload_setup', signal.SUCCESSFUL_WORKLOAD_SETUP),
    ('after_workload_setup', signal.AFTER_WORKLOAD_SETUP),
    ('before_workload_execution', signal.BEFORE_WORKLOAD_EXECUTION),
    ('on_successful_workload_execution', signal.SUCCESSFUL_WORKLOAD_EXECUTION),
    ('after_workload_execution', signal.AFTER_WORKLOAD_EXECUTION),
    ('before_workload_result_update', signal.BEFORE_WORKLOAD_RESULT_UPDATE),
    ('on_successful_workload_result_update', signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE),
    ('after_workload_result_update', signal.AFTER_WORKLOAD_RESULT_UPDATE),
    ('before_workload_teardown', signal.BEFORE_WORKLOAD_TEARDOWN),
    ('on_successful_workload_teardown', signal.SUCCESSFUL_WORKLOAD_TEARDOWN),
    ('after_workload_teardown', signal.AFTER_WORKLOAD_TEARDOWN),

    ('before_overall_results_processing', signal.BEFORE_OVERALL_RESULTS_PROCESSING),
    ('on_successful_overall_results_processing', signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING),
    ('after_overall_results_processing', signal.AFTER_OVERALL_RESULTS_PROCESSING),

    ('on_error', signal.ERROR_LOGGED),
    ('on_warning', signal.WARNING_LOGGED),
])

# Method-name prefixes that adjust the priority with which a callback is
# connected to its signal; higher values dispatch earlier. The prefix is
# stripped before looking the method name up in SIGNAL_MAP (see install()).
PRIORITY_MAP = OrderedDict([
    ('very_fast_', 20),
    ('fast_', 10),
    ('normal_', 0),
    ('slow_', -10),
    ('very_slow_', -20),
])

# Instruments that have been installed via install(), in installation order.
installed = []
|  | ||||
|  | ||||
def is_installed(instrument):
    """
    Return ``True`` if *instrument* is already installed.

    *instrument* may be an :class:`Instrument` instance (matched by identity
    or by name), an Instrument subclass (matched by class), or a string
    (matched against installed instruments' names).
    """
    installed_names = [i.name for i in installed]
    if isinstance(instrument, Instrument):
        return instrument in installed or instrument.name in installed_names
    if isinstance(instrument, type):
        return instrument in [i.__class__ for i in installed]
    # otherwise, assume the instrument is identified by its name
    return instrument in installed_names
|  | ||||
|  | ||||
# Module-level flag recording whether any instrument callback has failed
# since the last reset (set by ManagedCallback on error).
failures_detected = False


def reset_failures():
    """Clear the module-level failure flag."""
    global failures_detected  # pylint: disable=W0603
    failures_detected = False


def check_failures():
    """Return whether a failure was recorded, clearing the flag as a side effect."""
    global failures_detected  # pylint: disable=W0603
    had_failures = failures_detected
    failures_detected = False
    return had_failures
|  | ||||
|  | ||||
class ManagedCallback(object):
    """
    This wraps instruments' callbacks to ensure that errors do not interfere
    with run execution.

    """

    def __init__(self, instrument, callback):
        self.instrument = instrument
        self.callback = callback

    def __call__(self, context):
        # Skip the callback entirely if the instrument has been disabled.
        if self.instrument.is_enabled:
            try:
                self.callback(context)
            except (KeyboardInterrupt, DeviceNotRespondingError, TimeoutError):  # pylint: disable=W0703
                raise
            except Exception as e:  # pylint: disable=W0703
                logger.error('Error in instrument {}'.format(self.instrument.name))
                global failures_detected  # pylint: disable=W0603
                failures_detected = True
                if isinstance(e, WAError):
                    logger.error(e)
                else:
                    tb = get_traceback()
                    logger.error(tb)
                    logger.error('{}({})'.format(e.__class__.__name__, e))
                if not context.current_iteration:
                    # Error occurred outside of an iteration (most likely
                    # during initial setup or teardown). Since this would affect
                    # the rest of the run, mark the instrument as broken so that
                    # it doesn't get re-enabled for subsequent iterations.
                    self.instrument.is_broken = True
                disable(self.instrument)
|  | ||||
|  | ||||
# Need this to keep track of callbacks, because the dispatcher only keeps
# weak references, so if the callbacks aren't referenced elsewhere, they will
# be deallocated before they've had a chance to be invoked.
# (Entries are the ManagedCallback instances appended by install().)
_callbacks = []
|  | ||||
|  | ||||
def install(instrument):
    """
    This will look for methods (or any callable members) with specific names
    in the instrument and hook them up to the corresponding signals.

    :param instrument: Instrument instance to install.
    :raises ValueError: if the instrument is already installed, or if a
                        signal-mapped attribute is not a callable taking
                        exactly two arguments (self and context).

    """
    logger.debug('Installing instrument %s.', instrument)
    if is_installed(instrument):
        raise ValueError('Instrument {} is already installed.'.format(instrument.name))
    for attr_name in dir(instrument):
        priority = 0
        stripped_attr_name = attr_name
        # A priority prefix (e.g. "fast_") adjusts dispatch priority and is
        # stripped before looking the name up in SIGNAL_MAP.
        # Note: .items() (rather than Python-2-only .iteritems()) works on
        #       both Python 2 and Python 3.
        for key, value in PRIORITY_MAP.items():
            if attr_name.startswith(key):
                stripped_attr_name = attr_name[len(key):]
                priority = value
                break
        if stripped_attr_name in SIGNAL_MAP:
            attr = getattr(instrument, attr_name)
            if not callable(attr):
                raise ValueError('Attribute {} not callable in {}.'.format(attr_name, instrument))
            arg_num = len(inspect.getargspec(attr).args)
            if arg_num != 2:
                raise ValueError('{} must take exactly 2 arguments; {} given.'.format(attr_name, arg_num))

            logger.debug('\tConnecting %s to %s', attr.__name__, SIGNAL_MAP[stripped_attr_name])
            mc = ManagedCallback(instrument, attr)
            # Keep a strong reference: the dispatcher holds only weak refs.
            _callbacks.append(mc)
            signal.connect(mc, SIGNAL_MAP[stripped_attr_name], priority=priority)
    installed.append(instrument)
|  | ||||
|  | ||||
def uninstall(instrument):
    """Remove a previously installed instrument (given as instance or name)."""
    installed.remove(get_instrument(instrument))
|  | ||||
|  | ||||
def validate():
    """Invoke ``validate()`` on every installed instrument."""
    for inst in installed:
        inst.validate()
|  | ||||
|  | ||||
def get_instrument(inst):
    """
    Resolve *inst* to an installed :class:`Instrument` instance.

    *inst* may already be an Instrument (returned as-is) or an instrument
    name. Raises ``ValueError`` if no installed instrument matches.
    """
    if isinstance(inst, Instrument):
        return inst
    matches = [i for i in installed if i.name == inst]
    if matches:
        return matches[0]
    raise ValueError('Instrument {} is not installed'.format(inst))
|  | ||||
|  | ||||
def disable_all():
    """Disable every installed instrument."""
    for inst in installed:
        _disable_instrument(inst)
|  | ||||
|  | ||||
def enable_all():
    """Enable every installed instrument (broken instruments stay disabled)."""
    for inst in installed:
        _enable_instrument(inst)
|  | ||||
|  | ||||
def enable(to_enable):
    """Enable the specified instrument, or each instrument in an iterable."""
    if not isiterable(to_enable):
        to_enable = [to_enable]
    for inst in to_enable:
        _enable_instrument(inst)
|  | ||||
|  | ||||
def disable(to_disable):
    """Disable the specified instrument, or each instrument in an iterable."""
    if not isiterable(to_disable):
        to_disable = [to_disable]
    for inst in to_disable:
        _disable_instrument(inst)
|  | ||||
|  | ||||
def _enable_instrument(inst):
    """Enable *inst* unless it has been marked broken earlier in the run."""
    inst = get_instrument(inst)
    if inst.is_broken:
        # A broken instrument errored outside an iteration; never re-enable it.
        logger.debug('Not enabling broken instrument {}'.format(inst.name))
    else:
        logger.debug('Enabling instrument {}'.format(inst.name))
        inst.is_enabled = True
|  | ||||
|  | ||||
def _disable_instrument(inst):
    """Disable *inst* if it is currently enabled."""
    inst = get_instrument(inst)
    if not inst.is_enabled:
        return
    logger.debug('Disabling instrument {}'.format(inst.name))
    inst.is_enabled = False
|  | ||||
|  | ||||
def get_enabled():
    """Return the installed instruments that are currently enabled."""
    return [instrument for instrument in installed if instrument.is_enabled]
|  | ||||
|  | ||||
def get_disabled():
    """Return the installed instruments that are currently disabled."""
    return [instrument for instrument in installed if not instrument.is_enabled]
|  | ||||
|  | ||||
class Instrument(Extension):
    """
    Base class for instrumentation implementations.

    Instances are constructed with the device they will instrument; their
    suitably-named methods are hooked up to run signals by ``install()``
    (see SIGNAL_MAP above).
    """

    def __init__(self, device, **kwargs):
        super(Instrument, self).__init__(**kwargs)
        self.device = device
        # Enabled by default; the framework may toggle this via
        # _enable_instrument()/_disable_instrument().
        self.is_enabled = True
        # Set by ManagedCallback when a callback fails outside an iteration;
        # a broken instrument is never re-enabled for the rest of the run.
        self.is_broken = False

    def __str__(self):
        return self.name

    def __repr__(self):
        return 'Instrument({})'.format(self.name)
|  | ||||
							
								
								
									
										109
									
								
								wlauto/core/resolver.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										109
									
								
								wlauto/core/resolver.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,109 @@ | ||||
| #    Copyright 2013-2015 ARM Limited | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
| # | ||||
|  | ||||
|  | ||||
| """ | ||||
| Defines infrastructure for resource resolution. This is used to find | ||||
| various dependencies/assets/etc that WA objects rely on in a flexible way. | ||||
|  | ||||
| """ | ||||
| import logging | ||||
| from collections import defaultdict | ||||
|  | ||||
| # Note: this is the modified louie library in wlauto/external. | ||||
| #       prioritylist does not exist in vanilla louie. | ||||
| from louie.prioritylist import PriorityList  # pylint: disable=E0611,F0401 | ||||
|  | ||||
| from wlauto.exceptions import ResourceError | ||||
|  | ||||
|  | ||||
class ResourceResolver(object):
    """
    Discovers and registers getters, and then handles requests for
    resources using registered getters.

    """

    def __init__(self, config):
        self.logger = logging.getLogger(self.__class__.__name__)
        # Maps a resource kind onto a PriorityList of getters for that kind.
        self.getters = defaultdict(PriorityList)
        self.config = config

    def load(self):
        """
        Discover getters under the specified source. The source could
        be either a python package/module or a path.

        """
        for rescls in self.config.ext_loader.list_resource_getters():
            self.config.get_extension(rescls.name, self).register()

    def get(self, resource, strict=True, *args, **kwargs):
        """
        Attempt to discover a resource of the specified kind matching the
        specified criteria, using the registered getters. Returns the path to
        the discovered resource. If nothing is found, raises a
        ``ResourceError`` -- or returns ``None`` when ``strict`` is ``False``.

        """
        self.logger.debug('Resolving {}'.format(resource))
        for getter in self.getters[resource.name]:
            self.logger.debug('Trying {}'.format(getter))
            found = getter.get(resource, *args, **kwargs)
            if found is None:
                continue
            self.logger.debug('Resource {} found using {}'.format(resource, getter))
            return found
        if strict:
            raise ResourceError('{} could not be found'.format(resource))
        self.logger.debug('Resource {} not found.'.format(resource))
        return None

    def register(self, getter, kind, priority=0):
        """
        Register *getter* as able to discover resources of the given *kind*
        with the given *priority*. Typically invoked by a getter from its own
        ``__init__``, registering itself for the resources it can discover.

        Getters registered with a higher priority are tried first; getters
        sharing a priority are tried in registration (i.e. discovery) order,
        which is essentially non-deterministic. Getters likely to find a
        resource, or a "better" version of it, should use higher (positive)
        priorities; fall-back getters should use lower (negative) ones.

        """
        self.logger.debug('Registering {}'.format(getter.name))
        self.getters[kind].add(getter, priority)

    def unregister(self, getter, kind):
        """
        Unregister a getter that has been registered earlier.

        """
        self.logger.debug('Unregistering {}'.format(getter.name))
        try:
            self.getters[kind].remove(getter)
        except ValueError:
            raise ValueError('Resource getter {} is not installed.'.format(getter.name))
							
								
								
									
										182
									
								
								wlauto/core/resource.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										182
									
								
								wlauto/core/resource.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,182 @@ | ||||
| #    Copyright 2013-2015 ARM Limited | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
| # | ||||
|  | ||||
| from wlauto.core.extension import Extension | ||||
|  | ||||
|  | ||||
class GetterPriority(object):
    """
    Enumerates the standard ResourceGetter priorities. Getters should
    generally register under one of these rather than picking arbitrary
    priority values.

    :cached: The cached version of the resource; looked at first. This also
             implies the resource at this location is only a "cache" (not the
             sole copy), so it may be cleared without losing the resource.
    :preferred: Take this resource in favour of the environment resource.
    :environment: Found somewhere under ~/.workload_automation/ or equivalent,
                  or from environment variables, external configuration files,
                  etc. These override resources supplied with the package.
    :external_package: Resource provided by another package.
    :package: Resource provided with the package.
    :remote: Resource will be downloaded from a remote location (such as an
             HTTP server or a samba share); tried only when every other
             getter has failed.

    """
    cached = 20             # try first; may be cleared safely
    preferred = 10
    environment = 0         # the default for ResourceGetter subclasses
    external_package = -5
    package = -10
    remote = -20            # last resort
|  | ||||
|  | ||||
class Resource(object):
    """
    Represents a resource that needs to be resolved. This can be pretty much
    anything: a file, an environment variable, a Python object, etc. The only
    thing a resource *has* to have is an owner (normally the
    Workload/Instrument/Device/etc object that needs the resource). In
    addition, a resource may have any number of identifying attributes, all
    of which are specific to the resource type.

    """

    # Concrete subclasses override this with the resource-type name.
    name = None

    def __init__(self, owner):
        self.owner = owner

    def delete(self, instance):
        """
        Delete an instance of this resource type. Concrete subclasses must
        implement this according to what the resource looks like, e.g.
        deleting a file or a directory tree, or removing an entry from
        a database.

        :note: Implementations should *not* contain any logic for deciding
               whether or not a resource should be deleted -- only the actual
               deletion. The assumption is that by the time this method is
               invoked, that decision has already been made.

        """
        raise NotImplementedError()

    def __str__(self):
        return "<{}'s {}>".format(self.owner, self.name)
|  | ||||
|  | ||||
class ResourceGetter(Extension):
    """
    Base class for implementing resolvers. Defines the resolver interface.
    Resolvers are responsible for discovering resources (such as particular
    kinds of files) they know about, based on the parameters passed to them.
    Each resolver also has a dict of attributes describing its operation,
    which may be used to determine which get invoked. There is no pre-defined
    set of attributes, and resolvers may define their own.

    Class attributes:

    :name: Name that uniquely identifies this getter. Must be set by any
           concrete subclass.
    :resource_type: Identifies the resource type(s) this getter can handle:
                    either a string (single type) or a list of strings
                    (multiple types). Must be set by any concrete subclass.
    :priority: Priority with which this getter will be invoked. This should
               be one of the standard priorities from the ``GetterPriority``
               enumeration; defaults to ``GetterPriority.environment``.

    """

    name = None
    resource_type = None
    priority = GetterPriority.environment

    def __init__(self, resolver, **kwargs):
        super(ResourceGetter, self).__init__(**kwargs)
        self.resolver = resolver

    def register(self):
        """
        Registers with a resource resolver. Concrete implementations must
        override this to invoke ``self.resolver.register()`` to register
        ``self`` for specific resource types.

        """
        for rtype in self._resource_types():
            self.resolver.register(self, rtype, self.priority)

    def unregister(self):
        """Unregister from a resource resolver."""
        for rtype in self._resource_types():
            self.resolver.unregister(self, rtype)

    def _resource_types(self):
        """Return ``resource_type`` normalised to a list, raising if unset."""
        if self.resource_type is None:
            raise ValueError('No resource type specified for {}'.format(self.name))
        if isinstance(self.resource_type, list):
            return self.resource_type
        return [self.resource_type]

    def get(self, resource, **kwargs):
        """
        Invoked by the resolver when attempting to resolve a resource; the
        resource to be resolved is passed as the first parameter. Any further
        parameters are specific to the particular resource type.

        This method is only invoked for resource types that the getter has
        registered for.

        :param resource: an instance of :class:`wlauto.core.resource.Resource`.

        :returns: Implementations must return either the discovered resource
                  or ``None`` if the resource could not be discovered.

        """
        raise NotImplementedError()

    def delete(self, resource, *args, **kwargs):
        """
        Delete the resource if it is discovered. All arguments are passed to
        a call to ``self.get()``; if that call returns a resource, it is
        deleted.

        :returns: ``True`` if the specified resource has been discovered and
                  deleted, ``False`` otherwise.

        """
        discovered = self.get(resource, *args, **kwargs)
        if not discovered:
            return False
        resource.delete(discovered)
        return True

    def __str__(self):
        return '<ResourceGetter {}>'.format(self.name)
|  | ||||
|  | ||||
class __NullOwner(object):
    """Represents an owner for a resource not owned by anyone."""

    name = 'noone'

    def __getattr__(self, name):
        # Any attribute not explicitly defined resolves to None.
        return None

    def __str__(self):
        return 'no-one'

    __repr__ = __str__


# Singleton stand-in owner for resources that nobody owns.
NO_ONE = __NullOwner()
							
								
								
									
										321
									
								
								wlauto/core/result.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										321
									
								
								wlauto/core/result.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,321 @@ | ||||
| #    Copyright 2013-2015 ARM Limited | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
| # | ||||
|  | ||||
| # pylint: disable=no-member | ||||
|  | ||||
| """ | ||||
| This module defines the classes used to handle result | ||||
| processing inside Workload Automation. There will be a | ||||
| :class:`wlauto.core.workload.WorkloadResult` object generated for | ||||
| every workload iteration executed. This object will have a list of | ||||
| :class:`wlauto.core.workload.WorkloadMetric` objects. This list will be | ||||
| populated by the workload itself and may also be updated by instrumentation | ||||
| (e.g. to add power measurements).  Once the result object has been fully | ||||
| populated, it will be passed into the ``process_iteration_result`` method of | ||||
| :class:`ResultProcessor`. Once the entire run has completed, a list containing | ||||
| result objects from all iterations will be passed into ``process_results`` | ||||
| method of :class`ResultProcessor`. | ||||
|  | ||||
| Which result processors will be active is defined by the ``result_processors`` | ||||
| list in the ``~/.workload_automation/config.py``. Only the result processors | ||||
| whose names appear in this list will be used. | ||||
|  | ||||
| A :class:`ResultsManager`  keeps track of active results processors. | ||||
|  | ||||
| """ | ||||
| import logging | ||||
| import traceback | ||||
| from copy import copy | ||||
| from contextlib import contextmanager | ||||
| from datetime import datetime | ||||
|  | ||||
| from wlauto.core.extension import Extension | ||||
| from wlauto.exceptions import WAError | ||||
| from wlauto.utils.types import numeric | ||||
| from wlauto.utils.misc import enum_metaclass | ||||
|  | ||||
|  | ||||
class ResultManager(object):
    """
    Keeps track of result processors and passes on the results onto the individual processors.

    Processors that raise during result handling are logged, finalized and
    uninstalled so that a single faulty processor cannot break the run.

    """

    def __init__(self):
        self.logger = logging.getLogger('ResultsManager')
        # Currently installed result processors, invoked in installation order.
        self.processors = []
        # Processors that errored while handling a result; retired at the end
        # of the enclosing _manage_processors scope.
        self._bad = []

    def install(self, processor):
        """Register a result processor so that it receives subsequent results."""
        self.logger.debug('Installing results processor %s', processor.name)
        self.processors.append(processor)

    def uninstall(self, processor):
        """Remove a previously installed processor; log a warning if it is not installed."""
        if processor in self.processors:
            self.logger.debug('Uninstalling results processor %s', processor.name)
            self.processors.remove(processor)
        else:
            self.logger.warning('Attempting to uninstall results processor %s, which is not installed.',
                                processor.name)

    def initialize(self, context):
        """Initialize all installed processors.

        Errors aren't handled at this stage, because this gets executed
        before workload execution starts and we just want to propagate them
        and terminate (so that the error can be corrected and WA restarted).
        """
        for processor in self.processors:
            processor.initialize(context)

    def add_result(self, result, context):
        """Hand an iteration result to every processor: process first, then export."""
        with self._manage_processors(context):
            for processor in self.processors:
                with self._handle_errors(processor):
                    processor.process_iteration_result(result, context)
            for processor in self.processors:
                with self._handle_errors(processor):
                    processor.export_iteration_result(result, context)

    def process_run_result(self, result, context):
        """Hand the overall run result to every processor: process first, then export."""
        with self._manage_processors(context):
            for processor in self.processors:
                with self._handle_errors(processor):
                    processor.process_run_result(result, context)
            for processor in self.processors:
                with self._handle_errors(processor):
                    processor.export_run_result(result, context)

    def finalize(self, context):
        """Finalize all installed processors at the end of the run."""
        with self._manage_processors(context):
            for processor in self.processors:
                with self._handle_errors(processor):
                    processor.finalize(context)

    def validate(self):
        """Validate all installed processors; validation errors propagate."""
        for processor in self.processors:
            processor.validate()

    @contextmanager
    def _manage_processors(self, context, finalize_bad=True):
        # After the wrapped operations run, retire any processors that failed
        # so they do not keep erroring on subsequent results.
        yield
        for processor in self._bad:
            if finalize_bad:
                processor.finalize(context)
            self.uninstall(processor)
        self._bad = []

    @contextmanager
    def _handle_errors(self, processor):
        # Wrap a single processor invocation; on error, log it and mark the
        # processor for retirement instead of aborting the run.
        try:
            yield
        except KeyboardInterrupt:
            # Never swallow user interrupts; bare raise preserves the traceback.
            raise
        except WAError as we:
            self.logger.error('"{}" result processor has encountered an error'.format(processor.name))
            self.logger.error('{}("{}")'.format(we.__class__.__name__, we.message))
            self._bad.append(processor)
        except Exception as e:  # pylint: disable=W0703
            self.logger.error('"{}" result processor has encountered an error'.format(processor.name))
            self.logger.error('{}("{}")'.format(e.__class__.__name__, e))
            self.logger.error(traceback.format_exc())
            self._bad.append(processor)
|  | ||||
|  | ||||
class ResultProcessor(Extension):
    """
    Base class for result processors. Defines an interface that should be implemented
    by the subclasses. A result processor can be used to do any kind of post-processing
    of the results, from writing them out to a file, to uploading them to a database,
    performing calculations, generating plots, etc.

    All hooks below default to no-ops; subclasses override only the stages
    they care about. They are invoked by :class:`ResultManager`.

    """

    def initialize(self, context):
        """Called once before any workload execution starts; errors propagate."""
        pass

    def process_iteration_result(self, result, context):
        """Called with each iteration's result; may modify/augment the result."""
        pass

    def export_iteration_result(self, result, context):
        """Called after processing, to write/upload the iteration result."""
        pass

    def process_run_result(self, result, context):
        """Called once with the overall run result; may modify/augment it."""
        pass

    def export_run_result(self, result, context):
        """Called after processing, to write/upload the overall run result."""
        pass

    def finalize(self, context):
        """Called once at the end of the run (or when the processor is retired)."""
        pass
|  | ||||
|  | ||||
class RunResult(object):
    """
    Contains overall results for a run.

    The overall ``status`` is derived on demand from the statuses of the
    individual iteration results.

    """

    # NOTE: enum_metaclass turns the ``values`` entries into class attributes
    # (self.OK, self.FAILED, ...) under Python 2.
    __metaclass__ = enum_metaclass('values', return_name=True)

    values = [
        'OK',
        'OKISH',
        'PARTIAL',
        'FAILED',
        'UNKNOWN',
    ]

    def __init__(self, run_info):
        self.info = run_info
        self.iteration_results = []
        self.artifacts = []
        self.events = []
        # Set when an error occurred outside of any iteration (demotes OK to OKISH).
        self.non_iteration_errors = False

    @property
    def status(self):
        statuses = [r.status for r in self.iteration_results]
        if not statuses or all(s == IterationResult.FAILED for s in statuses):
            # No results at all, or every single iteration failed.
            return self.FAILED
        if any(s in (IterationResult.FAILED, IterationResult.ABORTED) for s in statuses):
            return self.PARTIAL
        if any(s == IterationResult.PARTIAL for s in statuses) or self.non_iteration_errors:
            return self.OKISH
        if all(s == IterationResult.OK for s in statuses):
            return self.OK
        return self.UNKNOWN  # should never happen
|  | ||||
|  | ||||
class RunEvent(object):
    """
    An event that occured during a run.

    Captures a free-form message together with the (UTC) time at which the
    event was created.

    """
    def __init__(self, message):
        # UTC, so events from different timezones/hosts line up.
        self.timestamp = datetime.utcnow()
        self.message = message

    def to_dict(self):
        # Shallow copy, so callers cannot mutate the event through the dict.
        return copy(self.__dict__)

    def __str__(self):
        return '{0.timestamp} {0.message}'.format(self)

    __repr__ = __str__
|  | ||||
|  | ||||
class IterationResult(object):
    """
    Contains the result of running a single iteration of a workload. It is the
    responsibility of a workload to instantiate a IterationResult, populate it,
    and return it form its get_result() method.

    Status explanations:

       :NOT_STARTED: This iteration has not yet started.
       :RUNNING: This iteration is currently running and no errors have been detected.
       :OK: This iteration has completed and no errors have been detected
       :PARTIAL: One or more instruments have failed (the iteration may still be running).
       :FAILED: The workload itself has failed.
       :ABORTED: The user interupted the workload
       :SKIPPED: The iteration was skipped due to a previous failure

    """

    # enum_metaclass exposes each entry of ``values`` as a class attribute
    # (self.NOT_STARTED, self.OK, ...) under Python 2.
    __metaclass__ = enum_metaclass('values', return_name=True)

    values = [
        'NOT_STARTED',
        'RUNNING',

        'OK',
        'NONCRITICAL',
        'PARTIAL',
        'FAILED',
        'ABORTED',
        'SKIPPED',
    ]

    def __init__(self, spec):
        self.spec = spec
        self.id = spec.id
        self.workload = spec.workload
        self.iteration = None
        self.status = self.NOT_STARTED
        self.events = []
        self.metrics = []
        self.artifacts = []

    def add_metric(self, name, value, units=None, lower_is_better=False):
        """Record a new Metric for this iteration."""
        self.metrics.append(Metric(name, value, units, lower_is_better))

    def has_metric(self, name):
        """Return True if a metric with the given name has been recorded."""
        return any(metric.name == name for metric in self.metrics)

    def add_event(self, message):
        """Record a timestamped RunEvent with the given message."""
        self.events.append(RunEvent(message))

    def to_dict(self):
        # Shallow-copy the state, but expand events into plain dicts.
        d = copy(self.__dict__)
        d['events'] = [event.to_dict() for event in self.events]
        return d

    def __iter__(self):
        # Iterating a result yields its metrics.
        return iter(self.metrics)

    def __getitem__(self, name):
        # Look metrics up by name; first match wins.
        for metric in self.metrics:
            if metric.name == name:
                return metric
        raise KeyError('Metric {} not found.'.format(name))
|  | ||||
|  | ||||
class Metric(object):
    """
    This is a single metric collected from executing a workload.

    :param name: the name of the metric. Uniquely identifies the metric
                 within the results.
    :param value: The numerical value of the metric for this execution of
                  a workload. This can be either an int or a float.
    :param units: Units for the collected value. Can be None if the value
                  has no units (e.g. it's a count or a standardised score).
    :param lower_is_better: Boolean flag indicating where lower values are
                            better than higher ones. Defaults to False.

    """

    def __init__(self, name, value, units=None, lower_is_better=False):
        self.name = name
        # Coerce to int/float so downstream processors always see a number.
        self.value = numeric(value)
        self.units = units
        self.lower_is_better = lower_is_better

    def to_dict(self):
        return self.__dict__

    def __str__(self):
        # Rendered as "<name: value [units] (+|-)>"; '-' marks lower-is-better.
        direction = '-' if self.lower_is_better else '+'
        if self.units:
            body = '{}: {} {} ({})'.format(self.name, self.value, self.units, direction)
        else:
            body = '{}: {} ({})'.format(self.name, self.value, direction)
        return '<{}>'.format(body)

    __repr__ = __str__
|  | ||||
							
								
								
									
										189
									
								
								wlauto/core/signal.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										189
									
								
								wlauto/core/signal.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,189 @@ | ||||
| #    Copyright 2013-2015 ARM Limited | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
| # | ||||
|  | ||||
|  | ||||
| """ | ||||
| This module wraps the louie signalling mechanism. It relies on a modified version of louie | ||||
| that has prioritization added to handler invocation. | ||||
|  | ||||
| """ | ||||
| from louie import dispatcher  # pylint: disable=F0401 | ||||
|  | ||||
|  | ||||
class Signal(object):
    """
    This class implements the signals to be used for notifying callbacks
    registered to respond to different states and stages of the execution of
    workload automation.

    """

    def __init__(self, name, invert_priority=False):
        """
        Instantiates a Signal.

            :param name: name is the identifier of the Signal object. Signal instances with
                        the same name refer to the same execution stage/stage.
            :param invert_priority: boolean parameter that determines whether multiple
                                    callbacks for the same signal should be ordered with
                                    ascending or descending priorities. Typically this flag
                                    should be set to True if the Signal is triggered AFTER an
                                    a state/stage has been reached. That way callbacks with high
                                    priorities will be called right after the event has occured.
        """
        self.name = name
        self.invert_priority = invert_priority

    def __str__(self):
        return self.name

    __repr__ = __str__

    def __hash__(self):
        # Hash by the name's *value* (not id(self.name)): signals with the
        # same name represent the same stage, and id() was only stable by
        # accident of CPython string interning.
        return hash(self.name)
|  | ||||
|  | ||||
# These are paired events -- if the before_event is sent, the after_ signal is
# guaranteed to also be sent. In particular, the after_ signals will be sent
# even if there is an error, so you cannot assume in the handler that the
# device has booted successfully. In most cases, you should instead use the
# non-paired signals below.
# (before-* signals use invert_priority=True so that high-priority handlers
# run closest to the event itself -- last before it, first after it.)
BEFORE_FLASHING = Signal('before-flashing-signal', invert_priority=True)
SUCCESSFUL_FLASHING = Signal('successful-flashing-signal')
AFTER_FLASHING = Signal('after-flashing-signal')

BEFORE_BOOT = Signal('before-boot-signal', invert_priority=True)
SUCCESSFUL_BOOT = Signal('successful-boot-signal')
AFTER_BOOT = Signal('after-boot-signal')

BEFORE_INITIAL_BOOT = Signal('before-initial-boot-signal', invert_priority=True)
SUCCESSFUL_INITIAL_BOOT = Signal('successful-initial-boot-signal')
AFTER_INITIAL_BOOT = Signal('after-initial-boot-signal')

BEFORE_FIRST_ITERATION_BOOT = Signal('before-first-iteration-boot-signal', invert_priority=True)
SUCCESSFUL_FIRST_ITERATION_BOOT = Signal('successful-first-iteration-boot-signal')
AFTER_FIRST_ITERATION_BOOT = Signal('after-first-iteration-boot-signal')

BEFORE_WORKLOAD_SETUP = Signal('before-workload-setup-signal', invert_priority=True)
SUCCESSFUL_WORKLOAD_SETUP = Signal('successful-workload-setup-signal')
AFTER_WORKLOAD_SETUP = Signal('after-workload-setup-signal')

BEFORE_WORKLOAD_EXECUTION = Signal('before-workload-execution-signal', invert_priority=True)
SUCCESSFUL_WORKLOAD_EXECUTION = Signal('successful-workload-execution-signal')
AFTER_WORKLOAD_EXECUTION = Signal('after-workload-execution-signal')

BEFORE_WORKLOAD_RESULT_UPDATE = Signal('before-iteration-result-update-signal', invert_priority=True)
SUCCESSFUL_WORKLOAD_RESULT_UPDATE = Signal('successful-iteration-result-update-signal')
AFTER_WORKLOAD_RESULT_UPDATE = Signal('after-iteration-result-update-signal')

BEFORE_WORKLOAD_TEARDOWN = Signal('before-workload-teardown-signal', invert_priority=True)
SUCCESSFUL_WORKLOAD_TEARDOWN = Signal('successful-workload-teardown-signal')
AFTER_WORKLOAD_TEARDOWN = Signal('after-workload-teardown-signal')

BEFORE_OVERALL_RESULTS_PROCESSING = Signal('before-overall-results-process-signal', invert_priority=True)
SUCCESSFUL_OVERALL_RESULTS_PROCESSING = Signal('successful-overall-results-process-signal')
AFTER_OVERALL_RESULTS_PROCESSING = Signal('after-overall-results-process-signal')

# These are the not-paired signals; they are emitted independently. E.g. the
# fact that RUN_START was emitted does not mean run end will be.
RUN_START = Signal('start-signal', invert_priority=True)
RUN_END = Signal('end-signal')
WORKLOAD_SPEC_START = Signal('workload-spec-start-signal', invert_priority=True)
WORKLOAD_SPEC_END = Signal('workload-spec-end-signal')
ITERATION_START = Signal('iteration-start-signal', invert_priority=True)
ITERATION_END = Signal('iteration-end-signal')

RUN_INIT = Signal('run-init-signal')
SPEC_INIT = Signal('spec-init-signal')
ITERATION_INIT = Signal('iteration-init-signal')

RUN_FIN = Signal('run-fin-signal')

# These signals are used by the LoggerFilter to tell about logging events
ERROR_LOGGED = Signal('error_logged')
WARNING_LOGGED = Signal('warning_logged')
|  | ||||
|  | ||||
def connect(handler, signal, sender=dispatcher.Any, priority=0):
    """
    Connects a callback to a signal, so that the callback will be automatically invoked
    when that signal is sent.

    Parameters:

        :handler: This can be any callable that takes the right arguments for
                  the signal. For most signals this means a single argument that
                  will be an ``ExecutionContext`` instance. But please see documentation
                  for individual signals in the :ref:`signals reference <instrumentation_method_map>`.
        :signal: The signal to which the handler will be subscribed. Please see
                 :ref:`signals reference <instrumentation_method_map>` for the list of standard WA
                 signals.

                 .. note:: There is nothing that prevents instrumentation from sending their
                           own signals that are not part of the standard set. However the signal
                           must always be a :class:`wlauto.core.signal.Signal` instance.

        :sender: The handler will be invoked only for the signals emitted by this sender. By
                 default, this is set to :class:`louie.dispatcher.Any`, so the handler will
                 be invoked for signals from any sender.
        :priority: An integer (positive or negative) that specifies the priority of the handler.
                   Handlers with higher priority will be called before handlers with lower
                   priority. The call order of handlers with the same priority is not specified.
                   Defaults to 0.

                   .. note:: Priorities for some signals are inverted (so highest priority
                             handlers get executed last). Please see :ref:`signals reference
                             <instrumentation_method_map>` for details.

    """
    # For invert_priority signals, a caller-facing "high" priority must map to
    # a low dispatcher priority, so negate it before registering.
    effective_priority = -priority if signal.invert_priority else priority
    dispatcher.connect(handler, signal, sender, priority=effective_priority)  # pylint: disable=E1123
|  | ||||
|  | ||||
def disconnect(handler, signal, sender=dispatcher.Any):
    """
    Disconnect a previously connected handler from the specified signal, optionally, only
    for the specified sender.

    Parameters:

        :handler: The callback to be disconnected.
        :signal: The signal the handler is to be disconnected from. It will
                 be an :class:`wlauto.core.signal.Signal` instance.
        :sender: If specified, the handler will only be disconnected from the signal
                sent by this sender.

    """
    dispatcher.disconnect(handler, signal, sender)
|  | ||||
|  | ||||
def send(signal, sender, *args, **kwargs):
    """
    Sends a signal, causing connected handlers to be invoked.

    Parameters:

        :signal: Signal to be sent. This must be an instance of :class:`wlauto.core.signal.Signal`
                 or its subclasses.
        :sender: The sender of the signal (typically, this would be ``self``). Some handlers may only
                 be subscribed to signals from a particular sender.

        The rest of the parameters will be passed on as arguments to the handler.

    """
    dispatcher.send(signal, sender, *args, **kwargs)
|  | ||||
							
								
								
									
										26
									
								
								wlauto/core/version.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										26
									
								
								wlauto/core/version.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,26 @@ | ||||
| #    Copyright 2014-2015 ARM Limited | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
| # | ||||
|  | ||||
|  | ||||
from collections import namedtuple

# Version is a (major, minor, revision) triple, exposed as a named tuple.
VersionTuple = namedtuple('Version', ['major', 'minor', 'revision'])

# The current Workload Automation version.
version = VersionTuple(2, 3, 0)


def get_wa_version():
    """Return the WA version as a 'major.minor.revision' string."""
    return '{}.{}.{}'.format(*version)
							
								
								
									
										94
									
								
								wlauto/core/workload.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										94
									
								
								wlauto/core/workload.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,94 @@ | ||||
| #    Copyright 2014-2015 ARM Limited | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
| # | ||||
|  | ||||
|  | ||||
| """ | ||||
| A workload is the unit of execution. It represents a set of activities that are performed | ||||
| and measured together, as well as the necessary setup and teardown procedures. A single | ||||
| execution of a workload produces one :class:`wlauto.core.result.WorkloadResult` that is populated with zero or more | ||||
| :class:`wlauto.core.result.WorkloadMetric`\ s and/or | ||||
| :class:`wlauto.core.result.Artifact`\s by the workload and active instrumentation. | ||||
|  | ||||
| """ | ||||
| from wlauto.core.extension import Extension | ||||
| from wlauto.exceptions import WorkloadError | ||||
|  | ||||
|  | ||||
class Workload(Extension):
    """
    This is the base class for the workloads executed by the framework.
    All lifecycle methods below default to no-ops; concrete workloads
    override the ones they need (at minimum, typically ``run`` and
    ``update_result``).

    """

    # Device names this workload supports; an empty list means any device.
    supported_devices = []
    # Platforms this workload supports; an empty list means any platform.
    supported_platforms = []
    # Names of the metrics that summarise this workload's results.
    summary_metrics = []

    def __init__(self, device, **kwargs):
        """
        Creates a new Workload.

        :param device: the Device on which the workload will be executed.
        :raises WorkloadError: if the device or its platform is not in this
                               workload's supported lists (when those are
                               non-empty).
        """
        super(Workload, self).__init__(**kwargs)
        if self.supported_devices and device.name not in self.supported_devices:
            raise WorkloadError('Workload {} does not support device {}'.format(self.name, device.name))
        if self.supported_platforms and device.platform not in self.supported_platforms:
            raise WorkloadError('Workload {} does not support platform {}'.format(self.name, device.platform))
        self.device = device

    def init_resources(self, context):
        """
        May be optionally overridden by concrete instances in order to discover and initialise
        necessary resources. This method will be invoked at most once during the execution:
        before running any workloads, and before invocation of ``validate()``, but after it is
        clear that this workload will run (i.e. this method will not be invoked for workloads
        that have been discovered but have not been scheduled to run in the agenda).

        """
        pass

    def setup(self, context):
        """
        Perform the setup necessary to run the workload, such as copying the necessary files
        to the device, configuring the environments, etc.

        This is also the place to perform any on-device checks prior to attempting to execute
        the workload.

        """
        pass

    def run(self, context):
        """Execute the workload. This is the method that performs the actual "work" of the workload."""
        pass

    def update_result(self, context):
        """
        Update the result within the specified execution context with the metrics
        from this workload iteration.

        """
        pass

    def teardown(self, context):
        """ Perform any final clean up for the Workload. """
        pass

    def __str__(self):
        return '<Workload {}>'.format(self.name)
|  | ||||
		Reference in New Issue
	
	Block a user