Mirror of https://github.com/ARM-software/workload-automation.git
removing old files

Removing old and unused files:

- wa/framework/old_output.py: superseded by output.py in the same dir
- the entire wlauto tree: replaced by the wa/ tree; it is stale by now anyway
- log.py and actor.py from framework/, as neither is used
@@ -1,31 +0,0 @@
import uuid
import logging

from wa.framework import pluginloader
from wa.framework.plugin import Plugin


class JobActor(Plugin):

    kind = 'job_actor'

    def initialize(self, context):
        pass

    def run(self):
        pass

    def restart(self):
        pass

    def complete(self):
        pass

    def finalize(self):
        pass


class NullJobActor(JobActor):

    name = 'null-job-actor'
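JobActor defined a (now removed) plugin interface for per-job behaviour. A minimal sketch of how a concrete actor would have subclassed it (hedged: the module path and the context attribute are assumptions, not shown in the file above):

    from wa.framework.actor import JobActor


    class LoggingJobActor(JobActor):

        name = 'logging-job-actor'

        def initialize(self, context):
            # Illustrative only: stash whatever the actor needs from the context.
            self.context = context

        def run(self):
            print('running job actor for {}'.format(self.context))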
@@ -1,306 +0,0 @@
#    Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#


# pylint: disable=E1101
import logging
import string
import threading
import subprocess

import colorama

from wa.framework import signal
from wa.framework.exception import WAError
from wa.utils.misc import get_traceback


COLOR_MAP = {
    logging.DEBUG: colorama.Fore.BLUE,
    logging.INFO: colorama.Fore.GREEN,
    logging.WARNING: colorama.Fore.YELLOW,
    logging.ERROR: colorama.Fore.RED,
    logging.CRITICAL: colorama.Style.BRIGHT + colorama.Fore.RED,
}

RESET_COLOR = colorama.Style.RESET_ALL

_indent_level = 0
_indent_width = 4
_console_handler = None


def init(verbosity=logging.INFO, color=True, indent_with=4,
         regular_fmt='%(levelname)-8s %(message)s',
         verbose_fmt='%(asctime)s %(levelname)-8s %(name)-10.10s: %(message)s',
         debug=False):
    global _indent_width, _console_handler
    _indent_width = indent_with
    signal.log_error_func = lambda m: log_error(m, signal.logger)

    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)

    error_handler = ErrorSignalHandler(logging.DEBUG)
    root_logger.addHandler(error_handler)

    _console_handler = logging.StreamHandler()
    if color:
        formatter = ColorFormatter
    else:
        formatter = LineFormatter
    if verbosity:
        _console_handler.setLevel(logging.DEBUG)
        _console_handler.setFormatter(formatter(verbose_fmt))
    else:
        _console_handler.setLevel(logging.INFO)
        _console_handler.setFormatter(formatter(regular_fmt))
    root_logger.addHandler(_console_handler)
    logging.basicConfig(level=logging.DEBUG)
    if not debug:
        logging.raiseExceptions = False

def set_level(level):
    _console_handler.setLevel(level)


def add_file(filepath, level=logging.DEBUG,
             fmt='%(asctime)s %(levelname)-8s %(name)-10.10s: %(message)s'):
    root_logger = logging.getLogger()
    file_handler = logging.FileHandler(filepath)
    file_handler.setLevel(level)
    file_handler.setFormatter(LineFormatter(fmt))
    root_logger.addHandler(file_handler)

def enable(logs):
    if isinstance(logs, list):
        for log in logs:
            __enable_logger(log)
    else:
        __enable_logger(logs)


def disable(logs):
    if isinstance(logs, list):
        for log in logs:
            __disable_logger(log)
    else:
        __disable_logger(logs)


def __enable_logger(logger):
    if isinstance(logger, basestring):
        logger = logging.getLogger(logger)
    logger.propagate = True


def __disable_logger(logger):
    if isinstance(logger, basestring):
        logger = logging.getLogger(logger)
    logger.propagate = False


def indent():
    global _indent_level
    _indent_level += 1


def dedent():
    global _indent_level
    _indent_level -= 1

def log_error(e, logger, critical=False):
    """
    Log the specified Exception as an error. The error message will be
    formatted differently depending on the nature of the exception.

    :e: the error to log. Should be an instance of ``Exception``.
    :logger: logger to be used.
    :critical: if ``True``, this error will be logged at ``logging.CRITICAL``
               level, otherwise it will be logged as ``logging.ERROR``.

    """
    if critical:
        log_func = logger.critical
    else:
        log_func = logger.error

    if isinstance(e, KeyboardInterrupt):
        log_func('Got CTRL-C. Aborting.')
    elif isinstance(e, WAError):
        log_func(e)
    elif isinstance(e, subprocess.CalledProcessError):
        tb = get_traceback()
        log_func(tb)
        command = e.cmd
        if e.args:
            command = '{} {}'.format(command, ' '.join(map(str, e.args)))
        message = 'Command \'{}\' returned non-zero exit status {}\nOUTPUT:\n{}\n'
        log_func(message.format(command, e.returncode, e.output))
    elif isinstance(e, SyntaxError):
        tb = get_traceback()
        log_func(tb)
        message = 'Syntax Error in {}, line {}, offset {}:'
        log_func(message.format(e.filename, e.lineno, e.offset))
        log_func('\t{}'.format(e.msg))
    else:
        tb = get_traceback()
        log_func(tb)
        log_func('{}({})'.format(e.__class__.__name__, e))

class ErrorSignalHandler(logging.Handler):
    """
    Emits signals for ERROR and WARNING level traces.

    """

    def emit(self, record):
        if record.levelno == logging.ERROR:
            signal.send(signal.ERROR_LOGGED, self)
        elif record.levelno == logging.WARNING:
            signal.send(signal.WARNING_LOGGED, self)


class LineFormatter(logging.Formatter):
    """
    Logs each line of the message separately.

    """

    def format(self, record):
        record.message = record.getMessage()
        if self.usesTime():
            record.asctime = self.formatTime(record, self.datefmt)

        indent = _indent_width * _indent_level
        d = record.__dict__
        parts = []
        for line in record.message.split('\n'):
            line = ' ' * indent + line
            d.update({'message': line.strip('\r')})
            parts.append(self._fmt % d)

        return '\n'.join(parts)


class ColorFormatter(LineFormatter):
    """
    Formats logging records with color and prepends record info
    to each line of the message:

        BLUE for the DEBUG logging level
        GREEN for the INFO logging level
        YELLOW for the WARNING logging level
        RED for the ERROR logging level
        BOLD RED for the CRITICAL logging level

    """

    def __init__(self, fmt=None, datefmt=None):
        super(ColorFormatter, self).__init__(fmt, datefmt)
        template_text = self._fmt.replace('%(message)s', RESET_COLOR + '%(message)s${color}')
        template_text = '${color}' + template_text + RESET_COLOR
        self.fmt_template = string.Template(template_text)

    def format(self, record):
        self._set_color(COLOR_MAP[record.levelno])
        return super(ColorFormatter, self).format(record)

    def _set_color(self, color):
        self._fmt = self.fmt_template.substitute(color=color)

class BaseLogWriter(object):

    def __init__(self, name, level=logging.DEBUG):
        """
        File-like object class designed to be used for logging from streams.
        Each complete line (terminated by a new line character) gets logged
        at the specified level; incomplete lines are buffered until the next
        new line arrives.

        :param name: The name of the logger that will be used.

        """
        self.logger = logging.getLogger(name)
        self.buffer = ''
        if level == logging.DEBUG:
            self.do_write = self.logger.debug
        elif level == logging.INFO:
            self.do_write = self.logger.info
        elif level == logging.WARNING:
            self.do_write = self.logger.warning
        elif level == logging.ERROR:
            self.do_write = self.logger.error
        else:
            raise Exception('Unknown logging level: {}'.format(level))

    def flush(self):
        # Defined to match the interface expected by pexpect.
        return self

    def close(self):
        if self.buffer:
            self.logger.debug(self.buffer)
            self.buffer = ''
        return self

    def __del__(self):
        # Ensure we don't lose buffered output
        self.close()

class LogWriter(BaseLogWriter):

    def write(self, data):
        data = data.replace('\r\n', '\n').replace('\r', '\n')
        if '\n' in data:
            parts = data.split('\n')
            parts[0] = self.buffer + parts[0]
            for part in parts[:-1]:
                self.do_write(part)
            self.buffer = parts[-1]
        else:
            self.buffer += data
        return self


class LineLogWriter(BaseLogWriter):

    def write(self, data):
        self.do_write(data)


class StreamLogger(threading.Thread):
    """
    Logs output from a stream in a thread.

    """

    def __init__(self, name, stream, level=logging.DEBUG, klass=LogWriter):
        super(StreamLogger, self).__init__()
        self.writer = klass(name, level)
        self.stream = stream
        self.daemon = True

    def run(self):
        line = self.stream.readline()
        while line:
            self.writer.write(line.rstrip('\n'))
            line = self.stream.readline()
        self.writer.close()
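The module is driven through init() plus the writer classes above. A minimal usage sketch (hedged: assumes the module is importable as wa.framework.log and that colorama is installed):

    import subprocess

    from wa.framework import log

    log.init()                       # configure the root logger and console handler
    log.add_file('run.log')          # mirror output to a file at DEBUG level

    proc = subprocess.Popen(['uname', '-a'], stdout=subprocess.PIPE)
    streamer = log.StreamLogger('uname', proc.stdout)  # one DEBUG record per line
    streamer.start()
    proc.wait()
    streamer.join()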
@@ -1,362 +0,0 @@
import os
import shutil
import logging
import uuid
from copy import copy
from datetime import datetime, timedelta

from wa.framework import signal, log
from wa.framework.configuration.core import merge_config_values
from wa.utils import serializer
from wa.utils.misc import enum_metaclass, ensure_directory_exists as _d
from wa.utils.types import numeric


class Status(object):

    __metaclass__ = enum_metaclass('values', return_name=True)

    values = [
        'NEW',
        'PENDING',
        'RUNNING',
        'COMPLETE',
        'OK',
        'OKISH',
        'NONCRITICAL',
        'PARTIAL',
        'FAILED',
        'ABORTED',
        'SKIPPED',
        'UNKNOWN',
    ]


class WAOutput(object):

    basename = '.wa-output'

    @classmethod
    def load(cls, source):
        if os.path.isfile(source):
            pod = serializer.load(source)
        elif os.path.isdir(source):
            pod = serializer.load(os.path.join(source, cls.basename))
        else:
            message = 'Cannot load {} from {}'
            raise ValueError(message.format(cls.__name__, source))
        return cls.from_pod(pod)

    @classmethod
    def from_pod(cls, pod):
        instance = cls(pod['output_directory'])
        instance.status = pod['status']
        instance.metrics = [Metric.from_pod(m) for m in pod['metrics']]
        instance.artifacts = [Artifact.from_pod(a) for a in pod['artifacts']]
        instance.events = [RunEvent.from_pod(e) for e in pod['events']]
        instance.classifiers = pod['classifiers']
        return instance

    def __init__(self, output_directory):
        self.logger = logging.getLogger('output')
        self.output_directory = output_directory
        self.status = Status.UNKNOWN
        self.classifiers = {}
        self.metrics = []
        self.artifacts = []
        self.events = []

    def initialize(self, overwrite=False):
        if os.path.exists(self.output_directory):
            if not overwrite:
                raise RuntimeError('"{}" already exists.'.format(self.output_directory))
            self.logger.info('Removing existing output directory.')
            shutil.rmtree(self.output_directory)
        self.logger.debug('Creating output directory {}'.format(self.output_directory))
        os.makedirs(self.output_directory)

    def add_metric(self, name, value, units=None, lower_is_better=False, classifiers=None):
        classifiers = merge_config_values(self.classifiers, classifiers or {})
        self.metrics.append(Metric(name, value, units, lower_is_better, classifiers))

    def add_artifact(self, name, path, kind, *args, **kwargs):
        path = _check_artifact_path(path, self.output_directory)
        self.artifacts.append(Artifact(name, path, kind, Artifact.RUN, *args, **kwargs))

    def get_path(self, subpath):
        return os.path.join(self.output_directory, subpath)

    def to_pod(self):
        return {
            'output_directory': self.output_directory,
            'status': self.status,
            'metrics': [m.to_pod() for m in self.metrics],
            'artifacts': [a.to_pod() for a in self.artifacts],
            'events': [e.to_pod() for e in self.events],
            'classifiers': copy(self.classifiers),
        }

    def persist(self):
        statefile = os.path.join(self.output_directory, self.basename)
        with open(statefile, 'wb') as wfh:
            serializer.dump(self, wfh)


class RunInfo(object):

    default_name_format = 'wa-run-%y%m%d-%H%M%S'

    def __init__(self, project=None, project_stage=None, name=None):
        self.uuid = uuid.uuid4()
        self.project = project
        self.project_stage = project_stage
        self.name = name or datetime.now().strftime(self.default_name_format)
        self.start_time = None
        self.end_time = None
        self.duration = None

    @staticmethod
    def from_pod(pod):
        instance = RunInfo()
        instance.uuid = uuid.UUID(pod['uuid'])
        instance.project = pod['project']
        instance.project_stage = pod['project_stage']
        instance.name = pod['name']
        instance.start_time = pod['start_time']
        instance.end_time = pod['end_time']
        instance.duration = timedelta(seconds=pod['duration'])
        return instance

    def to_pod(self):
        d = copy(self.__dict__)
        d['uuid'] = str(self.uuid)
        d['duration'] = self.duration.days * 3600 * 24 + self.duration.seconds
        return d


class RunOutput(WAOutput):

    @property
    def info_directory(self):
        return _d(os.path.join(self.output_directory, '_info'))

    @property
    def config_directory(self):
        return _d(os.path.join(self.output_directory, '_config'))

    @property
    def failed_directory(self):
        return _d(os.path.join(self.output_directory, '_failed'))

    @property
    def log_file(self):
        return os.path.join(self.output_directory, 'run.log')

    @classmethod
    def from_pod(cls, pod):
        instance = super(RunOutput, cls).from_pod(pod)
        instance.info = RunInfo.from_pod(pod['info'])
        instance.jobs = [JobOutput.from_pod(i) for i in pod['jobs']]
        instance.failed = [JobOutput.from_pod(i) for i in pod['failed']]
        return instance

    def __init__(self, output_directory):
        super(RunOutput, self).__init__(output_directory)
        self.logger = logging.getLogger('output')
        self.info = RunInfo()
        self.jobs = []
        self.failed = []

    def initialize(self, overwrite=False):
        super(RunOutput, self).initialize(overwrite)
        log.add_file(self.log_file)
        self.add_artifact('runlog', self.log_file, 'log')

    def create_job_output(self, id):
        outdir = os.path.join(self.output_directory, id)
        job_output = JobOutput(outdir)
        self.jobs.append(job_output)
        return job_output

    def move_failed(self, job_output):
        basename = os.path.basename(job_output.output_directory)
        i = 1
        dest = os.path.join(self.failed_directory, '{}-{}'.format(basename, i))
        while os.path.exists(dest):
            i += 1
            dest = os.path.join(self.failed_directory, '{}-{}'.format(basename, i))
        shutil.move(job_output.output_directory, dest)

    def to_pod(self):
        pod = super(RunOutput, self).to_pod()
        pod['info'] = self.info.to_pod()
        pod['jobs'] = [i.to_pod() for i in self.jobs]
        pod['failed'] = [i.to_pod() for i in self.failed]
        return pod


class JobOutput(WAOutput):

    def add_artifact(self, name, path, kind, *args, **kwargs):
        path = _check_artifact_path(path, self.output_directory)
        self.artifacts.append(Artifact(name, path, kind, Artifact.ITERATION, *args, **kwargs))

class Artifact(object):
    """
    This is an artifact generated during execution/post-processing of a workload.
    Unlike metrics, this represents an actual artifact, such as a file, that gets
    generated. This may be a "result", such as a trace, or it may be "metadata",
    such as logs. These are distinguished using the ``kind`` attribute, which
    also helps WA decide how it should be handled. Currently supported kinds are:

        :log: A log file. Not part of the "results" as such, but contains
              information about the run/workload execution that may be useful
              for diagnostics/meta analysis.
        :meta: A file containing metadata. This is not part of the "results",
               but contains information that may be necessary to reproduce the
               results (contrast with ``log`` artifacts which are *not*
               necessary).
        :data: This file contains new data, not available otherwise, and should
               be considered part of the "results" generated by WA. Most traces
               would fall into this category.
        :export: Exported version of results or some other artifact. This
                 signifies that this artifact does not contain any new data
                 that is not available elsewhere, and that it may be safely
                 discarded without losing information.
        :raw: Signifies that this is a raw dump/log that is normally processed
              to extract useful information and is then discarded. In a sense,
              it is the opposite of ``export``, but in general may also be
              discarded.

              .. note:: Whether a file is marked as ``log``/``data`` or ``raw``
                        depends on how important it is to preserve this file,
                        e.g. when archiving, vs how much space it takes up.
                        Unlike ``export`` artifacts, which are (almost) always
                        ignored by other exporters as that would never result
                        in data loss, ``raw`` files *may* be processed by
                        exporters if they decide that the risk of losing
                        potentially (though unlikely) useful data is greater
                        than the time/space cost of handling the artifact
                        (e.g. a database uploader may choose to ignore ``raw``
                        artifacts, whereas a network filer archiver may choose
                        to archive them).

        .. note: The kind parameter is intended to represent the logical
                 function of a particular artifact, not its intended means of
                 processing -- this is left entirely up to the result
                 processors.

    """

    RUN = 'run'
    ITERATION = 'iteration'

    valid_kinds = ['log', 'meta', 'data', 'export', 'raw']

    @staticmethod
    def from_pod(pod):
        return Artifact(**pod)

    def __init__(self, name, path, kind, level=RUN, mandatory=False, description=None):
        """
        :param name: Name that uniquely identifies this artifact.
        :param path: The *relative* path of the artifact. Depending on the
                     ``level``, this must be relative to either the run or the
                     iteration output directory. Note: this path *must* be
                     delimited using ``/``, irrespective of the operating system.
        :param kind: The type of the artifact this is (e.g. log file, result,
                     etc.); this will be used as a hint to result processors.
                     This must be one of ``'log'``, ``'meta'``, ``'data'``,
                     ``'export'``, ``'raw'``.
        :param level: The level at which the artifact will be generated. Must
                      be either ``'iteration'`` or ``'run'``.
        :param mandatory: Boolean value indicating whether this artifact must
                          be present at the end of result processing for its
                          level.
        :param description: A free-form description of what this artifact is.

        """
        if kind not in self.valid_kinds:
            raise ValueError('Invalid Artifact kind: {}; must be in {}'.format(kind, self.valid_kinds))
        self.name = name
        self.path = path.replace('/', os.sep) if path is not None else path
        self.kind = kind
        self.level = level
        self.mandatory = mandatory
        self.description = description

    def exists(self, context):
        """Returns ``True`` if artifact exists within the specified context,
        and ``False`` otherwise."""
        fullpath = os.path.join(context.output_directory, self.path)
        return os.path.exists(fullpath)

    def to_pod(self):
        return copy(self.__dict__)

class RunEvent(object):
    """
    An event that occurred during a run.

    """

    @staticmethod
    def from_pod(pod):
        instance = RunEvent(pod['message'])
        instance.timestamp = pod['timestamp']
        return instance

    def __init__(self, message):
        self.timestamp = datetime.utcnow()
        self.message = message

    def to_pod(self):
        return copy(self.__dict__)

    def __str__(self):
        return '{} {}'.format(self.timestamp, self.message)

    __repr__ = __str__

class Metric(object):
    """
    This is a single metric collected from executing a workload.

    :param name: the name of the metric. Uniquely identifies the metric
                 within the results.
    :param value: The numerical value of the metric for this execution of
                  a workload. This can be either an int or a float.
    :param units: Units for the collected value. Can be None if the value
                  has no units (e.g. it's a count or a standardised score).
    :param lower_is_better: Boolean flag indicating whether lower values are
                            better than higher ones. Defaults to False.
    :param classifiers: A set of key-value pairs to further classify this
                        metric beyond the current iteration (e.g. this can be
                        used to identify sub-tests).

    """

    @staticmethod
    def from_pod(pod):
        return Metric(**pod)

    def __init__(self, name, value, units=None, lower_is_better=False, classifiers=None):
        self.name = name
        self.value = numeric(value)
        self.units = units
        self.lower_is_better = lower_is_better
        self.classifiers = classifiers or {}

    def to_pod(self):
        return copy(self.__dict__)

    def __str__(self):
        result = '{}: {}'.format(self.name, self.value)
        if self.units:
            result += ' ' + self.units
        result += ' ({})'.format('-' if self.lower_is_better else '+')
        return '<{}>'.format(result)

    __repr__ = __str__


def _check_artifact_path(path, rootpath):
    if path.startswith(rootpath):
        return os.path.abspath(path)
    rootpath = os.path.abspath(rootpath)
    full_path = os.path.join(rootpath, path)
    if not os.path.isfile(full_path):
        raise ValueError('Cannot add artifact because {} does not exist.'.format(full_path))
    return full_path
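Taken together, these classes form a serialisable results tree. A minimal lifecycle sketch (hedged: it depends on the wa.utils.serializer backend, and the module was removed along with this file):

    from wa.framework.old_output import RunOutput, Status

    run_output = RunOutput('wa_output')
    run_output.initialize(overwrite=True)     # create the output directory, start run.log
    job_output = run_output.create_job_output('b01-andebench-1')
    job_output.add_metric('execution_time', 12.3, units='seconds',
                          lower_is_better=True)
    run_output.status = Status.OK
    run_output.persist()                      # write the .wa-output state file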
@@ -1,80 +0,0 @@
import string
from copy import copy

from devlib import Platform, AndroidTarget
# Cpuinfo, KernelVersion and KernelConfig are used below but were not
# imported in this file.


class TargetInfo(object):

    @staticmethod
    def from_pod(pod):
        instance = TargetInfo()
        instance.target = pod['target']
        instance.abi = pod['abi']
        instance.cpuinfo = Cpuinfo(pod['cpuinfo'])
        instance.os = pod['os']
        instance.os_version = pod['os_version']
        instance.is_rooted = pod['is_rooted']
        instance.kernel_version = KernelVersion(pod['kernel_release'],
                                                pod['kernel_version'])
        instance.kernel_config = KernelConfig(pod['kernel_config'])

        if pod["target"] == "AndroidTarget":
            instance.screen_resolution = pod['screen_resolution']
            instance.prop = pod['prop']
            instance.android_id = pod['android_id']

        return instance

    def __init__(self, target=None):
        if target:
            self.target = target.__class__.__name__
            self.cpuinfo = target.cpuinfo
            self.os = target.os
            self.os_version = target.os_version
            self.abi = target.abi
            self.is_rooted = target.is_rooted
            self.kernel_version = target.kernel_version
            self.kernel_config = target.config

            if isinstance(target, AndroidTarget):
                self.screen_resolution = target.screen_resolution
                self.prop = target.getprop()
                self.android_id = target.android_id

        else:
            self.target = None
            self.cpuinfo = None
            self.os = None
            self.os_version = None
            self.abi = None
            self.is_rooted = None
            self.kernel_version = None
            self.kernel_config = None
            self.screen_resolution = None
            self.prop = None
            self.android_id = None

    def to_pod(self):
        pod = {}
        pod['target'] = self.target
        pod['abi'] = self.abi
        pod['cpuinfo'] = self.cpuinfo.sections
        pod['os'] = self.os
        pod['os_version'] = self.os_version
        pod['is_rooted'] = self.is_rooted
        pod['kernel_release'] = self.kernel_version.release
        pod['kernel_version'] = self.kernel_version.version
        pod['kernel_config'] = dict(self.kernel_config.iteritems())

        if self.target == "AndroidTarget":
            pod['screen_resolution'] = self.screen_resolution
            pod['prop'] = self.prop
            pod['android_id'] = self.android_id

        return pod

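TargetInfo is a plain-old-data snapshot of a devlib target. A round-trip sketch (hedged: assumes a connected devlib Target instance named target, and that the Cpuinfo/KernelVersion/KernelConfig helpers noted above are in scope):

    info = TargetInfo(target)             # snapshot the live target
    pod = info.to_pod()                   # plain dict, safe to serialise
    restored = TargetInfo.from_pod(pod)
    assert restored.abi == info.abi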
@@ -1,35 +0,0 @@
#    Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from wlauto.core.configuration import settings  # NOQA
from wlauto.core.device_manager import DeviceManager  # NOQA
from wlauto.core.command import Command  # NOQA
from wlauto.core.workload import Workload  # NOQA
from wlauto.core.plugin import Artifact, Alias  # NOQA
from wlauto.core.configuration.configuration import ConfigurationPoint as Parameter
import wlauto.core.pluginloader as PluginLoader  # NOQA
from wlauto.core.instrumentation import Instrument  # NOQA
from wlauto.core.result import ResultProcessor, IterationResult  # NOQA
from wlauto.core.resource import ResourceGetter, Resource, GetterPriority, NO_ONE  # NOQA
from wlauto.core.exttype import get_plugin_type  # NOQA Note: MUST be imported after other core imports.

from wlauto.common.resources import File, PluginAsset, Executable
from wlauto.common.android.resources import ApkFile, JarFile
from wlauto.common.android.workload import (UiAutomatorWorkload, ApkWorkload, AndroidBenchmark,  # NOQA
                                            AndroidUiAutoBenchmark, GameWorkload)  # NOQA

from wlauto.core.version import get_wa_version

__version__ = get_wa_version()
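The package root re-exported the core plugin API so that extensions could be written against a single namespace. A minimal sketch of the surface this enabled (hedged: the Workload details, such as the run(context) signature, are as commonly used in WA2-era extensions and are not shown in the file above):

    from wlauto import Workload, Parameter


    class Idle(Workload):

        name = 'example_idle'
        description = 'Illustrative only: sleeps for a fixed duration.'

        parameters = [
            Parameter('duration', kind=int, default=5,
                      description='Time to idle, in seconds.'),
        ]

        def run(self, context):
            import time
            time.sleep(self.duration)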
@@ -1,79 +0,0 @@
# This agenda specifies configuration that may be used for regression runs
# on big.LITTLE systems. This agenda will work with a TC2 device configured
# as described in the documentation.
config:
        device: tc2
        run_name: big.LITTLE_regression
global:
        iterations: 5
sections:
        - id: mp_a15only
          boot_parameters:
                os_mode: mp_a15_only
          runtime_parameters:
                a15_governor: interactive
                a15_governor_tunables:
                        above_hispeed_delay: 20000
        - id: mp_a7bc
          boot_parameters:
                os_mode: mp_a7_bootcluster
          runtime_parameters:
                a7_governor: interactive
                a7_min_frequency: 500000
                a7_governor_tunables:
                        above_hispeed_delay: 20000
                a15_governor: interactive
                a15_governor_tunables:
                        above_hispeed_delay: 20000
        - id: mp_a15bc
          boot_parameters:
                os_mode: mp_a15_bootcluster
          runtime_parameters:
                a7_governor: interactive
                a7_min_frequency: 500000
                a7_governor_tunables:
                        above_hispeed_delay: 20000
                a15_governor: interactive
                a15_governor_tunables:
                        above_hispeed_delay: 20000
workloads:
        - id: b01
          name: andebench
          workload_parameters:
                number_of_threads: 5
        - id: b02
          name: andebench
          label: andebenchst
          workload_parameters:
                number_of_threads: 1
        - id: b03
          name: antutu
          label: antutu4.0.3
          workload_parameters:
                version: 4.0.3
        - id: b04
          name: benchmarkpi
        - id: b05
          name: caffeinemark
        - id: b06
          name: cfbench
        - id: b07
          name: geekbench
          label: geekbench3
          workload_parameters:
                version: 3
        - id: b08
          name: linpack
        - id: b09
          name: quadrant
        - id: b10
          name: smartbench
        - id: b11
          name: sqlite
        - id: b12
          name: vellamo

        - id: w01
          name: bbench_with_audio
        - id: w02
          name: audio
@@ -1,43 +0,0 @@
# This is an agenda that is built up during the explanation of the agenda
# features in the documentation. It should work out of the box on most
# rooted Android devices.
config:
        project: governor_comparison
        run_name: performance_vs_interactive

        device: generic_android
        reboot_policy: never

        instrumentation: [coreutil, cpufreq]
        coreutil:
                threshold: 80
        sysfs_extractor:
                paths: [/proc/meminfo]
        result_processors: [sqlite]
        sqlite:
                database: ~/my_wa_results.sqlite
global:
        iterations: 5
sections:
        - id: perf
          runtime_params:
                sysfile_values:
                        /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
        - id: inter
          runtime_params:
                sysfile_values:
                        /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: interactive
workloads:
        - id: 01_dhry
          name: dhrystone
          label: dhrystone_15over6
          workload_params:
                threads: 6
                mloops: 15
        - id: 02_memc
          name: memcpy
          instrumentation: [sysfs_extractor]
        - id: 03_cycl
          name: cyclictest
          iterations: 10

@@ -1,16 +0,0 @@
#    Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

@@ -1,400 +0,0 @@
#    Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#


import os
import sys
import stat
import string
import textwrap
import argparse
import shutil
import getpass
import subprocess
from collections import OrderedDict

import yaml

from wlauto import PluginLoader, Command, settings
from wlauto.exceptions import CommandError, ConfigError
from wlauto.core.command import init_argument_parser
from wlauto.utils.misc import (capitalize, check_output,
                               ensure_file_directory_exists as _f, ensure_directory_exists as _d)
from wlauto.utils.types import identifier
from wlauto.utils.doc import format_body


__all__ = ['create_workload']


TEMPLATES_DIR = os.path.join(os.path.dirname(__file__), 'templates')

UIAUTO_BUILD_SCRIPT = """#!/bin/bash

class_dir=bin/classes/com/arm/wlauto/uiauto
base_class=`python -c "import os, wlauto; print os.path.join(os.path.dirname(wlauto.__file__), 'common', 'android', 'BaseUiAutomation.class')"`
mkdir -p $$class_dir
cp $$base_class $$class_dir

ant build

if [[ -f bin/${package_name}.jar ]]; then
    cp bin/${package_name}.jar ..
fi
"""


class CreateSubcommand(object):

    name = None
    help = None
    usage = None
    description = None
    epilog = None
    formatter_class = None

    def __init__(self, logger, subparsers):
        self.logger = logger
        self.group = subparsers
        parser_params = dict(help=(self.help or self.description), usage=self.usage,
                             description=format_body(textwrap.dedent(self.description), 80),
                             epilog=self.epilog)
        if self.formatter_class:
            parser_params['formatter_class'] = self.formatter_class
        self.parser = subparsers.add_parser(self.name, **parser_params)
        init_argument_parser(self.parser)  # propagate top-level options
        self.initialize()

    def initialize(self):
        pass


class CreateWorkloadSubcommand(CreateSubcommand):

    name = 'workload'
    description = '''Create a new workload. By default, a basic workload template will be
                     used, but you can use options to specify a different template.'''

    def initialize(self):
        self.parser.add_argument('name', metavar='NAME',
                                 help='Name of the workload to be created.')
        self.parser.add_argument('-p', '--path', metavar='PATH', default=None,
                                 help='The location at which the workload will be created. If not '
                                      'specified, this defaults to "~/.workload_automation/workloads".')
        self.parser.add_argument('-f', '--force', action='store_true',
                                 help='Create the new workload even if a workload with the specified '
                                      'name already exists.')

        template_group = self.parser.add_mutually_exclusive_group()
        template_group.add_argument('-A', '--android-benchmark', action='store_true',
                                    help='Use the Android benchmark template. This template allows you '
                                         'to specify an APK file that will be installed and run on the '
                                         'device. You should place the APK file into the workload\'s '
                                         'directory at the same level as the __init__.py.')
        template_group.add_argument('-U', '--ui-automation', action='store_true',
                                    help='Use the UI automation template. This template generates a UI '
                                         'automation Android project as well as the Python class. This '
                                         'is a more general version of the Android benchmark template '
                                         'that makes no assumptions about the nature of your workload, '
                                         'apart from the fact that you need UI automation. If you need '
                                         'to install an APK, start an app on the device, etc., you will '
                                         'need to do that explicitly in your code.')
        template_group.add_argument('-B', '--android-uiauto-benchmark', action='store_true',
                                    help='Use the Android uiauto benchmark template. This generates a UI '
                                         'automation project as well as a Python class. This template '
                                         'should be used if you have an APK file that needs to be run on '
                                         'the device. You should place the APK file into the workload\'s '
                                         'directory at the same level as the __init__.py.')

    def execute(self, state, args):  # pylint: disable=R0201
        where = args.path or 'local'
        check_name = not args.force

        if args.android_benchmark:
            kind = 'android'
        elif args.ui_automation:
            kind = 'uiauto'
        elif args.android_uiauto_benchmark:
            kind = 'android_uiauto'
        else:
            kind = 'basic'

        try:
            create_workload(args.name, kind, where, check_name)
        except CommandError, e:
            print "ERROR:", e

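# Editor's illustrative sketch (not part of the original file): the
# create_workload() helper exported in __all__ can be driven directly with
# the same arguments the command assembles above: name, template kind,
# location, and whether to check for an existing workload with that name.
def _example_create_basic_workload():
    create_workload('mybench', 'basic', 'local', True)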
| class CreatePackageSubcommand(CreateSubcommand): |  | ||||||
|  |  | ||||||
|     name = 'package' |  | ||||||
|     description = '''Create a new empty Python package for WA plugins. On installation, |  | ||||||
|                      this package will "advertise" itself to WA so that Plugins with in it will |  | ||||||
|                      be loaded by WA when it runs.''' |  | ||||||
|  |  | ||||||
|     def initialize(self): |  | ||||||
|         self.parser.add_argument('name', metavar='NAME', |  | ||||||
|                                  help='Name of the package to be created') |  | ||||||
|         self.parser.add_argument('-p', '--path', metavar='PATH', default=None, |  | ||||||
|                                  help='The location at which the new pacakge will be created. If not specified, ' + |  | ||||||
|                                       'current working directory will be used.') |  | ||||||
|         self.parser.add_argument('-f', '--force', action='store_true', |  | ||||||
|                                  help='Create the new package even if a file or directory with the same name ' |  | ||||||
|                                       'already exists at the specified location.') |  | ||||||
|  |  | ||||||
|     def execute(self, args):  # pylint: disable=R0201 |  | ||||||
|         package_dir = args.path or os.path.abspath('.') |  | ||||||
|         template_path = os.path.join(TEMPLATES_DIR, 'setup.template') |  | ||||||
|         self.create_plugins_package(package_dir, args.name, template_path, args.force) |  | ||||||
|  |  | ||||||
|     def create_plugins_package(self, location, name, setup_template_path, overwrite=False): |  | ||||||
|         package_path = os.path.join(location, name) |  | ||||||
|         if os.path.exists(package_path): |  | ||||||
|             if overwrite: |  | ||||||
|                 self.logger.info('overwriting existing "{}"'.format(package_path)) |  | ||||||
|                 shutil.rmtree(package_path) |  | ||||||
|             else: |  | ||||||
|                 raise CommandError('Location "{}" already exists.'.format(package_path)) |  | ||||||
|         actual_package_path = os.path.join(package_path, name) |  | ||||||
|         os.makedirs(actual_package_path) |  | ||||||
|         setup_text = render_template(setup_template_path, {'package_name': name, 'user': getpass.getuser()}) |  | ||||||
|         with open(os.path.join(package_path, 'setup.py'), 'w') as wfh: |  | ||||||
|             wfh.write(setup_text) |  | ||||||
|         touch(os.path.join(actual_package_path, '__init__.py')) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class CreateAgendaSubcommand(CreateSubcommand): |  | ||||||
|  |  | ||||||
|     name = 'agenda' |  | ||||||
|     description = """ |  | ||||||
|     Create an agenda whith the specified plugins enabled. And parameters set to their |  | ||||||
|     default values. |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     def initialize(self): |  | ||||||
|         self.parser.add_argument('plugins', nargs='+', |  | ||||||
|                                  help='Plugins to be added') |  | ||||||
|         self.parser.add_argument('-i', '--iterations', type=int, default=1, |  | ||||||
|                                  help='Sets the number of iterations for all workloads') |  | ||||||
|         self.parser.add_argument('-r', '--include-runtime-params', action='store_true', |  | ||||||
|                                  help=""" |  | ||||||
|                                  Adds runtime parameters to the global section of the generated |  | ||||||
|                                  agenda. Note: these do not have default values, so only the |  | ||||||
|                                  name will be added. Also, runtime parameters are device-specific, |  | ||||||
|                                  so a device must be specified (either in the list of plugins |  | ||||||
|                                  or in the existing config). |  | ||||||
|                                  """) |  | ||||||
|         self.parser.add_argument('-o', '--output', metavar='FILE', |  | ||||||
|                                  help='Output file. If not specified, STDOUT will be used instead.') |  | ||||||
|  |  | ||||||
|     def execute(self, args):  # pylint: disable=no-self-use,too-many-branches,too-many-statements |  | ||||||
|         loader = PluginLoader(packages=settings.plugin_packages, |  | ||||||
|                               paths=settings.plugin_paths) |  | ||||||
|         agenda = OrderedDict() |  | ||||||
|         agenda['config'] = OrderedDict(instrumentation=[], result_processors=[]) |  | ||||||
|         agenda['global'] = OrderedDict(iterations=args.iterations) |  | ||||||
|         agenda['workloads'] = [] |  | ||||||
|         device = None |  | ||||||
|         device_config = None |  | ||||||
|         for name in args.plugins: |  | ||||||
|             extcls = loader.get_plugin_class(name) |  | ||||||
|             config = loader.get_default_config(name) |  | ||||||
|             del config['modules'] |  | ||||||
|  |  | ||||||
|             if extcls.kind == 'workload': |  | ||||||
|                 entry = OrderedDict() |  | ||||||
|                 entry['name'] = extcls.name |  | ||||||
|                 if name != extcls.name: |  | ||||||
|                     entry['label'] = name |  | ||||||
|                 entry['params'] = config |  | ||||||
|                 agenda['workloads'].append(entry) |  | ||||||
|             elif extcls.kind == 'device': |  | ||||||
|                 if device is not None: |  | ||||||
|                     raise ConfigError('Specifying multiple devices: {} and {}'.format(device.name, name)) |  | ||||||
|                 device = extcls |  | ||||||
|                 device_config = config |  | ||||||
|                 agenda['config']['device'] = name |  | ||||||
|                 agenda['config']['device_config'] = config |  | ||||||
|             else: |  | ||||||
|                 if extcls.kind == 'instrument': |  | ||||||
|                     agenda['config']['instrumentation'].append(name) |  | ||||||
|                 if extcls.kind == 'result_processor': |  | ||||||
|                     agenda['config']['result_processors'].append(name) |  | ||||||
|                 agenda['config'][name] = config |  | ||||||
|  |  | ||||||
|         if args.include_runtime_params: |  | ||||||
|             if not device: |  | ||||||
|                 if settings.device: |  | ||||||
|                     device = loader.get_plugin_class(settings.device) |  | ||||||
|                     device_config = loader.get_default_config(settings.device) |  | ||||||
|                 else: |  | ||||||
|                     raise ConfigError('-r option requires a device to be in the list of plugins') |  | ||||||
|             rps = OrderedDict() |  | ||||||
|             for rp in device.runtime_parameters: |  | ||||||
|                 if hasattr(rp, 'get_runtime_parameters'): |  | ||||||
|                     # a core parameter needs to be expanded for each of the |  | ||||||
|                     # device's cores, if they're available |  | ||||||
|                     for crp in rp.get_runtime_parameters(device_config.get('core_names', [])): |  | ||||||
|                         rps[crp.name] = None |  | ||||||
|                 else: |  | ||||||
|                     rps[rp.name] = None |  | ||||||
|             agenda['global']['runtime_params'] = rps |  | ||||||
|  |  | ||||||
|         if args.output: |  | ||||||
|             wfh = open(args.output, 'w') |  | ||||||
|         else: |  | ||||||
|             wfh = sys.stdout |  | ||||||
|         yaml.dump(agenda, wfh, indent=4, default_flow_style=False) |  | ||||||
|         if args.output: |  | ||||||
|             wfh.close() |  | ||||||
|  |  | ||||||
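| # For illustration, ``wa create agenda dhrystone -i 3`` would emit YAML along |  | ||||||
| # the following lines (the workload name and the elided params are illustrative): |  | ||||||
| # |  | ||||||
| #     config: |  | ||||||
| #         instrumentation: [] |  | ||||||
| #         result_processors: [] |  | ||||||
| #     global: |  | ||||||
| #         iterations: 3 |  | ||||||
| #     workloads: |  | ||||||
| #     -   name: dhrystone |  | ||||||
| #         params: |  | ||||||
| #             ... |  | ||||||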
|  |  | ||||||
| class CreateCommand(Command): |  | ||||||
|  |  | ||||||
|     name = 'create' |  | ||||||
|     description = '''Used to create various WA-related objects (see positional arguments list for what |  | ||||||
|                      objects may be created).\n\nUse "wa create <object> -h" for object-specific arguments.''' |  | ||||||
|     formatter_class = argparse.RawDescriptionHelpFormatter |  | ||||||
|     subcmd_classes = [ |  | ||||||
|         CreateWorkloadSubcommand, |  | ||||||
|         CreatePackageSubcommand, |  | ||||||
|         CreateAgendaSubcommand, |  | ||||||
|     ] |  | ||||||
|  |  | ||||||
|     def initialize(self, context): |  | ||||||
|         subparsers = self.parser.add_subparsers(dest='what') |  | ||||||
|         self.subcommands = []  # pylint: disable=W0201 |  | ||||||
|         for subcmd_cls in self.subcmd_classes: |  | ||||||
|             subcmd = subcmd_cls(self.logger, subparsers) |  | ||||||
|             self.subcommands.append(subcmd) |  | ||||||
|  |  | ||||||
|     def execute(self, args): |  | ||||||
|         for subcmd in self.subcommands: |  | ||||||
|             if subcmd.name == args.what: |  | ||||||
|                 subcmd.execute(args) |  | ||||||
|                 break |  | ||||||
|         else: |  | ||||||
|             raise CommandError('Not a valid create parameter: {}'.format(args.what)) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def create_workload(name, kind='basic', where='local', check_name=True, **kwargs): |  | ||||||
|     if check_name: |  | ||||||
|         extloader = PluginLoader(packages=settings.plugin_packages, paths=settings.plugin_paths) |  | ||||||
|         if name in [wl.name for wl in extloader.list_workloads()]: |  | ||||||
|             raise CommandError('Workload with name "{}" already exists.'.format(name)) |  | ||||||
|  |  | ||||||
|     class_name = get_class_name(name) |  | ||||||
|     if where == 'local': |  | ||||||
|         workload_dir = _d(os.path.join(settings.user_directory, 'workloads', name)) |  | ||||||
|     else: |  | ||||||
|         workload_dir = _d(os.path.join(where, name)) |  | ||||||
|  |  | ||||||
|     if kind == 'basic': |  | ||||||
|         create_basic_workload(workload_dir, name, class_name, **kwargs) |  | ||||||
|     elif kind == 'uiauto': |  | ||||||
|         create_uiautomator_workload(workload_dir, name, class_name, **kwargs) |  | ||||||
|     elif kind == 'android': |  | ||||||
|         create_android_benchmark(workload_dir, name, class_name, **kwargs) |  | ||||||
|     elif kind == 'android_uiauto': |  | ||||||
|         create_android_uiauto_benchmark(workload_dir, name, class_name, **kwargs) |  | ||||||
|     else: |  | ||||||
|         raise CommandError('Unknown workload type: {}'.format(kind)) |  | ||||||
|  |  | ||||||
|     print 'Workload created in {}'.format(workload_dir) |  | ||||||
|  |  | ||||||
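| # A hedged usage sketch (the name "my_game" is made up): generate a uiauto |  | ||||||
| # workload skeleton under the WA user directory (typically |  | ||||||
| # ~/.workload_automation/workloads/my_game): |  | ||||||
| # |  | ||||||
| #     create_workload('my_game', kind='uiauto') |  | ||||||
| # |  | ||||||
| # This writes an __init__.py from the uiauto_workload template and a uiauto/ |  | ||||||
| # project containing a build.sh script. |  | ||||||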
|  |  | ||||||
| def create_basic_workload(path, name, class_name): |  | ||||||
|     source_file = os.path.join(path, '__init__.py') |  | ||||||
|     with open(source_file, 'w') as wfh: |  | ||||||
|         wfh.write(render_template('basic_workload', {'name': name, 'class_name': class_name})) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def create_uiautomator_workload(path, name, class_name): |  | ||||||
|     uiauto_path = _d(os.path.join(path, 'uiauto')) |  | ||||||
|     create_uiauto_project(uiauto_path, name) |  | ||||||
|     source_file = os.path.join(path, '__init__.py') |  | ||||||
|     with open(source_file, 'w') as wfh: |  | ||||||
|         wfh.write(render_template('uiauto_workload', {'name': name, 'class_name': class_name})) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def create_android_benchmark(path, name, class_name): |  | ||||||
|     source_file = os.path.join(path, '__init__.py') |  | ||||||
|     with open(source_file, 'w') as wfh: |  | ||||||
|         wfh.write(render_template('android_benchmark', {'name': name, 'class_name': class_name})) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def create_android_uiauto_benchmark(path, name, class_name): |  | ||||||
|     uiauto_path = _d(os.path.join(path, 'uiauto')) |  | ||||||
|     create_uiauto_project(uiauto_path, name) |  | ||||||
|     source_file = os.path.join(path, '__init__.py') |  | ||||||
|     with open(source_file, 'w') as wfh: |  | ||||||
|         wfh.write(render_template('android_uiauto_benchmark', {'name': name, 'class_name': class_name})) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def create_uiauto_project(path, name, target='1'): |  | ||||||
|     sdk_path = get_sdk_path() |  | ||||||
|     android_path = os.path.join(sdk_path, 'tools', 'android') |  | ||||||
|     package_name = 'com.arm.wlauto.uiauto.' + name.lower() |  | ||||||
|  |  | ||||||
|     # ${ANDROID_HOME}/tools/android create uitest-project -n com.arm.wlauto.uiauto.linpack -t 1 -p ../test2 |  | ||||||
|     command = '{} create uitest-project --name {} --target {} --path {}'.format(android_path, |  | ||||||
|                                                                                 package_name, |  | ||||||
|                                                                                 target, |  | ||||||
|                                                                                 path) |  | ||||||
|     try: |  | ||||||
|         check_output(command, shell=True) |  | ||||||
|     except subprocess.CalledProcessError as e: |  | ||||||
|         if 'is not valid' in e.output: |  | ||||||
|             message = 'No Android SDK target found; have you run "{} update sdk" and downloaded a platform?' |  | ||||||
|             raise CommandError(message.format(android_path)) |  | ||||||
|         else: |  | ||||||
|             raise |  | ||||||
|  |  | ||||||
|     build_script = os.path.join(path, 'build.sh') |  | ||||||
|     with open(build_script, 'w') as wfh: |  | ||||||
|         template = string.Template(UIAUTO_BUILD_SCRIPT) |  | ||||||
|         wfh.write(template.substitute({'package_name': package_name})) |  | ||||||
|     os.chmod(build_script, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) |  | ||||||
|  |  | ||||||
|     source_file = _f(os.path.join(path, 'src', |  | ||||||
|                                   os.sep.join(package_name.split('.')[:-1]), |  | ||||||
|                                   'UiAutomation.java')) |  | ||||||
|     with open(source_file, 'w') as wfh: |  | ||||||
|         wfh.write(render_template('UiAutomation.java', {'name': name, 'package_name': package_name})) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # Utility functions |  | ||||||
|  |  | ||||||
| def get_sdk_path(): |  | ||||||
|     sdk_path = os.getenv('ANDROID_HOME') |  | ||||||
|     if not sdk_path: |  | ||||||
|         raise CommandError('Please set the ANDROID_HOME environment variable to point to ' + |  | ||||||
|                            'the location of the Android SDK') |  | ||||||
|     return sdk_path |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def get_class_name(name, postfix=''): |  | ||||||
|     name = identifier(name) |  | ||||||
|     return ''.join(map(capitalize, name.split('_'))) + postfix |  | ||||||
|  |  | ||||||
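| # For example (assuming identifier() normalises the name into a valid Python |  | ||||||
| # identifier, e.g. 'my workload' -> 'my_workload'): |  | ||||||
| # |  | ||||||
| #     get_class_name('my_workload')      # -> 'MyWorkload' |  | ||||||
| #     get_class_name('foo', 'Workload')  # -> 'FooWorkload' |  | ||||||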
|  |  | ||||||
| def render_template(name, params): |  | ||||||
|     filepath = os.path.join(TEMPLATES_DIR, name) |  | ||||||
|     with open(filepath) as fh: |  | ||||||
|         text = fh.read() |  | ||||||
|         template = string.Template(text) |  | ||||||
|         return template.substitute(params) |  | ||||||
|  |  | ||||||
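| # For example, given a hypothetical template file "greeting" in TEMPLATES_DIR |  | ||||||
| # containing "Hello, $user": |  | ||||||
| # |  | ||||||
| #     render_template('greeting', {'user': 'wa'})   # -> 'Hello, wa' |  | ||||||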
|  |  | ||||||
| def touch(path): |  | ||||||
|     with open(path, 'w') as _: |  | ||||||
|         pass |  | ||||||
| @@ -1,74 +0,0 @@ | |||||||
| #    Copyright 2014-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
| from wlauto import PluginLoader, Command, settings |  | ||||||
| from wlauto.utils.formatter import DescriptionListFormatter |  | ||||||
| from wlauto.utils.doc import get_summary |  | ||||||
| from wlauto.core import pluginloader |  | ||||||
|  |  | ||||||
| class ListCommand(Command): |  | ||||||
|  |  | ||||||
|     name = 'list' |  | ||||||
|     description = 'List available WA plugins with a short description of each.' |  | ||||||
|  |  | ||||||
|     def initialize(self, context): |  | ||||||
|         plugin_types = ['{}s'.format(name) for name in pluginloader.kinds] |  | ||||||
|         self.parser.add_argument('kind', metavar='KIND', |  | ||||||
|                                  help=('Specify the kind of plugin to list. Must be ' |  | ||||||
|                                        'one of: {}'.format(', '.join(plugin_types))), |  | ||||||
|                                  choices=plugin_types) |  | ||||||
|         self.parser.add_argument('-n', '--name', help='Filter results by the name specified') |  | ||||||
|         self.parser.add_argument('-o', '--packaged-only', action='store_true', |  | ||||||
|                                  help=''' |  | ||||||
|                                  Only list plugins packaged with WA itself. Do not list plugins |  | ||||||
|                                  installed locally or from other packages. |  | ||||||
|                                  ''') |  | ||||||
|         self.parser.add_argument('-p', '--platform', help='Only list results that are supported by ' |  | ||||||
|                                                           'the specified platform') |  | ||||||
|  |  | ||||||
|     def execute(self, state, args): |  | ||||||
|         filters = {} |  | ||||||
|         if args.name: |  | ||||||
|             filters['name'] = args.name |  | ||||||
|  |  | ||||||
|         results = pluginloader.list_plugins(args.kind[:-1]) |  | ||||||
|         if filters or args.platform: |  | ||||||
|             filtered_results = [] |  | ||||||
|             for result in results: |  | ||||||
|                 passed = True |  | ||||||
|                 for k, v in filters.iteritems(): |  | ||||||
|                     if getattr(result, k) != v: |  | ||||||
|                         passed = False |  | ||||||
|                         break |  | ||||||
|                 if passed and args.platform: |  | ||||||
|                     passed = check_platform(result, args.platform) |  | ||||||
|                 if passed: |  | ||||||
|                     filtered_results.append(result) |  | ||||||
|         else:  # no filters specified |  | ||||||
|             filtered_results = results |  | ||||||
|  |  | ||||||
|         if filtered_results: |  | ||||||
|             output = DescriptionListFormatter() |  | ||||||
|             for result in sorted(filtered_results, key=lambda x: x.name): |  | ||||||
|                 output.add_item(get_summary(result), result.name) |  | ||||||
|             print output.format_data() |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def check_platform(plugin, platform): |  | ||||||
|     supported_platforms = getattr(plugin, 'supported_platforms', []) |  | ||||||
|     if supported_platforms: |  | ||||||
|         return platform in supported_platforms |  | ||||||
|     return True |  | ||||||
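|  |  | ||||||
| # For example, ``wa list workloads -p android`` (a hedged sketch of the |  | ||||||
| # behaviour above) keeps only plugins whose supported_platforms contain |  | ||||||
| # 'android', or that do not declare supported_platforms at all. |  | ||||||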
| @@ -1,217 +0,0 @@ | |||||||
| #    Copyright 2014-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
| import os |  | ||||||
| import sys |  | ||||||
|  |  | ||||||
|  |  | ||||||
| from wlauto import Command, settings |  | ||||||
| from wlauto.core import pluginloader |  | ||||||
| from wlauto.common.resources import Executable |  | ||||||
| from wlauto.core.resource import NO_ONE |  | ||||||
| from wlauto.core.resolver import ResourceResolver |  | ||||||
| from wlauto.core.configuration import RunConfiguration |  | ||||||
| from wlauto.core.agenda import Agenda  # used by execute() below; assumed module path |  | ||||||
| from wlauto.common.android.workload import ApkWorkload |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class RecordCommand(Command): |  | ||||||
|  |  | ||||||
|     name = 'record' |  | ||||||
|     description = '''Performs a revent recording |  | ||||||
|  |  | ||||||
|     This command helps making revent recordings. It will automatically |  | ||||||
|     deploy revent and even has the option of automatically opening apps. |  | ||||||
|  |  | ||||||
|     Revent allows you to record raw inputs such as screen swipes or button presses. |  | ||||||
|     This can be useful for recording inputs for workloads such as games that don't |  | ||||||
|     have XML UI layouts that can be used with UIAutomator. The drawback is that |  | ||||||
|     revent recordings are specific to the device type they were recorded on. |  | ||||||
|  |  | ||||||
|     WA expects revent recording names to consist of two parts, in the format |  | ||||||
|     {device_name}.{suffix}.revent: |  | ||||||
|  |  | ||||||
|      - device_name can either be specified manually with the ``-d`` argument or |  | ||||||
|        determined automatically. On Android devices it is obtained from |  | ||||||
|        ``build.prop``; on Linux devices, from ``/proc/device-tree/model``. |  | ||||||
|      - suffix is used by WA to determine which part of the app execution the |  | ||||||
|        recording is for; currently this is either ``setup`` or ``run``. It |  | ||||||
|        should be specified with the ``-s`` argument. |  | ||||||
|     ''' |  | ||||||
|  |  | ||||||
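|     # For example, with ``-s setup`` on a device whose model resolves to |  | ||||||
|     # "hammerhead" (an illustrative name), _record() below produces a file |  | ||||||
|     # named: |  | ||||||
|     # |  | ||||||
|     #     hammerhead.setup.revent |  | ||||||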
|     def initialize(self, context): |  | ||||||
|         self.context = context |  | ||||||
|         self.parser.add_argument('-d', '--device', help='The name of the device') |  | ||||||
|         self.parser.add_argument('-o', '--output', help='Directory to save the recording in') |  | ||||||
|  |  | ||||||
|         # Need validation |  | ||||||
|         self.parser.add_argument('-s', '--suffix', help='The suffix of the revent file, e.g. ``setup``') |  | ||||||
|         self.parser.add_argument('-C', '--clear', help='Clear app cache before launching it', |  | ||||||
|                                  action="store_true") |  | ||||||
|  |  | ||||||
|         group = self.parser.add_mutually_exclusive_group(required=False) |  | ||||||
|         group.add_argument('-p', '--package', help='Package to launch before recording') |  | ||||||
|         group.add_argument('-w', '--workload', help='Name of a revent workload (mostly games)') |  | ||||||
|  |  | ||||||
|     # Validate command options |  | ||||||
|     def validate_args(self, args): |  | ||||||
|         if args.clear and not (args.package or args.workload): |  | ||||||
|             self.logger.error("A package or workload must be specified in order to clear the app cache") |  | ||||||
|             self.parser.print_help() |  | ||||||
|             sys.exit() |  | ||||||
|         if args.workload and args.suffix: |  | ||||||
|             self.logger.error("Cannot specify a manual suffix for workloads") |  | ||||||
|             self.parser.print_help() |  | ||||||
|             sys.exit() |  | ||||||
|         if args.suffix: |  | ||||||
|             args.suffix += "." |  | ||||||
|  |  | ||||||
|     # pylint: disable=W0201 |  | ||||||
|     def execute(self, state, args): |  | ||||||
|         self.validate_args(args) |  | ||||||
|         self.logger.info("Connecting to device...") |  | ||||||
|  |  | ||||||
|         # Setup config |  | ||||||
|         self.config = RunConfiguration(pluginloader) |  | ||||||
|         for filepath in settings.config_paths: |  | ||||||
|             self.config.load_config(filepath) |  | ||||||
|         self.config.set_agenda(Agenda()) |  | ||||||
|         self.config.finalize() |  | ||||||
|  |  | ||||||
|         # Setup device |  | ||||||
|         self.device_manager = pluginloader.get_manager(self.config.device) |  | ||||||
|         self.device_manager.validate() |  | ||||||
|         self.device_manager.connect() |  | ||||||
|         context = LightContext(self.config, self.device_manager) |  | ||||||
|         self.device_manager.initialize(context) |  | ||||||
|         self.device = self.device_manager.target |  | ||||||
|         if args.device: |  | ||||||
|             self.device_name = args.device |  | ||||||
|         else: |  | ||||||
|             self.device_name = self.device.model |  | ||||||
|  |  | ||||||
|         # Install Revent |  | ||||||
|         host_binary = context.resolver.get(Executable(NO_ONE, self.device.abi, 'revent')) |  | ||||||
|         self.target_binary = self.device.install_if_needed(host_binary) |  | ||||||
|  |  | ||||||
|         if args.workload: |  | ||||||
|             self.workload_record(args, context) |  | ||||||
|         elif args.package: |  | ||||||
|             self.package_record(args, context) |  | ||||||
|         else: |  | ||||||
|             self.manual_record(args, context) |  | ||||||
|  |  | ||||||
|     def manual_record(self, args, context): |  | ||||||
|         revent_file = self.device.get_workpath('{}.{}revent'.format(self.device_name, args.suffix or "")) |  | ||||||
|         self._record(revent_file, "", args.output) |  | ||||||
|  |  | ||||||
|     def package_record(self, args, context): |  | ||||||
|         revent_file = self.device.get_workpath('{}.{}revent'.format(self.device_name, args.suffix or "")) |  | ||||||
|         if args.clear: |  | ||||||
|             self.device.execute("pm clear {}".format(args.package)) |  | ||||||
|  |  | ||||||
|         self.logger.info("Starting {}".format(args.package)) |  | ||||||
|         self.device.execute('monkey -p {} -c android.intent.category.LAUNCHER 1'.format(args.package)) |  | ||||||
|  |  | ||||||
|         self._record(revent_file, "", args.output) |  | ||||||
|  |  | ||||||
|     def workload_record(self, args, context): |  | ||||||
|         setup_file = self.device.get_workpath('{}.setup.revent'.format(self.device_name)) |  | ||||||
|         run_file = self.device.get_workpath('{}.run.revent'.format(self.device_name)) |  | ||||||
|  |  | ||||||
|         self.logger.info("Deploying {}".format(args.workload)) |  | ||||||
|         workload = pluginloader.get_workload(args.workload, self.device) |  | ||||||
|         workload.apk_init_resources(context) |  | ||||||
|         workload.initialize_package(context) |  | ||||||
|         workload.do_post_install(context) |  | ||||||
|         workload.start_activity() |  | ||||||
|  |  | ||||||
|         if args.clear: |  | ||||||
|             workload.reset(context) |  | ||||||
|  |  | ||||||
|         self._record(setup_file, " SETUP", |  | ||||||
|                      args.output or os.path.join(workload.dependencies_directory, 'revent_files')) |  | ||||||
|         self._record(run_file, " RUN", |  | ||||||
|                      args.output or os.path.join(workload.dependencies_directory, 'revent_files')) |  | ||||||
|  |  | ||||||
|         self.logger.info("Tearing down {}".format(args.workload)) |  | ||||||
|         workload.apk_teardown(context) |  | ||||||
|  |  | ||||||
|     def _record(self, revent_file, name, output_path): |  | ||||||
|         self.logger.info("Press Enter when you are ready to record{}...".format(name)) |  | ||||||
|         raw_input("") |  | ||||||
|         command = "{} record -t 100000 -s {}".format(self.target_binary, revent_file) |  | ||||||
|         self.device.kick_off(command) |  | ||||||
|  |  | ||||||
|         self.logger.info("Press Enter when you have finished recording {}...".format(name)) |  | ||||||
|         raw_input("") |  | ||||||
|         self.device.killall("revent") |  | ||||||
|  |  | ||||||
|         output_path = output_path or os.getcwdu() |  | ||||||
|         if not os.path.isdir(output_path): |  | ||||||
|             os.makedirs(output_path) |  | ||||||
|  |  | ||||||
|         revent_file_name = self.device.path.basename(revent_file) |  | ||||||
|         host_path = os.path.join(output_path, revent_file_name) |  | ||||||
|         if os.path.exists(host_path): |  | ||||||
|             self.logger.info("Revent file '{}' already exists, overwrite? [y/n]".format(revent_file_name)) |  | ||||||
|             if raw_input("") == "y": |  | ||||||
|                 os.remove(host_path) |  | ||||||
|             else: |  | ||||||
|                 self.logger.warning("Did not pull and overwrite '{}'".format(revent_file_name)) |  | ||||||
|                 return |  | ||||||
|         self.logger.info("Pulling '{}' from device".format(self.device.path.basename(revent_file))) |  | ||||||
|         self.device.pull(revent_file, output_path) |  | ||||||
|  |  | ||||||
| class ReplayCommand(RecordCommand): |  | ||||||
|  |  | ||||||
|     name = 'replay' |  | ||||||
|     description = '''Replay a revent recording |  | ||||||
|  |  | ||||||
|     Revent allows you to record raw inputs such as screen swipes or button presses. |  | ||||||
|     See ``wa show record`` to see how to make a revent recording. |  | ||||||
|     ''' |  | ||||||
|  |  | ||||||
|     def initialize(self, context): |  | ||||||
|         self.context = context |  | ||||||
|         self.parser.add_argument('revent', help='The name of the file to replay') |  | ||||||
|         self.parser.add_argument('-p', '--package', help='Package to launch before recording') |  | ||||||
|         self.parser.add_argument('-C', '--clear', help='Clear app cache before launching it', |  | ||||||
|                                  action="store_true") |  | ||||||
|  |  | ||||||
|  |  | ||||||
|     # pylint: disable=W0201 |  | ||||||
|     def run(self, args): |  | ||||||
|         self.logger.info("Pushing file to device") |  | ||||||
|         self.device.push(args.revent, self.device.working_directory) |  | ||||||
|         revent_file = self.device.path.join(self.device.working_directory, os.path.split(args.revent)[1]) |  | ||||||
|  |  | ||||||
|         if args.clear: |  | ||||||
|             self.device.execute("pm clear {}".format(args.package)) |  | ||||||
|  |  | ||||||
|         if args.package: |  | ||||||
|             self.logger.info("Starting {}".format(args.package)) |  | ||||||
|             self.device.execute('monkey -p {} -c android.intent.category.LAUNCHER 1'.format(args.package)) |  | ||||||
|  |  | ||||||
|         command = "{} replay {}".format(self.target_binary, revent_file) |  | ||||||
|         self.device.execute(command) |  | ||||||
|         self.logger.info("Finished replay") |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # Used to satisfy the API |  | ||||||
| class LightContext(object): |  | ||||||
|     def __init__(self, config, device_manager): |  | ||||||
|         self.resolver = ResourceResolver(config) |  | ||||||
|         self.resolver.load() |  | ||||||
|         self.device_manager = device_manager |  | ||||||
| @@ -1,123 +0,0 @@ | |||||||
| #    Copyright 2014-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
| import os |  | ||||||
| import sys |  | ||||||
| import shutil |  | ||||||
|  |  | ||||||
| import wlauto |  | ||||||
| from wlauto import Command, settings |  | ||||||
| from wlauto.core import pluginloader |  | ||||||
| from wlauto.core.configuration import RunConfiguration |  | ||||||
| from wlauto.core.configuration.parsers import AgendaParser, ConfigParser |  | ||||||
| from wlauto.core.execution import Executor |  | ||||||
| from wlauto.core.output import init_wa_output |  | ||||||
| from wlauto.core.version import get_wa_version |  | ||||||
| from wlauto.exceptions import NotFoundError, ConfigError |  | ||||||
| from wlauto.utils.log import add_log_file |  | ||||||
| from wlauto.utils.types import toggle_set |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class RunCommand(Command): |  | ||||||
|  |  | ||||||
|     name = 'run' |  | ||||||
|     description = 'Execute automated workloads on a remote device and process the resulting output.' |  | ||||||
|  |  | ||||||
|     def initialize(self, context): |  | ||||||
|         self.parser.add_argument('agenda', metavar='AGENDA', |  | ||||||
|                                  help=""" |  | ||||||
|                                  Agenda for this workload automation run. This defines which |  | ||||||
|                                  workloads will be executed, how many times, with which |  | ||||||
|                                  tunables, etc. See the example agendas in {} for how |  | ||||||
|                                  this file should be structured. |  | ||||||
|                                  """.format(os.path.dirname(wlauto.__file__))) |  | ||||||
|         self.parser.add_argument('-d', '--output-directory', metavar='DIR', default=None, |  | ||||||
|                                  help=""" |  | ||||||
|                                  Specify a directory where the output will be generated. If |  | ||||||
|                                  the directory already exists, the script will abort unless -f |  | ||||||
|                                  option (see below) is used, in which case the contents of the |  | ||||||
|                                  directory will be overwritten. If this option is not specified, |  | ||||||
|                                  then {} will be used instead. |  | ||||||
|                                  """.format(settings.default_output_directory)) |  | ||||||
|         self.parser.add_argument('-f', '--force', action='store_true', |  | ||||||
|                                  help=""" |  | ||||||
|                                  Overwrite output directory if it exists. By default, the script |  | ||||||
|                                  will abort in this situation to prevent accidental data loss. |  | ||||||
|                                  """) |  | ||||||
|         self.parser.add_argument('-i', '--id', action='append', dest='only_run_ids', metavar='ID', |  | ||||||
|                                  help=""" |  | ||||||
|                                  Specify a workload spec ID from an agenda to run. If this is |  | ||||||
|                                  specified, only that particular spec will be run, and other |  | ||||||
|                                  workloads in the agenda will be ignored. This option may be |  | ||||||
|                                  used to specify multiple IDs. |  | ||||||
|                                  """) |  | ||||||
|         self.parser.add_argument('--disable', action='append', dest='instruments_to_disable', |  | ||||||
|                                  default=[], |  | ||||||
|                                  metavar='INSTRUMENT', help=""" |  | ||||||
|                                  Specify an instrument to disable from the command line. This is |  | ||||||
|                                  equivalent to adding "~{metavar}" to the instrumentation list in |  | ||||||
|                                  the agenda. This can be used to temporarily disable a troublesome |  | ||||||
|                                  instrument for a particular run without introducing a permanent |  | ||||||
|                                  change to the config (which one might then forget to revert). |  | ||||||
|                                  This option may be specified multiple times. |  | ||||||
|                                  """) |  | ||||||
|  |  | ||||||
|     def execute(self, config, args): |  | ||||||
|         output = self.set_up_output_directory(config, args) |  | ||||||
|         add_log_file(output.logfile) |  | ||||||
|  |  | ||||||
|         self.logger.debug('Version: {}'.format(get_wa_version())) |  | ||||||
|         self.logger.debug('Command Line: {}'.format(' '.join(sys.argv))) |  | ||||||
|  |  | ||||||
|         disabled_instruments = toggle_set(["~{}".format(i)  |  | ||||||
|                                            for i in args.instruments_to_disable]) |  | ||||||
|         config.jobs_config.disable_instruments(disabled_instruments) |  | ||||||
|         config.jobs_config.only_run_ids(args.only_run_ids) |  | ||||||
|  |  | ||||||
|         parser = AgendaParser() |  | ||||||
|         if os.path.isfile(args.agenda): |  | ||||||
|             parser.load_from_path(config, args.agenda) |  | ||||||
|             shutil.copy(args.agenda, output.raw_config_dir) |  | ||||||
|         else: |  | ||||||
|             try: |  | ||||||
|                 pluginloader.get_plugin_class(args.agenda, kind='workload') |  | ||||||
|                 agenda = {'workloads': [{'name': args.agenda}]} |  | ||||||
|                 parser.load(config, agenda, 'CMDLINE_ARGS') |  | ||||||
|             except NotFoundError: |  | ||||||
|                 msg = 'Agenda file "{}" does not exist, and there is no workload '\ |  | ||||||
|                       'with that name.\nYou can get a list of available workloads '\ |  | ||||||
|                       'by running "wa list workloads".' |  | ||||||
|                 raise ConfigError(msg.format(args.agenda)) |  | ||||||
|  |  | ||||||
|         executor = Executor() |  | ||||||
|         executor.execute(config, output) |  | ||||||
|  |  | ||||||
|     def set_up_output_directory(self, config, args): |  | ||||||
|         if args.output_directory: |  | ||||||
|             output_directory = args.output_directory |  | ||||||
|         else: |  | ||||||
|             output_directory = settings.default_output_directory |  | ||||||
|         self.logger.debug('Using output directory: {}'.format(output_directory)) |  | ||||||
|         try: |  | ||||||
|             return init_wa_output(output_directory, config, args.force) |  | ||||||
|         except RuntimeError as e: |  | ||||||
|             if 'path exists' in str(e): |  | ||||||
|                 msg = 'Output directory "{}" exists.\nPlease specify another '\ |  | ||||||
|                       'location, or use -f option to overwrite.' |  | ||||||
|                 self.logger.critical(msg.format(output_directory)) |  | ||||||
|                 sys.exit(1) |  | ||||||
|             else: |  | ||||||
|                 raise |  | ||||||
| @@ -1,114 +0,0 @@ | |||||||
| #    Copyright 2014-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
| import sys |  | ||||||
| import subprocess |  | ||||||
| from cStringIO import StringIO |  | ||||||
|  |  | ||||||
| from wlauto import Command |  | ||||||
| from wlauto.core.configuration import settings |  | ||||||
| from wlauto.core import pluginloader |  | ||||||
| from wlauto.utils.doc import (get_summary, get_description, get_type_name, format_column, format_body, |  | ||||||
|                               format_paragraph, indent, strip_inlined_text) |  | ||||||
| from wlauto.utils.misc import get_pager |  | ||||||
| from wlauto.utils.terminalsize import get_terminal_size |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ShowCommand(Command): |  | ||||||
|  |  | ||||||
|     name = 'show' |  | ||||||
|  |  | ||||||
|     description = """ |  | ||||||
|     Display documentation for the specified plugin (workload, instrument, etc.). |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     def initialize(self, context): |  | ||||||
|         self.parser.add_argument('name', metavar='PLUGIN', |  | ||||||
|                                  help='''The name of the plugin for which information will |  | ||||||
|                                          be shown.''') |  | ||||||
|  |  | ||||||
|     def execute(self, state, args): |  | ||||||
|         # pylint: disable=unpacking-non-sequence |  | ||||||
|         plugin = pluginloader.get_plugin_class(args.name) |  | ||||||
|         out = StringIO() |  | ||||||
|         term_width, term_height = get_terminal_size() |  | ||||||
|         format_plugin(plugin, out, term_width) |  | ||||||
|         text = out.getvalue() |  | ||||||
|         pager = get_pager() |  | ||||||
|         if len(text.split('\n')) > term_height and pager: |  | ||||||
|             try: |  | ||||||
|                 sp = subprocess.Popen(pager, stdin=subprocess.PIPE) |  | ||||||
|                 sp.communicate(text) |  | ||||||
|             except OSError: |  | ||||||
|                 self.logger.warning('Could not use PAGER "{}"'.format(pager)) |  | ||||||
|                 sys.stdout.write(text) |  | ||||||
|         else: |  | ||||||
|             sys.stdout.write(text) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def format_plugin(plugin, out, width): |  | ||||||
|     format_plugin_name(plugin, out) |  | ||||||
|     out.write('\n') |  | ||||||
|     format_plugin_summary(plugin, out, width) |  | ||||||
|     out.write('\n') |  | ||||||
|     if hasattr(plugin, 'supported_platforms'): |  | ||||||
|         format_supported_platforms(plugin, out, width) |  | ||||||
|         out.write('\n') |  | ||||||
|     if plugin.parameters: |  | ||||||
|         format_plugin_parameters(plugin, out, width) |  | ||||||
|         out.write('\n') |  | ||||||
|     format_plugin_description(plugin, out, width) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def format_plugin_name(plugin, out): |  | ||||||
|     out.write('\n{}\n'.format(plugin.name)) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def format_plugin_summary(plugin, out, width): |  | ||||||
|     out.write('{}\n'.format(format_body(strip_inlined_text(get_summary(plugin)), width))) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def format_supported_platforms(plugin, out, width): |  | ||||||
|     text = 'supported on: {}'.format(', '.join(plugin.supported_platforms)) |  | ||||||
|     out.write('{}\n'.format(format_body(text, width))) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def format_plugin_description(plugin, out, width): |  | ||||||
|     # skip the initial paragraph of multi-paragraph description, as already |  | ||||||
|     # listed above. |  | ||||||
|     description = get_description(plugin).split('\n\n', 1)[-1] |  | ||||||
|     out.write('{}\n'.format(format_body(strip_inlined_text(description), width))) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def format_plugin_parameters(plugin, out, width, shift=4): |  | ||||||
|     out.write('parameters:\n\n') |  | ||||||
|     param_texts = [] |  | ||||||
|     for param in plugin.parameters: |  | ||||||
|         description = format_paragraph(strip_inlined_text(param.description or ''), width - shift) |  | ||||||
|         param_text = '{}'.format(param.name) |  | ||||||
|         if param.mandatory: |  | ||||||
|             param_text += " (MANDATORY)" |  | ||||||
|         param_text += '\n{}\n'.format(description) |  | ||||||
|         param_text += indent('type: {}\n'.format(get_type_name(param.kind))) |  | ||||||
|         if param.allowed_values: |  | ||||||
|             param_text += indent('allowed values: {}\n'.format(', '.join(map(str, param.allowed_values)))) |  | ||||||
|         elif param.constraint: |  | ||||||
|             param_text += indent('constraint: {}\n'.format(get_type_name(param.constraint))) |  | ||||||
|         if param.default is not None: |  | ||||||
|             param_text += indent('default: {}\n'.format(param.default)) |  | ||||||
|         param_texts.append(indent(param_text, shift)) |  | ||||||
|  |  | ||||||
|     out.write(format_column('\n'.join(param_texts), width)) |  | ||||||
| @@ -1,25 +0,0 @@ | |||||||
| package ${package_name}; |  | ||||||
|  |  | ||||||
| import android.app.Activity; |  | ||||||
| import android.os.Bundle; |  | ||||||
| import android.util.Log; |  | ||||||
| import android.view.KeyEvent; |  | ||||||
|  |  | ||||||
| // Import the uiautomator libraries |  | ||||||
| import com.android.uiautomator.core.UiObject; |  | ||||||
| import com.android.uiautomator.core.UiObjectNotFoundException; |  | ||||||
| import com.android.uiautomator.core.UiScrollable; |  | ||||||
| import com.android.uiautomator.core.UiSelector; |  | ||||||
| import com.android.uiautomator.testrunner.UiAutomatorTestCase; |  | ||||||
|  |  | ||||||
| import com.arm.wlauto.uiauto.BaseUiAutomation; |  | ||||||
|  |  | ||||||
| public class UiAutomation extends BaseUiAutomation {    |  | ||||||
|  |  | ||||||
|     public static String TAG = "${name}"; |  | ||||||
|  |  | ||||||
|     public void runUiAutomation() throws Exception { |  | ||||||
|         // UI Automation code goes here |  | ||||||
|     } |  | ||||||
|  |  | ||||||
| } |  | ||||||
| @@ -1,27 +0,0 @@ | |||||||
| from wlauto import AndroidBenchmark, Parameter |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ${class_name}(AndroidBenchmark): |  | ||||||
|  |  | ||||||
|     name = '${name}' |  | ||||||
|     # NOTE: Please do not leave these comments in the code. |  | ||||||
|     # |  | ||||||
|     # Replace with the package for the app in the APK file. |  | ||||||
|     package = 'com.foo.bar' |  | ||||||
|     # Replace with the full path to the activity to run. |  | ||||||
|     activity = '.RunBuzz' |  | ||||||
|     description = "This is a placeholder description" |  | ||||||
|  |  | ||||||
|     parameters = [ |  | ||||||
|         # Workload parameters go here e.g. |  | ||||||
|         Parameter('example_parameter', kind=int, allowed_values=[1, 2, 3], default=1, override=True, mandatory=False, |  | ||||||
|                   description='This is an example parameter') |  | ||||||
|     ] |  | ||||||
|  |  | ||||||
|     def run(self, context): |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     def update_result(self, context): |  | ||||||
|         super(${class_name}, self).update_result(context) |  | ||||||
|         # process results and add them using |  | ||||||
|         # context.result.add_metric |  | ||||||
| @@ -1,24 +0,0 @@ | |||||||
| from wlauto import AndroidUiAutoBenchmark, Parameter |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ${class_name}(AndroidUiAutoBenchmark): |  | ||||||
|  |  | ||||||
|     name = '${name}' |  | ||||||
|     # NOTE: Please do not leave these comments in the code. |  | ||||||
|     # |  | ||||||
|     # Replace with the package for the app in the APK file. |  | ||||||
|     package = 'com.foo.bar' |  | ||||||
|     # Replace with the full path to the activity to run. |  | ||||||
|     activity = '.RunBuzz' |  | ||||||
|     description = "This is a placeholder description" |  | ||||||
|  |  | ||||||
|     parameters = [ |  | ||||||
|         # Workload parameters go here e.g. |  | ||||||
|         Parameter('example_parameter', kind=int, allowed_values=[1, 2, 3], default=1, override=True, mandatory=False, |  | ||||||
|                   description='This is an example parameter') |  | ||||||
|     ] |  | ||||||
|  |  | ||||||
|     def update_result(self, context): |  | ||||||
|         super(${class_name}, self).update_result(context) |  | ||||||
|         # process results and add them using |  | ||||||
|         # context.result.add_metric |  | ||||||
| @@ -1,28 +0,0 @@ | |||||||
| from wlauto import Workload, Parameter |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ${class_name}(Workload): |  | ||||||
|  |  | ||||||
|     name = '${name}' |  | ||||||
|     description = "This is a placeholder description" |  | ||||||
|  |  | ||||||
|     parameters = [ |  | ||||||
|         # Workload parameters go here e.g. |  | ||||||
|         Parameter('example_parameter', kind=int, allowed_values=[1, 2, 3], default=1, override=True, mandatory=False, |  | ||||||
|                   description='This is an example parameter') |  | ||||||
|     ] |  | ||||||
|  |  | ||||||
|     def setup(self, context): |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     def run(self, context): |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     def update_result(self, context): |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     def teardown(self, context): |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     def validate(self): |  | ||||||
|         pass |  | ||||||
| @@ -1,102 +0,0 @@ | |||||||
| import os |  | ||||||
| import sys |  | ||||||
| import warnings |  | ||||||
| from multiprocessing import Process |  | ||||||
|  |  | ||||||
| try: |  | ||||||
|     from setuptools.command.install import install as orig_install |  | ||||||
|     from setuptools import setup |  | ||||||
| except ImportError: |  | ||||||
|     from distutils.command.install import install as orig_install |  | ||||||
|     from distutils.core import setup |  | ||||||
|  |  | ||||||
| try: |  | ||||||
|     import pwd |  | ||||||
| except ImportError: |  | ||||||
|     pwd = None |  | ||||||
|  |  | ||||||
| warnings.filterwarnings('ignore', "Unknown distribution option: 'install_requires'") |  | ||||||
|  |  | ||||||
| try: |  | ||||||
|     os.remove('MANIFEST') |  | ||||||
| except OSError: |  | ||||||
|     pass |  | ||||||
|  |  | ||||||
|  |  | ||||||
| packages = [] |  | ||||||
| data_files = {} |  | ||||||
| source_dir = os.path.dirname(__file__) |  | ||||||
| for root, dirs, files in os.walk('$package_name'): |  | ||||||
|     rel_dir = os.path.relpath(root, source_dir) |  | ||||||
|     data = [] |  | ||||||
|     if '__init__.py' in files: |  | ||||||
|         for f in files: |  | ||||||
|             if os.path.splitext(f)[1] not in ['.py', '.pyc', '.pyo']: |  | ||||||
|                 data.append(f) |  | ||||||
|         package_name = rel_dir.replace(os.sep, '.') |  | ||||||
|         package_dir = root |  | ||||||
|         packages.append(package_name) |  | ||||||
|         data_files[package_name] = data |  | ||||||
|     else: |  | ||||||
|         # use previous package name |  | ||||||
|         filepaths = [os.path.join(root, f) for f in files] |  | ||||||
|         data_files[package_name].extend([os.path.relpath(f, package_dir) for f in filepaths]) |  | ||||||
|  |  | ||||||
| params = dict( |  | ||||||
|     name='$package_name', |  | ||||||
|     version='0.0.1', |  | ||||||
|     packages=packages, |  | ||||||
|     package_data=data_files, |  | ||||||
|     url='N/A', |  | ||||||
|     maintainer='$user', |  | ||||||
|     maintainer_email='$user@example.com', |  | ||||||
|     install_requires=[ |  | ||||||
|         'wlauto', |  | ||||||
|     ], |  | ||||||
|     # https://pypi.python.org/pypi?%3Aaction=list_classifiers |  | ||||||
|     classifiers=[ |  | ||||||
|         'Development Status :: 3 - Alpha', |  | ||||||
|         'Environment :: Console', |  | ||||||
|         'License :: Other/Proprietary License', |  | ||||||
|         'Operating System :: Unix', |  | ||||||
|         'Programming Language :: Python :: 2.7', |  | ||||||
|     ], |  | ||||||
| ) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def update_wa_packages(): |  | ||||||
|     sudo_user = os.getenv('SUDO_USER') |  | ||||||
|     if sudo_user: |  | ||||||
|         user_entry = pwd.getpwnam(sudo_user) |  | ||||||
|         os.setgid(user_entry.pw_gid) |  | ||||||
|         os.setuid(user_entry.pw_uid) |  | ||||||
|     env_root = os.getenv('WA_USER_DIRECTORY', os.path.join(os.path.expanduser('~'), '.workload_automation')) |  | ||||||
|     if not os.path.isdir(env_root): |  | ||||||
|         os.makedirs(env_root) |  | ||||||
|     wa_packages_file = os.path.join(env_root, 'packages') |  | ||||||
|     if os.path.isfile(wa_packages_file): |  | ||||||
|         with open(wa_packages_file, 'r') as wfh: |  | ||||||
|             package_list = wfh.read().split() |  | ||||||
|             if params['name'] not in package_list: |  | ||||||
|                 package_list.append(params['name']) |  | ||||||
|     else:  # no existing package file |  | ||||||
|         package_list = [params['name']] |  | ||||||
|     with open(wa_packages_file, 'w') as wfh: |  | ||||||
|         wfh.write('\n'.join(package_list)) |  | ||||||
|  |  | ||||||
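| # For illustration: after installing two plugin packages (the names below are |  | ||||||
| # made up), ~/.workload_automation/packages would list one package name per |  | ||||||
| # line: |  | ||||||
| # |  | ||||||
| #     my_plugins |  | ||||||
| #     team_workloads |  | ||||||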
|  |  | ||||||
| class install(orig_install): |  | ||||||
|  |  | ||||||
|     def run(self): |  | ||||||
|         orig_install.run(self) |  | ||||||
|         # Must be done in a separate process because it will drop privileges |  | ||||||
|         # when running under sudo, and won't be able to reacquire them. |  | ||||||
|         p = Process(target=update_wa_packages) |  | ||||||
|         p.start() |  | ||||||
|         p.join() |  | ||||||
|  |  | ||||||
|  |  | ||||||
| params['cmdclass'] = {'install': install} |  | ||||||
|  |  | ||||||
|  |  | ||||||
| setup(**params) |  | ||||||
| @@ -1,35 +0,0 @@ | |||||||
| from wlauto import UiAutomatorWorkload, Parameter |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ${class_name}(UiAutomatorWorkload): |  | ||||||
|  |  | ||||||
|     name = '${name}' |  | ||||||
|     description = "This is a placeholder description" |  | ||||||
|  |  | ||||||
|     parameters = [ |  | ||||||
|         # Workload parameters go here e.g. |  | ||||||
|         Parameter('example_parameter', kind=int, allowed_values=[1, 2, 3], default=1, override=True, mandatory=False, |  | ||||||
|                   description='This is an example parameter') |  | ||||||
|     ] |  | ||||||
|  |  | ||||||
|     def setup(self, context): |  | ||||||
|         super(${class_name}, self).setup(context) |  | ||||||
|         # Perform any necessary setup before starting the UI automation |  | ||||||
|         # e.g. copy files to the device, start apps, reset logs, etc. |  | ||||||
|  |  | ||||||
|  |  | ||||||
|     def update_result(self, context): |  | ||||||
|         pass |  | ||||||
|         # Process workload execution artifacts to extract metrics |  | ||||||
|         # and add them to the run result using |  | ||||||
|         # context.result.add_metric() |  | ||||||
|  |  | ||||||
|     def teardown(self, context): |  | ||||||
|         super(${class_name}, self).teardown(context) |  | ||||||
|         # Perform any necessary cleanup |  | ||||||
|  |  | ||||||
|     def validate(self): |  | ||||||
|         pass |  | ||||||
|         # Validate inter-parameter assumptions, etc. |  | ||||||
|  |  | ||||||
|  |  | ||||||
| @@ -1,16 +0,0 @@ | |||||||
| #    Copyright 2013-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
										
Binary file not shown.
| #    Copyright 2014-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
| @@ -1,40 +0,0 @@ | |||||||
| #    Copyright 2014-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
| from wlauto.common.resources import FileResource |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ReventFile(FileResource): |  | ||||||
|  |  | ||||||
|     name = 'revent' |  | ||||||
|  |  | ||||||
|     def __init__(self, owner, stage): |  | ||||||
|         super(ReventFile, self).__init__(owner) |  | ||||||
|         self.stage = stage |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class JarFile(FileResource): |  | ||||||
|  |  | ||||||
|     name = 'jar' |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ApkFile(FileResource): |  | ||||||
|  |  | ||||||
|     name = 'apk' |  | ||||||
|  |  | ||||||
|     def __init__(self, owner, version): |  | ||||||
|         super(ApkFile, self).__init__(owner) |  | ||||||
|         self.version = version |  | ||||||
| @@ -1,506 +0,0 @@ | |||||||
| #    Copyright 2013-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
| import os |  | ||||||
| import sys |  | ||||||
| import time |  | ||||||
|  |  | ||||||
| from wlauto.core.plugin import Parameter |  | ||||||
| from wlauto.core.workload import Workload |  | ||||||
| from wlauto.core.resource import NO_ONE |  | ||||||
| from wlauto.common.resources import PluginAsset, Executable |  | ||||||
| from wlauto.exceptions import WorkloadError, ResourceError, ConfigError |  | ||||||
| from wlauto.utils.android import ApkInfo, ANDROID_NORMAL_PERMISSIONS |  | ||||||
| from wlauto.utils.types import boolean |  | ||||||
| import wlauto.common.android.resources |  | ||||||
|  |  | ||||||
|  |  | ||||||
| DELAY = 5 |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class UiAutomatorWorkload(Workload): |  | ||||||
|     """ |  | ||||||
|     Base class for all workloads that rely on a UI Automator JAR file. |  | ||||||
|  |  | ||||||
|     This class should be subclassed by workloads that rely on Android UiAutomator |  | ||||||
|     to work. This class handles transferring the UI Automator JAR file to the device |  | ||||||
|     and invoking it to run the workload. By default, it will look for the JAR file in |  | ||||||
|     the same directory as the .py file for the workload (this can be changed by overriding |  | ||||||
|     the ``uiauto_file`` property in the subclassing workload). |  | ||||||
|  |  | ||||||
|     To initiate UI Automation, the fully-qualified name of the Java class and the |  | ||||||
|     corresponding method name are needed. By default, the package part of the class name |  | ||||||
|     is derived from the name of the JAR file, and the class and method names are |  | ||||||
|     ``UiAutomation`` and ``runUiAutomation`` respectively. If you have generated the |  | ||||||
|     boilerplate for the UiAutomator code using the ``create_workloads`` utility, then |  | ||||||
|     everything should be named correctly. If you're creating the Java project manually, |  | ||||||
|     you need to make sure the names match what is expected, or you could override the |  | ||||||
|     ``uiauto_package``, ``uiauto_class`` and ``uiauto_method`` class attributes with |  | ||||||
|     values that match your Java code. |  | ||||||
|  |  | ||||||
|     You can also pass parameters to the JAR file. To do this add the parameters to |  | ||||||
|     ``self.uiauto_params`` dict inside your class's ``__init__`` or ``setup`` methods. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     supported_platforms = ['android'] |  | ||||||
|  |  | ||||||
|     uiauto_package = '' |  | ||||||
|     uiauto_class = 'UiAutomation' |  | ||||||
|     uiauto_method = 'runUiAutomation' |  | ||||||
|  |  | ||||||
|     # Can be overridden by subclasses to adjust to the run time of specific |  | ||||||
|     # benchmarks. |  | ||||||
|     run_timeout = 4 * 60  # seconds |  | ||||||
|  |  | ||||||
|     def __init__(self, device, _call_super=True, **kwargs):  # pylint: disable=W0613 |  | ||||||
|         if _call_super: |  | ||||||
|             super(UiAutomatorWorkload, self).__init__(device, **kwargs) |  | ||||||
|         self.uiauto_file = None |  | ||||||
|         self.device_uiauto_file = None |  | ||||||
|         self.command = None |  | ||||||
|         self.uiauto_params = {} |  | ||||||
|  |  | ||||||
|     def init_resources(self, context): |  | ||||||
|         self.uiauto_file = context.resolver.get(wlauto.common.android.resources.JarFile(self)) |  | ||||||
|         if not self.uiauto_file: |  | ||||||
|             raise ResourceError('No UI automation JAR file found for workload {}.'.format(self.name)) |  | ||||||
|         self.device_uiauto_file = self.device.path.join(self.device.working_directory, |  | ||||||
|                                                         os.path.basename(self.uiauto_file)) |  | ||||||
|         if not self.uiauto_package: |  | ||||||
|             self.uiauto_package = os.path.splitext(os.path.basename(self.uiauto_file))[0] |  | ||||||
|  |  | ||||||
|     def setup(self, context): |  | ||||||
|         method_string = '{}.{}#{}'.format(self.uiauto_package, self.uiauto_class, self.uiauto_method) |  | ||||||
|         params_dict = self.uiauto_params |  | ||||||
|         params_dict['workdir'] = self.device.working_directory |  | ||||||
|         params = '' |  | ||||||
|         for k, v in self.uiauto_params.iteritems(): |  | ||||||
|             params += ' -e {} {}'.format(k, v) |  | ||||||
|         self.command = 'uiautomator runtest {}{} -c {}'.format(self.device_uiauto_file, params, method_string) |  | ||||||
|         self.device.push(self.uiauto_file, self.device_uiauto_file) |  | ||||||
|         self.device.killall('uiautomator') |  | ||||||
|  |  | ||||||
|     def run(self, context): |  | ||||||
|         result = self.device.execute(self.command, self.run_timeout) |  | ||||||
|         if 'FAILURE' in result: |  | ||||||
|             raise WorkloadError(result) |  | ||||||
|         else: |  | ||||||
|             self.logger.debug(result) |  | ||||||
|         time.sleep(DELAY) |  | ||||||
|  |  | ||||||
|     def update_result(self, context): |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     def teardown(self, context): |  | ||||||
|         self.device.remove(self.device_uiauto_file) |  | ||||||
|  |  | ||||||
|     def validate(self): |  | ||||||
|         if not self.uiauto_file: |  | ||||||
|             raise WorkloadError('No UI automation JAR file found for workload {}.'.format(self.name)) |  | ||||||
|         if not self.uiauto_package: |  | ||||||
|             raise WorkloadError('No UI automation package specified for workload {}.'.format(self.name)) |  | ||||||
|  |  | ||||||
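For context, each entry added to ``uiauto_params`` ends up as a ``-e key value`` argument to the ``uiautomator runtest`` invocation built in ``setup()`` above. A minimal sketch of a subclass, with a hypothetical name and parameter used purely for illustration:

    class MyUiAutoBenchmark(UiAutomatorWorkload):

        name = 'my_uiauto_benchmark'

        def setup(self, context):
            # Becomes '-e iterations 10' on the uiautomator command line.
            self.uiauto_params['iterations'] = 10
            super(MyUiAutoBenchmark, self).setup(context)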
|  |  | ||||||
| class ApkWorkload(Workload): |  | ||||||
|     """ |  | ||||||
|     A workload based on an APK file. |  | ||||||
|  |  | ||||||
|     Defines the following attributes: |  | ||||||
|  |  | ||||||
|     :package: The package name of the app. This is usually a Java-style name of the form |  | ||||||
|               ``com.companyname.appname``. |  | ||||||
|     :activity: This is the initial activity of the app. This will be used to launch the |  | ||||||
|                app during the setup. |  | ||||||
|     :view: The class of the main view pane of the app. This needs to be defined in order |  | ||||||
|            to collect SurfaceFlinger-derived statistics (such as FPS) for the app, but |  | ||||||
|            may otherwise be left as ``None``. |  | ||||||
|     :install_timeout: Timeout for the installation of the APK. This may vary wildly based on |  | ||||||
|                       the size and nature of a specific APK, and so should be defined on a |  | ||||||
|                       per-workload basis. |  | ||||||
|  |  | ||||||
|                       .. note:: To a lesser extent, this will also vary based on the |  | ||||||
|                                 device and the nature of the adb connection (USB vs |  | ||||||
|                                 Ethernet), so, as with all timeouts, some leeway must be |  | ||||||
|                                 included in the specified value. |  | ||||||
|  |  | ||||||
|     .. note:: Both package and activity for a workload may be obtained from the APK using |  | ||||||
|               the ``aapt`` tool that comes with the ADT (Android Development Tools) bundle, |  | ||||||
|               e.g. ``aapt dump badging <apk file>``. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|     package = None |  | ||||||
|     activity = None |  | ||||||
|     view = None |  | ||||||
|     supported_platforms = ['android'] |  | ||||||
|  |  | ||||||
|     parameters = [ |  | ||||||
|         Parameter('install_timeout', kind=int, default=300, |  | ||||||
|                   description='Timeout for the installation of the apk.'), |  | ||||||
|         Parameter('check_apk', kind=boolean, default=True, |  | ||||||
|                   description=''' |  | ||||||
|                   Discover the APK for this workload on the host, and check that |  | ||||||
|                   the version matches the one on device (if already installed). |  | ||||||
|                   '''), |  | ||||||
|         Parameter('force_install', kind=boolean, default=False, |  | ||||||
|                   description=''' |  | ||||||
|                   Always re-install the APK, even if a matching version is already |  | ||||||
|                   installed on the device. |  | ||||||
|                   '''), |  | ||||||
|         Parameter('uninstall_apk', kind=boolean, default=False, |  | ||||||
|                   description='If ``True``, will uninstall workload\'s APK as part of teardown.'), |  | ||||||
|     ] |  | ||||||
|  |  | ||||||
|     def __init__(self, device, _call_super=True, **kwargs): |  | ||||||
|         if _call_super: |  | ||||||
|             super(ApkWorkload, self).__init__(device, **kwargs) |  | ||||||
|         self.apk_file = None |  | ||||||
|         self.apk_version = None |  | ||||||
|         self.logcat_log = None |  | ||||||
|  |  | ||||||
|     def init_resources(self, context): |  | ||||||
|         self.apk_file = context.resolver.get(wlauto.common.android.resources.ApkFile(self), |  | ||||||
|                                              version=getattr(self, 'version', None), |  | ||||||
|                                              strict=self.check_apk) |  | ||||||
|  |  | ||||||
|     def validate(self): |  | ||||||
|         if self.check_apk: |  | ||||||
|             if not self.apk_file: |  | ||||||
|                 raise WorkloadError('No APK file found for workload {}.'.format(self.name)) |  | ||||||
|         else: |  | ||||||
|             if self.force_install: |  | ||||||
|                 raise ConfigError('force_install cannot be "True" when check_apk is set to "False".') |  | ||||||
|  |  | ||||||
|     def setup(self, context): |  | ||||||
|         self.initialize_package(context) |  | ||||||
|         self.start_activity() |  | ||||||
|         self.device.execute('am kill-all')  # kill all *background* activities |  | ||||||
|         self.device.clear_logcat() |  | ||||||
|  |  | ||||||
|     def initialize_package(self, context): |  | ||||||
|         installed_version = self.device.get_package_version(self.package) |  | ||||||
|         if self.check_apk: |  | ||||||
|             self.initialize_with_host_apk(context, installed_version) |  | ||||||
|         else: |  | ||||||
|             if not installed_version: |  | ||||||
|                 message = '''{} not found on the device and check_apk is set to "False" |  | ||||||
|                              so host version was not checked.''' |  | ||||||
|                 raise WorkloadError(message.format(self.package)) |  | ||||||
|             message = 'Version {} installed on device; skipping host APK check.' |  | ||||||
|             self.logger.debug(message.format(installed_version)) |  | ||||||
|             self.reset(context) |  | ||||||
|             self.apk_version = installed_version |  | ||||||
|  |  | ||||||
|     def initialize_with_host_apk(self, context, installed_version): |  | ||||||
|         host_version = ApkInfo(self.apk_file).version_name |  | ||||||
|         if installed_version != host_version: |  | ||||||
|             if installed_version: |  | ||||||
|                 message = '{} host version: {}, device version: {}; re-installing...' |  | ||||||
|                 self.logger.debug(message.format(os.path.basename(self.apk_file), |  | ||||||
|                                                  host_version, installed_version)) |  | ||||||
|             else: |  | ||||||
|                 message = '{} host version: {}, not found on device; installing...' |  | ||||||
|                 self.logger.debug(message.format(os.path.basename(self.apk_file), |  | ||||||
|                                                  host_version)) |  | ||||||
|             self.force_install = True  # pylint: disable=attribute-defined-outside-init |  | ||||||
|         else: |  | ||||||
|             message = '{} version {} found on both device and host.' |  | ||||||
|             self.logger.debug(message.format(os.path.basename(self.apk_file), |  | ||||||
|                                              host_version)) |  | ||||||
|         if self.force_install: |  | ||||||
|             if installed_version: |  | ||||||
|                 self.device.uninstall(self.package) |  | ||||||
|             self.install_apk(context) |  | ||||||
|         else: |  | ||||||
|             self.reset(context) |  | ||||||
|         self.apk_version = host_version |  | ||||||
|  |  | ||||||
|     def start_activity(self): |  | ||||||
|         output = self.device.execute('am start -W -n {}/{}'.format(self.package, self.activity)) |  | ||||||
|         if 'Error:' in output: |  | ||||||
|             self.device.execute('am force-stop {}'.format(self.package))  # this will dismiss any error dialogs |  | ||||||
|             raise WorkloadError(output) |  | ||||||
|         self.logger.debug(output) |  | ||||||
|  |  | ||||||
|     def reset(self, context):  # pylint: disable=W0613 |  | ||||||
|         self.device.execute('am force-stop {}'.format(self.package)) |  | ||||||
|         self.device.execute('pm clear {}'.format(self.package)) |  | ||||||
|  |  | ||||||
|         # As of Android API level 23, apps can request permissions at runtime; |  | ||||||
|         # this will grant all of them so that requests do not pop up when running the app. |  | ||||||
|         if self.device.os_version['sdk'] >= 23: |  | ||||||
|             self._grant_requested_permissions() |  | ||||||
|  |  | ||||||
|     def install_apk(self, context): |  | ||||||
|         output = self.device.install(self.apk_file, self.install_timeout) |  | ||||||
|         if 'Failure' in output: |  | ||||||
|             if 'ALREADY_EXISTS' in output: |  | ||||||
|                 self.logger.warn('Using already installed APK (did not uninstall properly?)') |  | ||||||
|             else: |  | ||||||
|                 raise WorkloadError(output) |  | ||||||
|         else: |  | ||||||
|             self.logger.debug(output) |  | ||||||
|         self.do_post_install(context) |  | ||||||
|  |  | ||||||
|     def _grant_requested_permissions(self): |  | ||||||
|         dumpsys_output = self.device.execute(command="dumpsys package {}".format(self.package)) |  | ||||||
|         permissions = [] |  | ||||||
|         lines = iter(dumpsys_output.splitlines()) |  | ||||||
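|         # Scan the 'dumpsys package' output for the "requested permissions:" |  | ||||||
|         # section; the lines that follow are assumed to list one permission per |  | ||||||
|         # line, up to the first line that does not name a permission. |  | ||||||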
|         for line in lines: |  | ||||||
|             if "requested permissions:" in line: |  | ||||||
|                 break |  | ||||||
|  |  | ||||||
|         for line in lines: |  | ||||||
|             if "android.permission." in line: |  | ||||||
|                 permissions.append(line.split(":")[0].strip()) |  | ||||||
|             else: |  | ||||||
|                 break |  | ||||||
|  |  | ||||||
|         for permission in permissions: |  | ||||||
|             # "Normal" permissions are automatically granted and cannot be changed |  | ||||||
|             permission_name = permission.rsplit('.', 1)[1] |  | ||||||
|             if permission_name not in ANDROID_NORMAL_PERMISSIONS: |  | ||||||
|                 self.device.execute("pm grant {} {}".format(self.package, permission)) |  | ||||||
|  |  | ||||||
|     def do_post_install(self, context): |  | ||||||
|         """ May be overwritten by dervied classes.""" |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     def run(self, context): |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     def update_result(self, context): |  | ||||||
|         self.logcat_log = os.path.join(context.output_directory, 'logcat.log') |  | ||||||
|         context.device_manager.dump_logcat(self.logcat_log) |  | ||||||
|         context.add_iteration_artifact(name='logcat', |  | ||||||
|                                        path='logcat.log', |  | ||||||
|                                        kind='log', |  | ||||||
|                                        description='Logcat dump for the run.') |  | ||||||
|  |  | ||||||
|     def teardown(self, context): |  | ||||||
|         self.device.execute('am force-stop {}'.format(self.package)) |  | ||||||
|         if self.uninstall_apk: |  | ||||||
|             self.device.uninstall(self.package) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| AndroidBenchmark = ApkWorkload  # backward compatibility |  | ||||||
|  |  | ||||||
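To make the attributes described above concrete, a minimal APK-based workload only needs to supply ``package`` and ``activity``; the names below are illustrative rather than taken from the tree:

    class MyApkWorkload(ApkWorkload):

        name = 'my_apk_workload'
        package = 'com.example.benchmark'
        activity = '.MainActivity'
        view = 'SurfaceView'  # only required if FPS statistics are to be collected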
|  |  | ||||||
| class ReventWorkload(Workload): |  | ||||||
|  |  | ||||||
|     default_setup_timeout = 5 * 60  # in seconds |  | ||||||
|     default_run_timeout = 10 * 60  # in seconds |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def on_device_setup_revent(self): |  | ||||||
|         return self.device.get_workpath('{}.setup.revent'.format(self.device.model)) |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def on_device_run_revent(self): |  | ||||||
|         return self.device.get_workpath('{}.run.revent'.format(self.device.model)) |  | ||||||
|  |  | ||||||
|     def __init__(self, device, _call_super=True, **kwargs): |  | ||||||
|         if _call_super: |  | ||||||
|             super(ReventWorkload, self).__init__(device, **kwargs) |  | ||||||
|         self.on_device_revent_binary = None |  | ||||||
|         self.setup_timeout = kwargs.get('setup_timeout', self.default_setup_timeout) |  | ||||||
|         self.run_timeout = kwargs.get('run_timeout', self.default_run_timeout) |  | ||||||
|         self.revent_setup_file = None |  | ||||||
|         self.revent_run_file = None |  | ||||||
|  |  | ||||||
|     def initialize(self, context): |  | ||||||
|         self.revent_setup_file = context.resolver.get(wlauto.common.android.resources.ReventFile(self, 'setup')) |  | ||||||
|         self.revent_run_file = context.resolver.get(wlauto.common.android.resources.ReventFile(self, 'run')) |  | ||||||
|  |  | ||||||
|     def setup(self, context): |  | ||||||
|         self._check_revent_files(context) |  | ||||||
|         self.device.killall('revent') |  | ||||||
|         command = '{} replay {}'.format(self.on_device_revent_binary, self.on_device_setup_revent) |  | ||||||
|         self.device.execute(command, timeout=self.setup_timeout) |  | ||||||
|  |  | ||||||
|     def run(self, context): |  | ||||||
|         command = '{} replay {}'.format(self.on_device_revent_binary, self.on_device_run_revent) |  | ||||||
|         self.logger.debug('Replaying {}'.format(os.path.basename(self.on_device_run_revent))) |  | ||||||
|         self.device.execute(command, timeout=self.run_timeout) |  | ||||||
|         self.logger.debug('Replay completed.') |  | ||||||
|  |  | ||||||
|     def update_result(self, context): |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     def teardown(self, context): |  | ||||||
|         self.device.remove(self.on_device_setup_revent) |  | ||||||
|         self.device.remove(self.on_device_run_revent) |  | ||||||
|  |  | ||||||
|     def _check_revent_files(self, context): |  | ||||||
|         # check the revent binary |  | ||||||
|         revent_binary = context.resolver.get(Executable(NO_ONE, self.device.abi, 'revent')) |  | ||||||
|         if not os.path.isfile(revent_binary): |  | ||||||
|             message = '{} does not exist. '.format(revent_binary) |  | ||||||
|             message += 'Please build revent for your system and place it in that location' |  | ||||||
|             raise WorkloadError(message) |  | ||||||
|         if not self.revent_setup_file: |  | ||||||
|             # pylint: disable=too-few-format-args |  | ||||||
|             message = '{0}.setup.revent file does not exist. Please provide one for your device ({0}).'.format(self.device.name) |  | ||||||
|             raise WorkloadError(message) |  | ||||||
|         if not self.revent_run_file: |  | ||||||
|             # pylint: disable=too-few-format-args |  | ||||||
|             message = '{0}.run.revent file does not exist. Please provide one for your device ({0}).'.format(self.device.name) |  | ||||||
|             raise WorkloadError(message) |  | ||||||
|  |  | ||||||
|         self.on_device_revent_binary = self.device.install_executable(revent_binary) |  | ||||||
|         self.device.push(self.revent_run_file, self.on_device_run_revent) |  | ||||||
|         self.device.push(self.revent_setup_file, self.on_device_setup_revent) |  | ||||||
|  |  | ||||||
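A subclass typically just supplies recorded ``<device model>.setup.revent`` and ``<device model>.run.revent`` files where the resource resolver can find them; the class-level timeouts can be overridden where replays run long. A sketch (the name and value are illustrative):

    class MyReplayWorkload(ReventWorkload):

        name = 'my_replay_workload'
        default_run_timeout = 15 * 60  # allow up to 15 minutes for the recorded run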
|  |  | ||||||
| class AndroidUiAutoBenchmark(UiAutomatorWorkload, AndroidBenchmark): |  | ||||||
|  |  | ||||||
|     supported_platforms = ['android'] |  | ||||||
|  |  | ||||||
|     def __init__(self, device, **kwargs): |  | ||||||
|         UiAutomatorWorkload.__init__(self, device, **kwargs) |  | ||||||
|         AndroidBenchmark.__init__(self, device, _call_super=False, **kwargs) |  | ||||||
|  |  | ||||||
|     def init_resources(self, context): |  | ||||||
|         UiAutomatorWorkload.init_resources(self, context) |  | ||||||
|         AndroidBenchmark.init_resources(self, context) |  | ||||||
|  |  | ||||||
|     def setup(self, context): |  | ||||||
|         UiAutomatorWorkload.setup(self, context) |  | ||||||
|         AndroidBenchmark.setup(self, context) |  | ||||||
|  |  | ||||||
|     def update_result(self, context): |  | ||||||
|         UiAutomatorWorkload.update_result(self, context) |  | ||||||
|         AndroidBenchmark.update_result(self, context) |  | ||||||
|  |  | ||||||
|     def teardown(self, context): |  | ||||||
|         UiAutomatorWorkload.teardown(self, context) |  | ||||||
|         AndroidBenchmark.teardown(self, context) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class GameWorkload(ApkWorkload, ReventWorkload): |  | ||||||
|     """ |  | ||||||
|     GameWorkload is the base class for all workloads that use revent files to |  | ||||||
|     run. |  | ||||||
|  |  | ||||||
|     For more in depth details on how to record revent files, please see |  | ||||||
|     :ref:`revent_files_creation`. To subclass this class, please refer to |  | ||||||
|     :ref:`GameWorkload`. |  | ||||||
|  |  | ||||||
|     Additionally, this class defines the following attributes: |  | ||||||
|  |  | ||||||
|     :asset_file: A tarball containing additional assets for the workload. These are the assets |  | ||||||
|                  that are not part of the APK but would need to be downloaded by the workload |  | ||||||
|                  (usually, on first run of the app). Since the presence of a network connection |  | ||||||
|                  cannot be assumed on some devices, this provides an alternative means of obtaining |  | ||||||
|                  the assets. |  | ||||||
|     :saved_state_file: A tarball containing the saved state for a workload. This tarball gets |  | ||||||
|                        deployed in the same way as the asset file. The only difference is that |  | ||||||
|                        it is usually much smaller, and re-deploying the tarball alone should be |  | ||||||
|                        enough to reset the workload to a known state (without having to reinstall |  | ||||||
|                        the app or re-deploy the other assets). |  | ||||||
|     :loading_time: Time it takes for the workload to load after the initial activity has been |  | ||||||
|                    started. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     # May be optionally overridden by subclasses |  | ||||||
|     asset_file = None |  | ||||||
|     saved_state_file = None |  | ||||||
|     view = 'SurfaceView' |  | ||||||
|     loading_time = 10 |  | ||||||
|     supported_platforms = ['android'] |  | ||||||
|  |  | ||||||
|     parameters = [ |  | ||||||
|         Parameter('install_timeout', default=500, override=True), |  | ||||||
|         Parameter('assets_push_timeout', kind=int, default=500, |  | ||||||
|                   description='Timeout used during deployment of the assets package (if there is one).'), |  | ||||||
|         Parameter('clear_data_on_reset', kind=bool, default=True, |  | ||||||
|                   description=""" |  | ||||||
|                   If set to ``False``, this will prevent WA from clearing package |  | ||||||
|                   data for this workload prior to running it. |  | ||||||
|                   """), |  | ||||||
|     ] |  | ||||||
|  |  | ||||||
|     def __init__(self, device, **kwargs):  # pylint: disable=W0613 |  | ||||||
|         ApkWorkload.__init__(self, device, **kwargs) |  | ||||||
|         ReventWorkload.__init__(self, device, _call_super=False, **kwargs) |  | ||||||
|         self.logcat_process = None |  | ||||||
|         self.module_dir = os.path.dirname(sys.modules[self.__module__].__file__) |  | ||||||
|         self.revent_dir = os.path.join(self.module_dir, 'revent_files') |  | ||||||
|  |  | ||||||
|     def apk_init_resources(self, context): |  | ||||||
|         ApkWorkload.init_resources(self, context) |  | ||||||
|  |  | ||||||
|     def init_resources(self, context): |  | ||||||
|         self.apk_init_resources(context) |  | ||||||
|         ReventWorkload.init_resources(self, context) |  | ||||||
|  |  | ||||||
|     def setup(self, context): |  | ||||||
|         ApkWorkload.setup(self, context) |  | ||||||
|         self.logger.debug('Waiting for the game to load...') |  | ||||||
|         time.sleep(self.loading_time) |  | ||||||
|         ReventWorkload.setup(self, context) |  | ||||||
|  |  | ||||||
|     def do_post_install(self, context): |  | ||||||
|         ApkWorkload.do_post_install(self, context) |  | ||||||
|         self._deploy_assets(context, self.assets_push_timeout) |  | ||||||
|  |  | ||||||
|     def reset(self, context): |  | ||||||
|         # If saved state exists, restore it; if not, do full |  | ||||||
|         # uninstall/install cycle. |  | ||||||
|         self.device.execute('am force-stop {}'.format(self.package)) |  | ||||||
|         if self.saved_state_file: |  | ||||||
|             self._deploy_resource_tarball(context, self.saved_state_file) |  | ||||||
|         else: |  | ||||||
|             if self.clear_data_on_reset: |  | ||||||
|                 self.device.execute('pm clear {}'.format(self.package)) |  | ||||||
|             self._deploy_assets(context) |  | ||||||
|  |  | ||||||
|     def run(self, context): |  | ||||||
|         ReventWorkload.run(self, context) |  | ||||||
|  |  | ||||||
|     def apk_teardown(self, context): |  | ||||||
|         if not self.saved_state_file: |  | ||||||
|             ApkWorkload.teardown(self, context) |  | ||||||
|         else: |  | ||||||
|             self.device.execute('am force-stop {}'.format(self.package)) |  | ||||||
|  |  | ||||||
|     def teardown(self, context): |  | ||||||
|         self.apk_teardown(context) |  | ||||||
|         ReventWorkload.teardown(self, context) |  | ||||||
|  |  | ||||||
|     def _deploy_assets(self, context, timeout=300): |  | ||||||
|         if self.asset_file: |  | ||||||
|             self._deploy_resource_tarball(context, self.asset_file, timeout) |  | ||||||
|         if self.saved_state_file:  # must be deployed *after* asset tarball! |  | ||||||
|             self._deploy_resource_tarball(context, self.saved_state_file, timeout) |  | ||||||
|  |  | ||||||
|     def _deploy_resource_tarball(self, context, resource_file, timeout=300): |  | ||||||
|         kind = 'data' |  | ||||||
|         if ':' in resource_file: |  | ||||||
|             kind, resource_file = resource_file.split(':', 1) |  | ||||||
|         ondevice_cache = self.device.path.join(self.device.working_directory, '.cache', self.name, resource_file) |  | ||||||
|         if not self.device.file_exists(ondevice_cache): |  | ||||||
|             asset_tarball = context.resolver.get(PluginAsset(self, resource_file)) |  | ||||||
|             if not asset_tarball: |  | ||||||
|                 message = 'Could not find resource {} for workload {}.' |  | ||||||
|                 raise WorkloadError(message.format(resource_file, self.name)) |  | ||||||
|             # adb push will create intermediate directories if they don't |  | ||||||
|             # exist. |  | ||||||
|             self.device.push(asset_tarball, ondevice_cache, timeout=timeout) |  | ||||||
|  |  | ||||||
|         device_asset_directory = self.device.path.join(context.device_manager.external_storage_directory, 'Android', kind) |  | ||||||
|         deploy_command = 'cd {} && {} tar -xzf {}'.format(device_asset_directory, |  | ||||||
|                                                           self.device.busybox, |  | ||||||
|                                                           ondevice_cache) |  | ||||||
|         self.device.execute(deploy_command, timeout=timeout, as_root=True) |  | ||||||
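Putting the attributes together, a minimal game workload supplies the APK identifiers plus the optional tarballs; the ``obb:`` prefix below illustrates the ``kind:file`` syntax understood by ``_deploy_resource_tarball``, and all names are illustrative:

    class MyGame(GameWorkload):

        name = 'my_game'
        package = 'com.example.game'
        activity = '.GameActivity'
        asset_file = 'obb:assets.tar.gz'        # extracted under Android/obb
        saved_state_file = 'save_state.tar.gz'  # kind defaults to 'data'
        loading_time = 30                       # seconds to wait after launch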
										
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
| @@ -1,6 +0,0 @@ | |||||||
| The gem5 simulator can be obtained from http://repo.gem5.org/gem5/ and the |  | ||||||
| corresponding documentation can be found at http://www.gem5.org. |  | ||||||
|  |  | ||||||
| The source for the m5 binaries bundled with Workload Automation (found at |  | ||||||
| wlauto/common/bin/arm64/m5 and wlauto/common/bin/armeabi/m5) can be found at |  | ||||||
| util/m5 in the gem5 source at http://repo.gem5.org/gem5/. |  | ||||||
| @@ -1,16 +0,0 @@ | |||||||
| #    Copyright 2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
| @@ -1,64 +0,0 @@ | |||||||
| #    Copyright 2013-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
| import os |  | ||||||
|  |  | ||||||
| from wlauto.core.resource import Resource |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class FileResource(Resource): |  | ||||||
|     """ |  | ||||||
|     Base class for all resources that are a regular file in the |  | ||||||
|     file system. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     def delete(self, instance): |  | ||||||
|         os.remove(instance) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class File(FileResource): |  | ||||||
|  |  | ||||||
|     name = 'file' |  | ||||||
|  |  | ||||||
|     def __init__(self, owner, path, url=None): |  | ||||||
|         super(File, self).__init__(owner) |  | ||||||
|         self.path = path |  | ||||||
|         self.url = url |  | ||||||
|  |  | ||||||
|     def __str__(self): |  | ||||||
|         return '<{}\'s {} {}>'.format(self.owner, self.name, self.path or self.url) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class PluginAsset(File): |  | ||||||
|  |  | ||||||
|     name = 'plugin_asset' |  | ||||||
|  |  | ||||||
|     def __init__(self, owner, path): |  | ||||||
|         super(PluginAsset, self).__init__(owner, os.path.join(owner.name, path)) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class Executable(FileResource): |  | ||||||
|  |  | ||||||
|     name = 'executable' |  | ||||||
|  |  | ||||||
|     def __init__(self, owner, platform, filename): |  | ||||||
|         super(Executable, self).__init__(owner) |  | ||||||
|         self.platform = platform |  | ||||||
|         self.filename = filename |  | ||||||
|  |  | ||||||
|     def __str__(self): |  | ||||||
|         return '<{}\'s {} {}>'.format(self.owner, self.platform, self.filename) |  | ||||||
| @@ -1,289 +0,0 @@ | |||||||
| """ |  | ||||||
| Default config for Workload Automation. DO NOT MODIFY this file. This file |  | ||||||
| gets copied to ~/.workload_automation/config.py on initial run of run_workloads. |  | ||||||
| Add your configuration to that file instead. |  | ||||||
|  |  | ||||||
| """ |  | ||||||
| #  *** WARNING: *** |  | ||||||
| # Configuration listed in this file is NOT COMPLETE. This file sets the default |  | ||||||
| # configuration for WA and gives EXAMPLES of other configuration available. It |  | ||||||
| # is not supposed to be an exhaustive list. |  | ||||||
| # PLEASE REFER TO WA DOCUMENTATION FOR THE COMPLETE LIST OF AVAILABLE |  | ||||||
| # EXTENSIONS AND THEIR configuration. |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # This defines when the device will be rebooted during Workload Automation execution.              # |  | ||||||
| #                                                                                                  # |  | ||||||
| # Valid policies are:                                                                              # |  | ||||||
| #   never:  The device will never be rebooted.                                                     # |  | ||||||
| #   as_needed: The device will only be rebooted if the need arises (e.g. if it                     # |  | ||||||
| #              becomes unresponsive).                                                              # |  | ||||||
| #   initial: The device will be rebooted when the execution first starts, just before executing    # |  | ||||||
| #            the first workload spec.                                                              # |  | ||||||
| #   each_spec: The device will be rebooted before running a new workload spec.                     # |  | ||||||
| #   each_iteration: The device will be rebooted before each new iteration.                         # |  | ||||||
| #                                                                                                  # |  | ||||||
| reboot_policy = 'as_needed' |  | ||||||
|  |  | ||||||
| #  Defines the order in which the agenda spec will be executed. At the moment,                     # |  | ||||||
| #  the following execution orders are supported:                                                   # |  | ||||||
| #                                                                                                  # |  | ||||||
| #   by_iteration: The first iteration of each workload spec is executed one after the other,       # |  | ||||||
| #                 so all workloads are executed before proceeding on to the second iteration.      # |  | ||||||
| #                 This is the default if no order is explicitly specified.                         # |  | ||||||
| #                 If multiple sections were specified, this will also split them up, so that specs # |  | ||||||
| #                 in the same section are further apart in the execution order.                    # |  | ||||||
| #   by_section:   Same as "by_iteration", but runs specs from the same section one after the other # |  | ||||||
| #   by_spec:      All iterations of the first spec are executed before moving on to the next       # |  | ||||||
| #                 spec. This may also be specified as ``"classic"``, as this was the way           # |  | ||||||
| #                 workloads were executed in earlier versions of WA.                               # |  | ||||||
| #   random:       Randomises the order in which specs run.                                         # |  | ||||||
| execution_order = 'by_iteration' |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # This indicates when a job will be re-run. |  | ||||||
| # Possible values: |  | ||||||
| #     OK: This iteration has completed and no errors have been detected |  | ||||||
| #     PARTIAL: One or more instruments have failed (the iteration may still be running). |  | ||||||
| #     FAILED: The workload itself has failed. |  | ||||||
| #     ABORTED: The user interrupted the workload |  | ||||||
| # |  | ||||||
| # If set to an empty list, a job will not be re-run ever. |  | ||||||
| retry_on_status = ['FAILED', 'PARTIAL'] |  | ||||||
|  |  | ||||||
| # How many times a job will be re-run before giving up |  | ||||||
| max_retries = 3 |  | ||||||
|  |  | ||||||
| #################################################################################################### |  | ||||||
| ######################################### Device Settings ########################################## |  | ||||||
| #################################################################################################### |  | ||||||
| # Specify the device you want to run workload automation on. This must be a                        # |  | ||||||
| # string with the ID of the device.                                                                # |  | ||||||
| #                                                                                                  # |  | ||||||
| device = 'generic_android' |  | ||||||
|  |  | ||||||
| # Configuration options that will be passed onto the device. These are obviously device-specific,  # |  | ||||||
| # so check the documentation for the particular device to find out which options and values are    # |  | ||||||
| # valid. The settings listed below are common to all devices                                       # |  | ||||||
| #                                                                                                  # |  | ||||||
| device_config = dict( |  | ||||||
|     # The name used by adb to identify the device. Use "adb devices" in bash to list |  | ||||||
|     # the devices currently seen by adb. |  | ||||||
|     #adb_name='10.109.173.2:5555', |  | ||||||
|  |  | ||||||
|     # The directory on the device that WA will use to push files to |  | ||||||
|     #working_directory='/sdcard/wa-working', |  | ||||||
|  |  | ||||||
|     # This specifies the device's CPU cores. The order must match how they |  | ||||||
|     # appear in cpufreq. The example below is for TC2. |  | ||||||
|     # core_names = ['a7', 'a7', 'a7', 'a15', 'a15'] |  | ||||||
|  |  | ||||||
|     # Specifies cluster mapping for the device's cores. |  | ||||||
|     # core_clusters = [0, 0, 0, 1, 1] |  | ||||||
| ) |  | ||||||
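For example, for a TC2-style big.LITTLE device connected to adb over TCP/IP, the commented options above could be filled in as follows (all values are illustrative):

    device_config = dict(
        adb_name='10.109.173.2:5555',
        working_directory='/sdcard/wa-working',
        core_names=['a7', 'a7', 'a7', 'a15', 'a15'],
        core_clusters=[0, 0, 0, 1, 1],
    )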
|  |  | ||||||
|  |  | ||||||
| #################################################################################################### |  | ||||||
| ################################## Instrumentation Configuration ################################### |  | ||||||
| #################################################################################################### |  | ||||||
| # This defines the additional instrumentation that will be enabled during workload execution,      # |  | ||||||
| # which in turn determines what additional data (such as /proc/interrupts content or Streamline    # |  | ||||||
| # traces) will be available in the results directory.                                              # |  | ||||||
| #                                                                                                  # |  | ||||||
| instrumentation = [ |  | ||||||
|     # Records the time it took to run the workload |  | ||||||
|     'execution_time', |  | ||||||
|  |  | ||||||
|     # Collects /proc/interrupts before and after execution and does a diff. |  | ||||||
|     'interrupts', |  | ||||||
|  |  | ||||||
|     # Collects the contents of /sys/devices/system/cpu before and after execution and does a diff. |  | ||||||
|     'cpufreq', |  | ||||||
|  |  | ||||||
|     # Gets energy usage for the workload from HWMON devices |  | ||||||
|     # NOTE: the hardware needs to have the right sensors in order for this to work |  | ||||||
|     #'hwmon', |  | ||||||
|  |  | ||||||
|     # Run perf in the background during workload execution and then collect the results. perf is a |  | ||||||
|     # standard Linux performance analysis tool. |  | ||||||
|     #'perf', |  | ||||||
|  |  | ||||||
|     # Collect Streamline traces during workload execution. Streamline is part of DS-5 |  | ||||||
|     #'streamline', |  | ||||||
|  |  | ||||||
|     # Collects traces by interacting with Ftrace Linux kernel internal tracer |  | ||||||
|     #'trace-cmd', |  | ||||||
|  |  | ||||||
|     # Obtains the power consumption of the target device's core measured by National Instruments |  | ||||||
|     # Data Acquisition (DAQ) device. |  | ||||||
|     #'daq', |  | ||||||
|  |  | ||||||
|     # Collects CCI counter data. |  | ||||||
|     #'cci_pmu_logger', |  | ||||||
|  |  | ||||||
|     # Collects FPS (Frames Per Second) and related metrics (such as jank) from |  | ||||||
|     # the View of the workload (Note: only a single View per workload is |  | ||||||
|     # supported at the moment, so this is mainly useful for games). |  | ||||||
|     #'fps', |  | ||||||
| ] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| #################################################################################################### |  | ||||||
| ################################# Result Processors Configuration ################################## |  | ||||||
| #################################################################################################### |  | ||||||
| # Specifies how results will be processed and presented.                                           # |  | ||||||
| #                                                                                                  # |  | ||||||
| result_processors = [ |  | ||||||
|     # Creates a status.txt that provides a summary status for the run |  | ||||||
|     'status', |  | ||||||
|  |  | ||||||
|     # Creates a results.txt file for each iteration that lists all collected metrics |  | ||||||
|     # in "name = value (units)" format |  | ||||||
|     'standard', |  | ||||||
|  |  | ||||||
|     # Creates a results.csv that contains metrics for all iterations of all workloads |  | ||||||
|     # in the .csv format. |  | ||||||
|     'csv', |  | ||||||
|  |  | ||||||
|     # Creates a summary.csv that contains summary metrics for all iterations of all |  | ||||||
|     # workloads in the .csv format. Summary metrics are defined on a per-workload |  | ||||||
|     # basis and are typically things like overall scores. The contents of summary.csv are |  | ||||||
|     # always a subset of the contents of results.csv (if it is generated). |  | ||||||
|     #'summary_csv', |  | ||||||
|  |  | ||||||
|     # Creates a results.json that contains metrics for all iterations of all workloads |  | ||||||
|     # in the JSON format. |  | ||||||
|     #'json', |  | ||||||
|  |  | ||||||
|     # Write results to an sqlite3 database. By default, a new database will be |  | ||||||
|     # generated for each run, however it is possible to specify a path to an |  | ||||||
|     # existing DB file (see result processor configuration below), in which |  | ||||||
|     # case results from multiple runs may be stored in the one file. |  | ||||||
|     #'sqlite', |  | ||||||
| ] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| #################################################################################################### |  | ||||||
| ################################### Logging output Configuration ################################### |  | ||||||
| #################################################################################################### |  | ||||||
| # Specify the format of logging messages. The format uses the old formatting syntax:               # |  | ||||||
| #                                                                                                  # |  | ||||||
| #   http://docs.python.org/2/library/stdtypes.html#string-formatting-operations                    # |  | ||||||
| #                                                                                                  # |  | ||||||
| # The attributes that can be used in formats are listed here:                                      # |  | ||||||
| #                                                                                                  # |  | ||||||
| #   http://docs.python.org/2/library/logging.html#logrecord-attributes                             # |  | ||||||
| #                                                                                                  # |  | ||||||
| logging = { |  | ||||||
|     # Log file format |  | ||||||
|     'file format': '%(asctime)s %(levelname)-8s %(name)s: %(message)s', |  | ||||||
|     # Verbose console output format |  | ||||||
|     'verbose format': '%(asctime)s %(levelname)-8s %(name)s: %(message)s', |  | ||||||
|     # Regular console output format |  | ||||||
|     'regular format': '%(levelname)-8s %(message)s', |  | ||||||
|     # Colouring the console output |  | ||||||
|     'colour_enabled': True, |  | ||||||
| } |  | ||||||
|  |  | ||||||
|  |  | ||||||
| #################################################################################################### |  | ||||||
| #################################### Instruments Configuration ##################################### |  | ||||||
| #################################################################################################### |  | ||||||
| # Instrument configuration covers settings specific to individual instruments. Some of the         # |  | ||||||
| # instruments require specific settings in order for them to work. These settings are              # |  | ||||||
| # specified here.                                                                                  # |  | ||||||
| # Note that these settings only take effect if the corresponding instrument is |  | ||||||
| # enabled above. |  | ||||||
|  |  | ||||||
| #################################################################################################### |  | ||||||
| ######################################## perf configuration ######################################## |  | ||||||
|  |  | ||||||
| # The hardware events (such as instructions executed, cache misses suffered, or branches |  | ||||||
| # mispredicted) to be reported by perf. Available events can be listed on the device by |  | ||||||
| # typing 'perf list'. |  | ||||||
| #perf_events = ['migrations', 'cs'] |  | ||||||
|  |  | ||||||
| # The perf options, which can be obtained from the man page for perf-record |  | ||||||
| #perf_options = '-a -i' |  | ||||||
|  |  | ||||||
| #################################################################################################### |  | ||||||
| ####################################### hwmon configuration ######################################## |  | ||||||
|  |  | ||||||
| # The kinds of sensors the hwmon instrument will look for |  | ||||||
| #hwmon_sensors = ['energy', 'temp'] |  | ||||||
|  |  | ||||||
| #################################################################################################### |  | ||||||
| ###################################### trace-cmd configuration ##################################### |  | ||||||
|  |  | ||||||
| # trace-cmd events to be traced. Available events can be listed by running |  | ||||||
| # 'trace-cmd list -e' on the (rooted) device. |  | ||||||
| #trace_events = ['power*'] |  | ||||||
|  |  | ||||||
| #################################################################################################### |  | ||||||
| ######################################### DAQ configuration ######################################## |  | ||||||
|  |  | ||||||
| # The host address of the machine that runs the DAQ server with which the instrument communicates |  | ||||||
| #daq_server_host = '10.1.17.56' |  | ||||||
|  |  | ||||||
| # The port number on which the DAQ server listens |  | ||||||
| #daq_server_port = 56788 |  | ||||||
|  |  | ||||||
| # The values of resistors 1 and 2 (in Ohms) across which the voltages are measured |  | ||||||
| #daq_resistor_values = [0.002, 0.002] |  | ||||||
|  |  | ||||||
| #################################################################################################### |  | ||||||
| ################################### cci_pmu_logger configuration ################################### |  | ||||||
|  |  | ||||||
| # The events to be counted by the PMU |  | ||||||
| # NOTE: The number of events must not exceed the number of counters available (which is 4 for CCI-400) |  | ||||||
| #cci_pmu_events = ['0x63', '0x83'] |  | ||||||
|  |  | ||||||
| # The name of the events which will be used when reporting PMU counts |  | ||||||
| #cci_pmu_event_labels = ['event_0x63', 'event_0x83'] |  | ||||||
|  |  | ||||||
| # The period (in jiffies) between counter reads |  | ||||||
| #cci_pmu_period = 15 |  | ||||||
|  |  | ||||||
| #################################################################################################### |  | ||||||
| ################################### fps configuration ############################################## |  | ||||||
|  |  | ||||||
| # Data points below this FPS will be dropped as not constituting "real" gameplay. The assumption |  | ||||||
| # is that, while actually running, the FPS in the game will not drop below this threshold, except |  | ||||||
| # on loading screens, menus, etc., which should not contribute to the FPS calculation. |  | ||||||
| #fps_drop_threshold=5 |  | ||||||
|  |  | ||||||
| # If set to True, this will keep the raw dumpsys output in the results directory (this is mainly |  | ||||||
| # used for debugging). Note: frames.csv with collected frames data will always be generated |  | ||||||
| # regardless of this setting. |  | ||||||
| #fps_keep_raw=False |  | ||||||
|  |  | ||||||
| #################################################################################################### |  | ||||||
| ################################# Result Processor Configuration ################################### |  | ||||||
| #################################################################################################### |  | ||||||
|  |  | ||||||
| # Specifies an alternative database to store results in. If the file does not |  | ||||||
| # exist, it will be created (the directory of the file must exist, however). If |  | ||||||
| # the file does exist, the results will be added to the existing data set (each |  | ||||||
| # run has a UUID, so results won't clash even if identical agendas were used). |  | ||||||
| # Note that in order for this to work, the version of the schema used to generate |  | ||||||
| # the DB file must match that of the schema used for the current run. Please |  | ||||||
| # see the "What's new" section in the WA docs to check if the schema has changed in |  | ||||||
| # recent releases of WA. |  | ||||||
| #sqlite_database = '/work/results/myresults.sqlite' |  | ||||||
|  |  | ||||||
| # If the file specified by sqlite_database exists, setting this to True will |  | ||||||
| # cause that file to be overwritten rather than updated -- existing results in |  | ||||||
| # the file will be lost. |  | ||||||
| #sqlite_overwrite = False |  | ||||||
|  |  | ||||||
| # distribution: internal |  | ||||||
|  |  | ||||||
| #################################################################################################### |  | ||||||
| #################################### Resource Getter configuration ################################# |  | ||||||
| #################################################################################################### |  | ||||||
|  |  | ||||||
| # The location on your system where /arm/scratch is mounted. Used by |  | ||||||
| # Scratch resource getter. |  | ||||||
| #scratch_mount_point = '/arm/scratch' |  | ||||||
|  |  | ||||||
| # end distribution |  | ||||||
| @@ -1,16 +0,0 @@ | |||||||
| #    Copyright 2013-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
| @@ -1,81 +0,0 @@ | |||||||
| #    Copyright 2014-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
| import textwrap |  | ||||||
|  |  | ||||||
| from wlauto.core.plugin import Plugin |  | ||||||
| from wlauto.utils.doc import format_body |  | ||||||
| from wlauto.core.version import get_wa_version |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def init_argument_parser(parser): |  | ||||||
|     parser.add_argument('-c', '--config', action='append', default=[], |  | ||||||
|                         help='specify an additional config.py') |  | ||||||
|     parser.add_argument('-v', '--verbose', action='count', |  | ||||||
|                         help='The scripts will produce verbose output.') |  | ||||||
|     parser.add_argument('--version', action='version',  |  | ||||||
|                         version='%(prog)s {}'.format(get_wa_version())) |  | ||||||
|     return parser |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class Command(Plugin): |  | ||||||
|     """ |  | ||||||
|     Defines a Workload Automation command. This will be executed from the |  | ||||||
|     command line as ``wa <command> [args ...]``. This defines the name to be |  | ||||||
|     used when invoking wa, the code that will actually be executed on |  | ||||||
|     invocation and the argument parser to be used to parse the reset of the |  | ||||||
|     command line arguments. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|     kind = "command" |  | ||||||
|     help = None |  | ||||||
|     usage = None |  | ||||||
|     description = None |  | ||||||
|     epilog = None |  | ||||||
|     formatter_class = None |  | ||||||
|  |  | ||||||
|     def __init__(self, subparsers): |  | ||||||
|         super(Command, self).__init__() |  | ||||||
|         self.group = subparsers |  | ||||||
|         parser_params = dict(help=(self.help or self.description), usage=self.usage, |  | ||||||
|                              description=format_body(textwrap.dedent(self.description), 80), |  | ||||||
|                              epilog=self.epilog) |  | ||||||
|         if self.formatter_class: |  | ||||||
|             parser_params['formatter_class'] = self.formatter_class |  | ||||||
|         self.parser = subparsers.add_parser(self.name, **parser_params) |  | ||||||
|         init_argument_parser(self.parser)  # propagate top-level options |  | ||||||
|         self.initialize(None) |  | ||||||
|  |  | ||||||
|     def initialize(self, context): |  | ||||||
|         """ |  | ||||||
|         Perform command-specific initialisation (e.g. adding command-specific |  | ||||||
|         options to the command's parser). ``context`` is always ``None``. |  | ||||||
|  |  | ||||||
|         """ |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     def execute(self, state, args): |  | ||||||
|         """ |  | ||||||
|         Execute this command. |  | ||||||
|  |  | ||||||
|     :state: An initialized ``ConfigManager`` that contains the current state of |  | ||||||
|             WA execution up to that point (processed configuration, loaded |  | ||||||
|             plugins, etc.). |  | ||||||
|     :args: An ``argparse.Namespace`` containing command line arguments (as returned |  | ||||||
|            by ``argparse.ArgumentParser.parse_args()``). This would usually be the |  | ||||||
|            result of invoking ``self.parser``. |  | ||||||
|  |  | ||||||
|         """ |  | ||||||
|         raise NotImplementedError() |  | ||||||
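|  |  | ||||||
|  |  | ||||||
| # Illustrative sketch, not part of the original file: a minimal Command |  | ||||||
| # subclass (the "hello" command and its option are invented for the example). |  | ||||||
| # The plugin loader instantiates such classes with main()'s subparsers object; |  | ||||||
| # initialize() then extends self.parser, and execute() receives the parsed |  | ||||||
| # arguments. |  | ||||||
| class HelloCommand(Command): |  | ||||||
|  |  | ||||||
|     name = 'hello' |  | ||||||
|     description = 'Print a greeting (example command).' |  | ||||||
|  |  | ||||||
|     def initialize(self, context): |  | ||||||
|         # context is always None here; add command-specific options. |  | ||||||
|         self.parser.add_argument('--who', default='world') |  | ||||||
|  |  | ||||||
|     def execute(self, state, args): |  | ||||||
|         print 'hello, {}'.format(args.who) |  | ||||||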
| @@ -1,19 +0,0 @@ | |||||||
| #    Copyright 2013-2016 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
| from wlauto.core.configuration.configuration import (settings, |  | ||||||
|                                                      RunConfiguration, |  | ||||||
|                                                      JobGenerator, |  | ||||||
|                                                      ConfigurationPoint) |  | ||||||
| from wlauto.core.configuration.plugin_cache import PluginCache |  | ||||||
(File diff suppressed because it is too large)
							| @@ -1,42 +0,0 @@ | |||||||
| from wlauto.core.configuration.configuration import MetaConfiguration, RunConfiguration |  | ||||||
| from wlauto.core.configuration.plugin_cache import PluginCache |  | ||||||
| from wlauto.utils.serializer import yaml |  | ||||||
| from wlauto.utils.doc import strip_inlined_text |  | ||||||
|  |  | ||||||
| DEFAULT_INSTRUMENTS = ['execution_time', |  | ||||||
|                        'interrupts', |  | ||||||
|                        'cpufreq', |  | ||||||
|                        'status', |  | ||||||
|                        'standard', |  | ||||||
|                        'csv'] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def _format_yaml_comment(param, short_description=False): |  | ||||||
|     comment = param.description |  | ||||||
|     comment = strip_inlined_text(comment) |  | ||||||
|     if short_description: |  | ||||||
|         comment = comment.split('\n\n')[0] |  | ||||||
|     comment = comment.replace('\n', '\n# ') |  | ||||||
|     comment = "# {}\n".format(comment) |  | ||||||
|     return comment |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def _format_instruments(output): |  | ||||||
|     plugin_cache = PluginCache() |  | ||||||
|     output.write("instrumentation:\n") |  | ||||||
|     for plugin in DEFAULT_INSTRUMENTS: |  | ||||||
|         plugin_cls = plugin_cache.loader.get_plugin_class(plugin) |  | ||||||
|         output.writelines(_format_yaml_comment(plugin_cls, short_description=True)) |  | ||||||
|         output.write(" - {}\n".format(plugin)) |  | ||||||
|         output.write("\n") |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def generate_default_config(path): |  | ||||||
|     with open(path, 'w') as output: |  | ||||||
|         for param in MetaConfiguration.config_points + RunConfiguration.config_points: |  | ||||||
|             entry = {param.name: param.default} |  | ||||||
|             comment = _format_yaml_comment(param) |  | ||||||
|             output.writelines(comment) |  | ||||||
|             yaml.dump(entry, output, default_flow_style=False) |  | ||||||
|             output.write("\n") |  | ||||||
|         _format_instruments(output) |  | ||||||
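|  |  | ||||||
|  |  | ||||||
| # Illustrative usage, not part of the original file; the output path is an |  | ||||||
| # example. |  | ||||||
| if __name__ == '__main__': |  | ||||||
|     generate_default_config('/tmp/wa-config.yaml') |  | ||||||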
| @@ -1,213 +0,0 @@ | |||||||
| import random |  | ||||||
| from itertools import izip_longest, groupby, chain |  | ||||||
|  |  | ||||||
| from wlauto.core import pluginloader |  | ||||||
| from wlauto.core.configuration.configuration import (MetaConfiguration, |  | ||||||
|                                                      RunConfiguration, |  | ||||||
|                                                      JobGenerator, settings) |  | ||||||
| from wlauto.core.configuration.parsers import ConfigParser |  | ||||||
| from wlauto.core.configuration.plugin_cache import PluginCache |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class CombinedConfig(object): |  | ||||||
|  |  | ||||||
|     @staticmethod |  | ||||||
|     def from_pod(pod): |  | ||||||
|         instance = CombinedConfig() |  | ||||||
|         instance.settings = MetaConfiguration.from_pod(pod.get('settings', {})) |  | ||||||
|         instance.run_config = RunConfiguration.from_pod(pod.get('run_config', {})) |  | ||||||
|         return instance |  | ||||||
|  |  | ||||||
|     def __init__(self, settings=None, run_config=None): |  | ||||||
|         self.settings = settings |  | ||||||
|         self.run_config = run_config |  | ||||||
|  |  | ||||||
|     def to_pod(self): |  | ||||||
|         return {'settings': self.settings.to_pod(), |  | ||||||
|                 'run_config': self.run_config.to_pod()} |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class Job(object): |  | ||||||
|  |  | ||||||
|     def __init__(self, spec, iteration, context): |  | ||||||
|         self.spec = spec |  | ||||||
|         self.iteration = iteration |  | ||||||
|         self.context = context |  | ||||||
|         self.status = 'new' |  | ||||||
|         self.workload = None |  | ||||||
|         self.output = None |  | ||||||
|  |  | ||||||
|     def load(self, target, loader=pluginloader): |  | ||||||
|         self.workload = loader.get_workload(self.spec.workload_name, |  | ||||||
|                                             target, |  | ||||||
|                                             **self.spec.workload_parameters) |  | ||||||
|         self.workload.init_resources(self.context) |  | ||||||
|         self.workload.validate() |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ConfigManager(object): |  | ||||||
|     """ |  | ||||||
|     Represents the run-time state of WA. Mostly used as a container for loaded |  | ||||||
|     configuration and discovered plugins. |  | ||||||
|  |  | ||||||
|     This exists outside of any command or run and is associated with the running |  | ||||||
|     instance of WA itself. |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def enabled_instruments(self): |  | ||||||
|         return self.jobs_config.enabled_instruments |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def job_specs(self): |  | ||||||
|         if not self._jobs_generated: |  | ||||||
|             msg = 'Attempting to access job specs before '\ |  | ||||||
|                   'jobs have been generated' |  | ||||||
|             raise RuntimeError(msg) |  | ||||||
|         return [j.spec for j in self._jobs] |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def jobs(self): |  | ||||||
|         if not self._jobs_generated: |  | ||||||
|             msg = 'Attempting to access jobs before '\ |  | ||||||
|                   'they have been generated' |  | ||||||
|             raise RuntimeError(msg) |  | ||||||
|         return self._jobs |  | ||||||
|  |  | ||||||
|     def __init__(self, settings=settings): |  | ||||||
|         self.settings = settings |  | ||||||
|         self.run_config = RunConfiguration() |  | ||||||
|         self.plugin_cache = PluginCache() |  | ||||||
|         self.jobs_config = JobGenerator(self.plugin_cache) |  | ||||||
|         self.loaded_config_sources = [] |  | ||||||
|         self._config_parser = ConfigParser() |  | ||||||
|         self._jobs = [] |  | ||||||
|         self._jobs_generated = False |  | ||||||
|         self.agenda = None |  | ||||||
|  |  | ||||||
|     def load_config_file(self, filepath): |  | ||||||
|         self._config_parser.load_from_path(self, filepath) |  | ||||||
|         self.loaded_config_sources.append(filepath) |  | ||||||
|  |  | ||||||
|     def load_config(self, values, source, wrap_exceptions=True): |  | ||||||
|         self._config_parser.load(self, values, source, wrap_exceptions=wrap_exceptions) |  | ||||||
|         self.loaded_config_sources.append(source) |  | ||||||
|  |  | ||||||
|     def get_plugin(self, name=None, kind=None, *args, **kwargs): |  | ||||||
|         return self.plugin_cache.get_plugin(name, kind, *args, **kwargs) |  | ||||||
|  |  | ||||||
|     def get_instruments(self, target): |  | ||||||
|         instruments = [] |  | ||||||
|         for name in self.enabled_instruments: |  | ||||||
|             instruments.append(self.get_plugin(name, kind='instrument',  |  | ||||||
|                                                target=target)) |  | ||||||
|         return instruments |  | ||||||
|  |  | ||||||
|     def finalize(self): |  | ||||||
|         if not self.agenda: |  | ||||||
|             msg = 'Attempting to finalize config before agenda has been set' |  | ||||||
|             raise RuntimeError(msg) |  | ||||||
|         self.run_config.merge_device_config(self.plugin_cache) |  | ||||||
|         return CombinedConfig(self.settings, self.run_config) |  | ||||||
|  |  | ||||||
|     def generate_jobs(self, context): |  | ||||||
|         job_specs = self.jobs_config.generate_job_specs(context.tm) |  | ||||||
|         exec_order = self.run_config.execution_order |  | ||||||
|         for spec, i in permute_iterations(job_specs, exec_order): |  | ||||||
|             job = Job(spec, i, context) |  | ||||||
|             job.load(context.tm.target) |  | ||||||
|             self._jobs.append(job) |  | ||||||
|         self._jobs_generated = True |  | ||||||
|  |  | ||||||
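| # Illustrative flow, not part of the original file (the file names and the |  | ||||||
| # context object are assumptions for the example): |  | ||||||
| # |  | ||||||
| #     config = ConfigManager() |  | ||||||
| #     config.load_config_file('config.yaml') |  | ||||||
| #     config.agenda = 'agenda.yaml'      # normally set by the agenda parser |  | ||||||
| #     combined = config.finalize()       # merged settings + run config |  | ||||||
| #     config.generate_jobs(context)      # context.tm must supply the target |  | ||||||
| #     specs = config.job_specs           # safe once jobs are generated |  | ||||||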
|  |  | ||||||
| def permute_by_job(specs): |  | ||||||
|     """ |  | ||||||
|     This is that "classic" implementation that executes all iterations of a |  | ||||||
|     workload spec before proceeding onto the next spec. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|     for spec in specs: |  | ||||||
|         for i in range(1, spec.iterations + 1): |  | ||||||
|             yield (spec, i) |  | ||||||
|   |  | ||||||
|  |  | ||||||
| def permute_by_iteration(specs): |  | ||||||
|     """ |  | ||||||
|     Runs the first iteration for all benchmarks first, before proceeding to the |  | ||||||
|     next iteration, i.e. A1, B1, C1, A2, B2, C2... instead of A1, A2, B1, B2, |  | ||||||
|     C1, C2... |  | ||||||
|  |  | ||||||
|     If multiple sections were specified in the agenda, this will run all |  | ||||||
|     sections for the first global spec first, followed by all sections for the |  | ||||||
|     second spec, etc. |  | ||||||
|  |  | ||||||
|     e.g. given sections X and Y, and global specs A and B, with 2 iterations, |  | ||||||
|     this will run |  | ||||||
|  |  | ||||||
|     X.A1, Y.A1, X.B1, Y.B1, X.A2, Y.A2, X.B2, Y.B2 |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|     groups = [list(g) for k, g in groupby(specs, lambda s: s.workload_id)] |  | ||||||
|  |  | ||||||
|     all_tuples = [] |  | ||||||
|     for spec in chain(*groups): |  | ||||||
|         all_tuples.append([(spec, i + 1)  |  | ||||||
|                            for i in xrange(spec.iterations)]) |  | ||||||
|     for t in chain(*map(list, izip_longest(*all_tuples))): |  | ||||||
|         if t is not None: |  | ||||||
|             yield t |  | ||||||
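|  |  | ||||||
| # Illustrative sketch, not part of the original file: the izip_longest |  | ||||||
| # round-robin used above, shown on plain lists. Each inner list holds all |  | ||||||
| # iterations of one spec; izip_longest deals them out one iteration at a time. |  | ||||||
| # |  | ||||||
| #     >>> from itertools import izip_longest, chain |  | ||||||
| #     >>> a = [('A', 1), ('A', 2)] |  | ||||||
| #     >>> b = [('B', 1), ('B', 2)] |  | ||||||
| #     >>> [t for t in chain(*map(list, izip_longest(a, b))) if t is not None] |  | ||||||
| #     [('A', 1), ('B', 1), ('A', 2), ('B', 2)] |  | ||||||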
|  |  | ||||||
|  |  | ||||||
| def permute_by_section(specs): |  | ||||||
|     """ |  | ||||||
|     Runs the first iteration for all benchmarks first, before proceeding to the |  | ||||||
|     next iteration, i.e. A1, B1, C1, A2, B2, C2... instead of A1, A2, B1, B2, |  | ||||||
|     C1, C2... |  | ||||||
|  |  | ||||||
|     If multiple sections were specified in the agenda, this will run all specs |  | ||||||
|     for the first section followed by all specs for the second section, etc. |  | ||||||
|  |  | ||||||
|     e.g. given sections X and Y, and global specs A and B, with 2 iterations, |  | ||||||
|     this will run |  | ||||||
|  |  | ||||||
|     X.A1, X.B1, Y.A1, Y.B1, X.A2, X.B2, Y.A2, Y.B2 |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|     groups = [list(g) for k, g in groupby(specs, lambda s: s.section_id)] |  | ||||||
|  |  | ||||||
|     all_tuples = [] |  | ||||||
|     for spec in chain(*groups): |  | ||||||
|         all_tuples.append([(spec, i + 1)  |  | ||||||
|                            for i in xrange(spec.iterations)]) |  | ||||||
|     for t in chain(*map(list, izip_longest(*all_tuples))): |  | ||||||
|         if t is not None: |  | ||||||
|             yield t |  | ||||||
|   |  | ||||||
|  |  | ||||||
| def permute_randomly(specs): |  | ||||||
|     """ |  | ||||||
|     This will generate a random permutation of specs/iteration tuples. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|     result = [] |  | ||||||
|     for spec in specs: |  | ||||||
|         for i in xrange(1, spec.iterations + 1): |  | ||||||
|             result.append((spec, i)) |  | ||||||
|     random.shuffle(result) |  | ||||||
|     for t in result: |  | ||||||
|         yield t |  | ||||||
|  |  | ||||||
|  |  | ||||||
| permute_map = { |  | ||||||
|     'by_iteration': permute_by_iteration, |  | ||||||
|     'by_job': permute_by_job, |  | ||||||
|     'by_section': permute_by_section, |  | ||||||
|     'random': permute_randomly, |  | ||||||
| } |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def permute_iterations(specs, exec_order): |  | ||||||
|     if exec_order not in permute_map: |  | ||||||
|         msg = 'Unknown execution order "{}"; must be in: {}' |  | ||||||
|         raise ValueError(msg.format(exec_order, permute_map.keys())) |  | ||||||
|     return permute_map[exec_order](specs) |  | ||||||
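|  |  | ||||||
|  |  | ||||||
| # Illustrative usage, not part of the original file (run_one() is a |  | ||||||
| # hypothetical runner): |  | ||||||
| # |  | ||||||
| #     for spec, i in permute_iterations(job_specs, 'by_iteration'): |  | ||||||
| #         run_one(spec, i) |  | ||||||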
| @@ -1,308 +0,0 @@ | |||||||
| #    Copyright 2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
| import os |  | ||||||
|  |  | ||||||
| from wlauto.exceptions import ConfigError |  | ||||||
| from wlauto.utils.serializer import read_pod, SerializerSyntaxError |  | ||||||
| from wlauto.utils.types import toggle_set, counter |  | ||||||
| from wlauto.core.configuration.configuration import JobSpec |  | ||||||
|  |  | ||||||
| # NOTE: DUPLICATE_ENTRY_ERROR is referenced by get_aliased_param() below but |  | ||||||
| # is not defined or imported in this file; the message here is a plausible |  | ||||||
| # stand-in so the module is self-contained. |  | ||||||
| DUPLICATE_ENTRY_ERROR = 'Only one of {} may be specified in a single entry' |  | ||||||
|  |  | ||||||
| ############### |  | ||||||
| ### Parsers ### |  | ||||||
| ############### |  | ||||||
|  |  | ||||||
| class ConfigParser(object): |  | ||||||
|  |  | ||||||
|     def load_from_path(self, state, filepath): |  | ||||||
|         self.load(state, _load_file(filepath, "Config"), filepath) |  | ||||||
|  |  | ||||||
|     def load(self, state, raw, source, wrap_exceptions=True):  # pylint: disable=too-many-branches |  | ||||||
|         try: |  | ||||||
|             if 'run_name' in raw: |  | ||||||
|                 msg = '"run_name" can only be specified in the config '\ |  | ||||||
|                       'section of an agenda' |  | ||||||
|                 raise ConfigError(msg) |  | ||||||
|  |  | ||||||
|             if 'id' in raw: |  | ||||||
|                 raise ConfigError('"id" cannot be set globally') |  | ||||||
|  |  | ||||||
|             merge_result_processors_instruments(raw) |  | ||||||
|  |  | ||||||
|             # Get WA core configuration |  | ||||||
|             for cfg_point in state.settings.configuration.itervalues(): |  | ||||||
|                 value = get_aliased_param(cfg_point, raw) |  | ||||||
|                 if value is not None: |  | ||||||
|                     state.settings.set(cfg_point.name, value) |  | ||||||
|  |  | ||||||
|             # Get run specific configuration |  | ||||||
|             for cfg_point in state.run_config.configuration.itervalues(): |  | ||||||
|                 value = get_aliased_param(cfg_point, raw) |  | ||||||
|                 if value is not None: |  | ||||||
|                     state.run_config.set(cfg_point.name, value) |  | ||||||
|  |  | ||||||
|             # Get global job spec configuration |  | ||||||
|             for cfg_point in JobSpec.configuration.itervalues(): |  | ||||||
|                 value = get_aliased_param(cfg_point, raw) |  | ||||||
|                 if value is not None: |  | ||||||
|                     state.jobs_config.set_global_value(cfg_point.name, value) |  | ||||||
|  |  | ||||||
|             for name, values in raw.iteritems(): |  | ||||||
|                 # Assume that all leftover config is for a plug-in or a global |  | ||||||
|                 # alias; it is up to PluginCache to enforce this assumption. |  | ||||||
|                 state.plugin_cache.add_configs(name, values, source) |  | ||||||
|  |  | ||||||
|         except ConfigError as e: |  | ||||||
|             if wrap_exceptions: |  | ||||||
|                 raise ConfigError('Error in "{}":\n{}'.format(source, str(e))) |  | ||||||
|             else: |  | ||||||
|                 raise e |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class AgendaParser(object): |  | ||||||
|  |  | ||||||
|     def load_from_path(self, state, filepath): |  | ||||||
|         raw = _load_file(filepath, 'Agenda') |  | ||||||
|         self.load(state, raw, filepath) |  | ||||||
|  |  | ||||||
|     def load(self, state, raw, source): |  | ||||||
|         try: |  | ||||||
|             if not isinstance(raw, dict): |  | ||||||
|                 raise ConfigError('Invalid agenda, top level entry must be a dict') |  | ||||||
|  |  | ||||||
|             self._populate_and_validate_config(state, raw, source) |  | ||||||
|             sections = self._pop_sections(raw) |  | ||||||
|             global_workloads = self._pop_workloads(raw) |  | ||||||
|  |  | ||||||
|             if raw: |  | ||||||
|                 msg = 'Invalid top level agenda entry(ies): "{}"' |  | ||||||
|                 raise ConfigError(msg.format('", "'.join(raw.keys()))) |  | ||||||
|  |  | ||||||
|             sect_ids, wkl_ids = self._collect_ids(sections, global_workloads) |  | ||||||
|             self._process_global_workloads(state, global_workloads, wkl_ids) |  | ||||||
|             self._process_sections(state, sections, sect_ids, wkl_ids) |  | ||||||
|  |  | ||||||
|             state.agenda = source |  | ||||||
|  |  | ||||||
|         except (ConfigError, SerializerSyntaxError) as e: |  | ||||||
|             raise ConfigError('Error in "{}":\n\t{}'.format(source, str(e))) |  | ||||||
|  |  | ||||||
|     def _populate_and_validate_config(self, state, raw, source): |  | ||||||
|         for name in ['config', 'global']: |  | ||||||
|             entry = raw.pop(name, None) |  | ||||||
|             if entry is None: |  | ||||||
|                 continue |  | ||||||
|  |  | ||||||
|             if not isinstance(entry, dict): |  | ||||||
|                 msg = 'Invalid entry "{}" - must be a dict' |  | ||||||
|                 raise ConfigError(msg.format(name)) |  | ||||||
|  |  | ||||||
|             if 'run_name' in entry: |  | ||||||
|                 state.run_config.set('run_name', entry.pop('run_name')) |  | ||||||
|  |  | ||||||
|             state.load_config(entry, source, wrap_exceptions=False) |  | ||||||
|  |  | ||||||
|     def _pop_sections(self, raw): |  | ||||||
|         sections = raw.pop("sections", []) |  | ||||||
|         if not isinstance(sections, list): |  | ||||||
|             raise ConfigError('Invalid entry "sections" - must be a list') |  | ||||||
|         return sections |  | ||||||
|  |  | ||||||
|     def _pop_workloads(self, raw): |  | ||||||
|         workloads = raw.pop("workloads", []) |  | ||||||
|         if not isinstance(workloads, list): |  | ||||||
|             raise ConfigError('Invalid entry "workloads" - must be a list') |  | ||||||
|         return workloads |  | ||||||
|  |  | ||||||
|     def _collect_ids(self, sections, global_workloads): |  | ||||||
|         seen_section_ids = set() |  | ||||||
|         seen_workload_ids = set() |  | ||||||
|  |  | ||||||
|         for workload in global_workloads: |  | ||||||
|             workload = _get_workload_entry(workload) |  | ||||||
|             _collect_valid_id(workload.get("id"), seen_workload_ids, "workload") |  | ||||||
|  |  | ||||||
|         for section in sections: |  | ||||||
|             _collect_valid_id(section.get("id"), seen_section_ids, "section") |  | ||||||
|             for workload in section["workloads"] if "workloads" in section else []: |  | ||||||
|                 workload = _get_workload_entry(workload) |  | ||||||
|                 _collect_valid_id(workload.get("id"), seen_workload_ids,  |  | ||||||
|                                   "workload") |  | ||||||
|  |  | ||||||
|         return seen_section_ids, seen_workload_ids |  | ||||||
|  |  | ||||||
|     def _process_global_workloads(self, state, global_workloads, seen_wkl_ids): |  | ||||||
|         for workload_entry in global_workloads: |  | ||||||
|             workload = _process_workload_entry(workload_entry, seen_wkl_ids, |  | ||||||
|                                                state.jobs_config) |  | ||||||
|             state.jobs_config.add_workload(workload) |  | ||||||
|  |  | ||||||
|     def _process_sections(self, state, sections, seen_sect_ids, seen_wkl_ids): |  | ||||||
|         for section in sections: |  | ||||||
|             workloads = [] |  | ||||||
|             for workload_entry in section.pop("workloads", []): |  | ||||||
|                 workload = _process_workload_entry(workload_entry, seen_wkl_ids, |  | ||||||
|                                                    state.jobs_config) |  | ||||||
|                 workloads.append(workload) |  | ||||||
|  |  | ||||||
|             section = _construct_valid_entry(section, seen_sect_ids,  |  | ||||||
|                                              "s", state.jobs_config) |  | ||||||
|             state.jobs_config.add_section(section, workloads) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| ######################## |  | ||||||
| ### Helper functions ### |  | ||||||
| ######################## |  | ||||||
|  |  | ||||||
| def get_aliased_param(cfg_point, d, default=None, pop=True): |  | ||||||
|     """ |  | ||||||
|     Given a ConfigurationPoint and a dict, this function will search the dict for |  | ||||||
|     the ConfigurationPoint's name/aliases. If more than one is found it will raise |  | ||||||
|     a ConfigError. If one (and only one) is found then it will return the value |  | ||||||
|     for the ConfigurationPoint. If neither the name nor any of the aliases is |  | ||||||
|     present in the dict, it will return the "default" parameter of this function. |  | ||||||
|     """ |  | ||||||
|     aliases = [cfg_point.name] + cfg_point.aliases |  | ||||||
|     alias_map = [a for a in aliases if a in d] |  | ||||||
|     if len(alias_map) > 1: |  | ||||||
|         raise ConfigError(DUPLICATE_ENTRY_ERROR.format(aliases)) |  | ||||||
|     elif alias_map: |  | ||||||
|         if pop: |  | ||||||
|             return d.pop(alias_map[0]) |  | ||||||
|         else: |  | ||||||
|             return d[alias_map[0]] |  | ||||||
|     else: |  | ||||||
|         return default |  | ||||||
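|  |  | ||||||
| # Illustrative usage, not part of the original file (the config point and the |  | ||||||
| # raw dict are invented; here 'instruments' is assumed to be an alias of |  | ||||||
| # 'instrumentation'): |  | ||||||
| # |  | ||||||
| #     >>> raw = {'instruments': ['cpufreq']} |  | ||||||
| #     >>> get_aliased_param(instrumentation_cfg_point, raw, default=[]) |  | ||||||
| #     ['cpufreq'] |  | ||||||
| #     >>> raw   # the matched key is popped by default |  | ||||||
| #     {} |  | ||||||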
|  |  | ||||||
|  |  | ||||||
| def _load_file(filepath, error_name): |  | ||||||
|     if not os.path.isfile(filepath): |  | ||||||
|         raise ValueError("{} does not exist".format(filepath)) |  | ||||||
|     try: |  | ||||||
|         raw = read_pod(filepath) |  | ||||||
|     except SerializerSyntaxError as e: |  | ||||||
|         raise ConfigError('Error parsing {} {}: {}'.format(error_name, filepath, e)) |  | ||||||
|     if not isinstance(raw, dict): |  | ||||||
|         message = '{} does not contain a valid {} structure; top level must be a dict.' |  | ||||||
|         raise ConfigError(message.format(filepath, error_name)) |  | ||||||
|     return raw |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def merge_result_processors_instruments(raw): |  | ||||||
|     instr_config = JobSpec.configuration['instrumentation'] |  | ||||||
|     instruments = toggle_set(get_aliased_param(instr_config, raw, default=[])) |  | ||||||
|     result_processors = toggle_set(raw.pop('result_processors', [])) |  | ||||||
|     if instruments and result_processors: |  | ||||||
|         conflicts = instruments.conflicts_with(result_processors) |  | ||||||
|         if conflicts: |  | ||||||
|             msg = '"instrumentation" and "result_processors" have '\ |  | ||||||
|                   'conflicting entries: {}' |  | ||||||
|             entries = ', '.join('"{}"'.format(c.strip("~")) for c in conflicts) |  | ||||||
|             raise ConfigError(msg.format(entries)) |  | ||||||
|     raw['instrumentation'] = instruments.merge_with(result_processors) |  | ||||||
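|  |  | ||||||
| # Illustrative example, not part of the original file: with |  | ||||||
| #     raw = {'instrumentation': ['cpufreq'], 'result_processors': ['csv']} |  | ||||||
| # the two lists are merged into raw['instrumentation'] as |  | ||||||
| # toggle_set(['cpufreq', 'csv']), whereas ['csv'] against ['~csv'] would be |  | ||||||
| # reported as a conflict. |  | ||||||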
|  |  | ||||||
|  |  | ||||||
| def _pop_aliased(d, names, entry_id): |  | ||||||
|     name_count = sum(1 for n in names if n in d) |  | ||||||
|     if name_count > 1: |  | ||||||
|         names_list = ', '.join(names) |  | ||||||
|         msg = 'Invalid workload entry "{}": at most one of ({}) must be specified.' |  | ||||||
|         raise ConfigError(msg.format(entry_id, names_list)) |  | ||||||
|     for name in names: |  | ||||||
|         if name in d: |  | ||||||
|             return d.pop(name) |  | ||||||
|     return None |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def _construct_valid_entry(raw, seen_ids, prefix, jobs_config): |  | ||||||
|     workload_entry = {} |  | ||||||
|  |  | ||||||
|     # Generate an automatic ID if the entry doesn't already have one |  | ||||||
|     if 'id' not in raw: |  | ||||||
|         while True: |  | ||||||
|             new_id = '{}{}'.format(prefix, counter(name=prefix)) |  | ||||||
|             if new_id not in seen_ids: |  | ||||||
|                 break |  | ||||||
|         workload_entry['id'] = new_id |  | ||||||
|         seen_ids.add(new_id) |  | ||||||
|     else: |  | ||||||
|         workload_entry['id'] = raw.pop('id') |  | ||||||
|  |  | ||||||
|     # Process instrumentation |  | ||||||
|     merge_result_processors_instruments(raw) |  | ||||||
|  |  | ||||||
|     # Validate all workload_entry |  | ||||||
|     for name, cfg_point in JobSpec.configuration.iteritems(): |  | ||||||
|         value = get_aliased_param(cfg_point, raw) |  | ||||||
|         if value is not None: |  | ||||||
|             value = cfg_point.kind(value) |  | ||||||
|             cfg_point.validate_value(name, value) |  | ||||||
|             workload_entry[name] = value |  | ||||||
|  |  | ||||||
|     wk_id = workload_entry['id'] |  | ||||||
|     param_names = ['workload_params', 'workload_parameters'] |  | ||||||
|     if prefix == 'wk': |  | ||||||
|         param_names +=  ['params', 'parameters'] |  | ||||||
|     workload_entry["workload_parameters"] = _pop_aliased(raw, param_names, wk_id) |  | ||||||
|  |  | ||||||
|     param_names = ['runtime_parameters', 'runtime_params'] |  | ||||||
|     if prefix == 's': |  | ||||||
|         param_names +=  ['params', 'parameters'] |  | ||||||
|     workload_entry["runtime_parameters"] = _pop_aliased(raw, param_names, wk_id) |  | ||||||
|  |  | ||||||
|     param_names = ['boot_parameters', 'boot_params'] |  | ||||||
|     workload_entry["boot_parameters"] = _pop_aliased(raw, param_names, wk_id) |  | ||||||
|  |  | ||||||
|     if "instrumentation" in workload_entry: |  | ||||||
|         jobs_config.update_enabled_instruments(workload_entry["instrumentation"]) |  | ||||||
|  |  | ||||||
|     # error if there are unknown workload_entry |  | ||||||
|     if raw: |  | ||||||
|         msg = 'Invalid entry(ies) in "{}": "{}"' |  | ||||||
|         raise ConfigError(msg.format(workload_entry['id'], ', '.join(raw.keys()))) |  | ||||||
|  |  | ||||||
|     return workload_entry |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def _collect_valid_id(entry_id, seen_ids, entry_type): |  | ||||||
|     if entry_id is None: |  | ||||||
|         return |  | ||||||
|     if entry_id in seen_ids: |  | ||||||
|         raise ConfigError('Duplicate {} ID "{}".'.format(entry_type, entry_id)) |  | ||||||
|     # "-" is reserved for joining section and workload IDs |  | ||||||
|     if "-" in entry_id: |  | ||||||
|         msg = 'Invalid {} ID "{}"; IDs cannot contain a "-"' |  | ||||||
|         raise ConfigError(msg.format(entry_type, entry_id)) |  | ||||||
|     if entry_id == "global": |  | ||||||
|         msg = 'Invalid {} ID "global"; is a reserved ID' |  | ||||||
|         raise ConfigError(msg.format(entry_type)) |  | ||||||
|     seen_ids.add(entry_id) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def _get_workload_entry(workload): |  | ||||||
|     if isinstance(workload, basestring): |  | ||||||
|         workload = {'name': workload} |  | ||||||
|     elif not isinstance(workload, dict): |  | ||||||
|         raise ConfigError('Invalid workload entry: "{}"'.format(workload)) |  | ||||||
|     return workload |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def _process_workload_entry(workload, seen_workload_ids, jobs_config): |  | ||||||
|     workload = _get_workload_entry(workload) |  | ||||||
|     workload = _construct_valid_entry(workload, seen_workload_ids,  |  | ||||||
|                                       "wk", jobs_config) |  | ||||||
|     return workload |  | ||||||
|  |  | ||||||
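| # Illustrative example, not part of the original file: a bare string entry in |  | ||||||
| # an agenda's "workloads" list, such as |  | ||||||
| #     - dhrystone |  | ||||||
| # is first normalised to {'name': 'dhrystone'}, then given an automatic ID |  | ||||||
| # such as "wk1" and validated field by field against JobSpec.configuration. |  | ||||||
|  |  | ||||||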
| @@ -1,210 +0,0 @@ | |||||||
| #    Copyright 2016 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
|  |  | ||||||
| from copy import copy |  | ||||||
| from collections import defaultdict |  | ||||||
|  |  | ||||||
| from wlauto.core import pluginloader |  | ||||||
| from wlauto.exceptions import ConfigError |  | ||||||
| from wlauto.utils.types import obj_dict |  | ||||||
| from devlib.utils.misc import memoized |  | ||||||
|  |  | ||||||
| GENERIC_CONFIGS = ["device_config", "workload_parameters", |  | ||||||
|                    "boot_parameters", "runtime_parameters"] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class PluginCache(object): |  | ||||||
|     """ |  | ||||||
|     The plugin cache is used to store configuration that cannot be processed at |  | ||||||
|     this stage, whether that's because it is not yet known whether it is needed |  | ||||||
|     (in the case of disabled plug-ins) or it is not known what it belongs to (in |  | ||||||
|     the case of "device_config" etc.). It also records where configuration came |  | ||||||
|     from, and the priority order of said sources. |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     def __init__(self, loader=pluginloader): |  | ||||||
|         self.loader = loader |  | ||||||
|         self.sources = [] |  | ||||||
|         self.plugin_configs = defaultdict(lambda: defaultdict(dict)) |  | ||||||
|         self.global_alias_values = defaultdict(dict) |  | ||||||
|  |  | ||||||
|         # Generate a mapping of what global aliases belong to |  | ||||||
|         self._global_alias_map = defaultdict(dict) |  | ||||||
|         self._list_of_global_aliases = set() |  | ||||||
|         for plugin in self.loader.list_plugins(): |  | ||||||
|             for param in plugin.parameters: |  | ||||||
|                 if param.global_alias: |  | ||||||
|                     self._global_alias_map[plugin.name][param.global_alias] = param |  | ||||||
|                     self._list_of_global_aliases.add(param.global_alias) |  | ||||||
|  |  | ||||||
|     def add_source(self, source): |  | ||||||
|         if source in self.sources: |  | ||||||
|             raise Exception("Source has already been added.") |  | ||||||
|         self.sources.append(source) |  | ||||||
|  |  | ||||||
|     def add_global_alias(self, alias, value, source): |  | ||||||
|         if source not in self.sources: |  | ||||||
|             msg = "Source '{}' has not been added to the plugin cache." |  | ||||||
|             raise RuntimeError(msg.format(source)) |  | ||||||
|  |  | ||||||
|         if not self.is_global_alias(alias): |  | ||||||
|             msg = "'{} is not a valid global alias'" |  | ||||||
|             raise RuntimeError(msg.format(alias)) |  | ||||||
|  |  | ||||||
|         self.global_alias_values[alias][source] = value |  | ||||||
|  |  | ||||||
|     def add_configs(self, plugin_name, values, source): |  | ||||||
|         if self.is_global_alias(plugin_name): |  | ||||||
|             self.add_global_alias(plugin_name, values, source) |  | ||||||
|             return |  | ||||||
|         for name, value in values.iteritems(): |  | ||||||
|             self.add_config(plugin_name, name, value, source) |  | ||||||
|  |  | ||||||
|     def add_config(self, plugin_name, name, value, source): |  | ||||||
|         if source not in self.sources: |  | ||||||
|             msg = "Source '{}' has not been added to the plugin cache." |  | ||||||
|             raise RuntimeError(msg.format(source)) |  | ||||||
|  |  | ||||||
|         if (not self.loader.has_plugin(plugin_name) and  |  | ||||||
|                 plugin_name not in GENERIC_CONFIGS): |  | ||||||
|             msg = 'configuration provided for unknown plugin "{}"' |  | ||||||
|             raise ConfigError(msg.format(plugin_name)) |  | ||||||
|  |  | ||||||
|         if (plugin_name not in GENERIC_CONFIGS and |  | ||||||
|                 name not in self.get_plugin_parameters(plugin_name)): |  | ||||||
|             msg = "'{}' is not a valid parameter for '{}'" |  | ||||||
|             raise ConfigError(msg.format(name, plugin_name)) |  | ||||||
|  |  | ||||||
|         self.plugin_configs[plugin_name][source][name] = value |  | ||||||
|  |  | ||||||
|     def is_global_alias(self, name): |  | ||||||
|         return name in self._list_of_global_aliases |  | ||||||
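|  |  | ||||||
| # Illustrative flow, not part of the original file (the plugin and parameter |  | ||||||
| # names are examples): sources must be registered, in priority order, before |  | ||||||
| # configuration is added against them. |  | ||||||
| # |  | ||||||
| #     cache = PluginCache() |  | ||||||
| #     cache.add_source('config.yaml')   # lower priority |  | ||||||
| #     cache.add_source('agenda.yaml')   # higher priority |  | ||||||
| #     cache.add_configs('trace-cmd', {'buffer_size': 42}, 'agenda.yaml') |  | ||||||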
|  |  | ||||||
|     def get_plugin_config(self, plugin_name, generic_name=None): |  | ||||||
|         config = obj_dict(not_in_dict=['name']) |  | ||||||
|         config.name = plugin_name |  | ||||||
|  |  | ||||||
|         # Load plugin defaults |  | ||||||
|         cfg_points = self.get_plugin_parameters(plugin_name) |  | ||||||
|         for cfg_point in cfg_points.itervalues(): |  | ||||||
|             cfg_point.set_value(config, check_mandatory=False) |  | ||||||
|  |  | ||||||
|         # Merge global aliases |  | ||||||
|         for alias, param in self._global_alias_map[plugin_name].iteritems(): |  | ||||||
|             if alias in self.global_alias_values: |  | ||||||
|                 for source in self.sources: |  | ||||||
|                     if source not in self.global_alias_values[alias]: |  | ||||||
|                         continue |  | ||||||
|                     val = self.global_alias_values[alias][source] |  | ||||||
|                     param.set_value(config, value=val) |  | ||||||
|  |  | ||||||
|         # Merge user config |  | ||||||
|         # Perform a simple merge with the order of sources representing priority |  | ||||||
|         if generic_name is None: |  | ||||||
|             plugin_config = self.plugin_configs[plugin_name] |  | ||||||
|             for source in self.sources: |  | ||||||
|                 if source not in plugin_config: |  | ||||||
|                     continue |  | ||||||
|                 for name, value in plugin_config[source].iteritems(): |  | ||||||
|                     cfg_points[name].set_value(config, value=value) |  | ||||||
|         # A more complicated merge that involves priority of sources and specificity |  | ||||||
|         else: |  | ||||||
|             self._merge_using_priority_specificity(plugin_name, generic_name, config) |  | ||||||
|  |  | ||||||
|         return config |  | ||||||
|  |  | ||||||
|     def get_plugin(self, name, kind=None, *args, **kwargs): |  | ||||||
|         config = self.get_plugin_config(name) |  | ||||||
|         kwargs = dict(config.items() + kwargs.items()) |  | ||||||
|         return self.loader.get_plugin(name, kind=kind, *args, **kwargs) |  | ||||||
|  |  | ||||||
|     @memoized |  | ||||||
|     def get_plugin_parameters(self, name): |  | ||||||
|         params = self.loader.get_plugin_class(name).parameters |  | ||||||
|         return {param.name: param for param in params} |  | ||||||
|  |  | ||||||
|     # pylint: disable=too-many-nested-blocks, too-many-branches |  | ||||||
|     def _merge_using_priority_specificity(self, specific_name,  |  | ||||||
|                                           generic_name, final_config): |  | ||||||
|         """ |  | ||||||
|         WA configuration can come from various sources of increasing priority, |  | ||||||
|         as well as being specified in a generic and specific manner (e.g. |  | ||||||
|         ``device_config`` and ``nexus10`` respectively). WA has two rules for |  | ||||||
|         the priority of configuration: |  | ||||||
|  |  | ||||||
|             - Configuration from higher priority sources overrides |  | ||||||
|               configuration from lower priority sources. |  | ||||||
|             - More specific configuration overrides less specific configuration. |  | ||||||
|  |  | ||||||
|         These two rules come into conflict when a generic configuration is |  | ||||||
|         given in a config source of high priority and a specific configuration |  | ||||||
|         is given in a config source of lower priority. In this situation it is |  | ||||||
|         not possible to know the end user's intention, so WA will raise an |  | ||||||
|         error. |  | ||||||
|  |  | ||||||
|         :param generic_name: The name of the generic configuration |  | ||||||
|                              e.g ``device_config`` |  | ||||||
|         :param specific_name: The name of the specific configuration used |  | ||||||
|                               e.g ``nexus10`` |  | ||||||
|         :param final_config: An ``obj_dict`` that is populated in place with |  | ||||||
|                              the fully merged and validated configuration |  | ||||||
|                              (nothing is returned). |  | ||||||
|         """ |  | ||||||
|         generic_config = copy(self.plugin_configs[generic_name]) |  | ||||||
|         specific_config = copy(self.plugin_configs[specific_name]) |  | ||||||
|         cfg_points = self.get_plugin_parameters(specific_name) |  | ||||||
|         sources = self.sources |  | ||||||
|         seen_specific_config = defaultdict(list) |  | ||||||
|  |  | ||||||
|         # set_value uses the 'name' attribute of the passed object in its error |  | ||||||
|         # messages; to ensure those messages make sense, the name has to be |  | ||||||
|         # changed several times during this function. |  | ||||||
|         final_config.name = specific_name |  | ||||||
|  |  | ||||||
|         # pylint: disable=too-many-nested-blocks |  | ||||||
|         for source in sources: |  | ||||||
|             try: |  | ||||||
|                 if source in generic_config: |  | ||||||
|                     final_config.name = generic_name |  | ||||||
|                     for name, cfg_point in cfg_points.iteritems(): |  | ||||||
|                         if name in generic_config[source]: |  | ||||||
|                             if name in seen_specific_config: |  | ||||||
|                                 msg = ('"{generic_name}" configuration "{config_name}" has already been ' |  | ||||||
|                                        'specified more specifically for {specific_name} in:\n\t\t{sources}') |  | ||||||
|                                 msg = msg.format(generic_name=generic_name, |  | ||||||
|                                                  config_name=name, |  | ||||||
|                                                  specific_name=specific_name, |  | ||||||
|                                                  sources=", ".join(seen_specific_config[name])) |  | ||||||
|                                 raise ConfigError(msg) |  | ||||||
|                             value = generic_config[source][name] |  | ||||||
|                             cfg_point.set_value(final_config, value, check_mandatory=False) |  | ||||||
|  |  | ||||||
|                 if source in specific_config: |  | ||||||
|                     final_config.name = specific_name |  | ||||||
|                     for name, cfg_point in cfg_points.iteritems(): |  | ||||||
|                         if name in specific_config[source]: |  | ||||||
|                             seen_specific_config[name].append(str(source)) |  | ||||||
|                             value = specific_config[source][name] |  | ||||||
|                             cfg_point.set_value(final_config, value, check_mandatory=False) |  | ||||||
|  |  | ||||||
|             except ConfigError as e: |  | ||||||
|                 raise ConfigError('Error in "{}":\n\t{}'.format(source, str(e))) |  | ||||||
|  |  | ||||||
|         # Validate final configuration |  | ||||||
|         final_config.name = specific_name |  | ||||||
|         for cfg_point in cfg_points.itervalues(): |  | ||||||
|             cfg_point.validate(final_config) |  | ||||||
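|  |  | ||||||
|  |  | ||||||
| # Illustrative example, not part of the original file. With sources added in |  | ||||||
| # priority order ['config.yaml', 'agenda.yaml'] (later means higher priority): |  | ||||||
| # |  | ||||||
| #   - a 'nexus10' setting in agenda.yaml overrides a 'device_config' setting |  | ||||||
| #     in config.yaml: both rules agree (higher priority, more specific); |  | ||||||
| #   - a 'nexus10' setting in config.yaml combined with the same setting under |  | ||||||
| #     'device_config' in agenda.yaml is the ambiguous case described above, |  | ||||||
| #     and raises a ConfigError. |  | ||||||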
| @@ -1,89 +0,0 @@ | |||||||
| #    Copyright 2016 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class JobSpecSource(object): |  | ||||||
|  |  | ||||||
|     kind = "" |  | ||||||
|  |  | ||||||
|     def __init__(self, config, parent=None): |  | ||||||
|         self.config = config |  | ||||||
|         self.parent = parent |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def id(self): |  | ||||||
|         return self.config['id'] |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def name(self): |  | ||||||
|         raise NotImplementedError() |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class WorkloadEntry(JobSpecSource): |  | ||||||
|     kind = "workload" |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def name(self): |  | ||||||
|         if self.parent.id == "global": |  | ||||||
|             return 'workload "{}"'.format(self.id) |  | ||||||
|         else: |  | ||||||
|             return 'workload "{}" from section "{}"'.format(self.id, self.parent.id) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class SectionNode(JobSpecSource): |  | ||||||
|  |  | ||||||
|     kind = "section" |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def name(self): |  | ||||||
|         if self.id == "global": |  | ||||||
|             return "globally specified configuration" |  | ||||||
|         else: |  | ||||||
|             return 'section "{}"'.format(self.id) |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def is_leaf(self): |  | ||||||
|         return not bool(self.children) |  | ||||||
|  |  | ||||||
|     def __init__(self, config, parent=None): |  | ||||||
|         super(SectionNode, self).__init__(config, parent=parent) |  | ||||||
|         self.workload_entries = [] |  | ||||||
|         self.children = [] |  | ||||||
|  |  | ||||||
|     def add_section(self, section): |  | ||||||
|         new_node = SectionNode(section, parent=self) |  | ||||||
|         self.children.append(new_node) |  | ||||||
|         return new_node |  | ||||||
|  |  | ||||||
|     def add_workload(self, workload_config): |  | ||||||
|         self.workload_entries.append(WorkloadEntry(workload_config, self)) |  | ||||||
|  |  | ||||||
|     def descendants(self): |  | ||||||
|         for child in self.children: |  | ||||||
|             for n in child.descendants(): |  | ||||||
|                 yield n |  | ||||||
|             yield child |  | ||||||
|  |  | ||||||
|     def ancestors(self): |  | ||||||
|         if self.parent is not None: |  | ||||||
|             yield self.parent |  | ||||||
|             for ancestor in self.parent.ancestors(): |  | ||||||
|                 yield ancestor |  | ||||||
|  |  | ||||||
|     def leaves(self): |  | ||||||
|         if self.is_leaf: |  | ||||||
|             yield self |  | ||||||
|         else: |  | ||||||
|             for n in self.descendants(): |  | ||||||
|                 if n.is_leaf: |  | ||||||
|                     yield n |  | ||||||
| @@ -1,198 +0,0 @@ | |||||||
| from copy import copy |  | ||||||
|  |  | ||||||
| from wlauto.core.plugin import Plugin, Parameter |  | ||||||
| from wlauto.core.configuration.configuration import RuntimeParameter |  | ||||||
| from wlauto.exceptions import ConfigError |  | ||||||
| from wlauto.utils.types import list_of_integers, list_of, caseless_string |  | ||||||
|  |  | ||||||
| from devlib.platform import Platform |  | ||||||
| from devlib.target import AndroidTarget, Cpuinfo, KernelVersion, KernelConfig |  | ||||||
|  |  | ||||||
| __all__ = ['RuntimeParameter', 'CoreParameter', 'DeviceManager', 'TargetInfo'] |  | ||||||
|  |  | ||||||
| UNKNOWN_RTP = 'Unknown runtime parameter "{}"' |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class TargetInfo(object): |  | ||||||
|  |  | ||||||
|     @staticmethod |  | ||||||
|     def from_pod(pod): |  | ||||||
|         instance = TargetInfo() |  | ||||||
|         instance.target = pod['target'] |  | ||||||
|         instance.abi = pod['abi'] |  | ||||||
|         instance.cpuinfo = Cpuinfo(pod['cpuinfo']) |  | ||||||
|         instance.os = pod['os'] |  | ||||||
|         instance.os_version = pod['os_version'] |  | ||||||
|         instance.is_rooted = pod['is_rooted'] |  | ||||||
|         instance.kernel_version = KernelVersion(pod['kernel_release'],  |  | ||||||
|                                                 pod['kernel_version']) |  | ||||||
|         instance.kernel_config = KernelConfig(pod['kernel_config']) |  | ||||||
|  |  | ||||||
|         if pod["target"] == "AndroidTarget": |  | ||||||
|             instance.screen_resolution = pod['screen_resolution'] |  | ||||||
|             instance.prop = pod['prop'] |  | ||||||
|             instance.android_id = pod['android_id'] |  | ||||||
|  |  | ||||||
|         return instance |  | ||||||
|  |  | ||||||
|     def __init__(self, target=None): |  | ||||||
|         if target: |  | ||||||
|             self.target = target.__class__.__name__ |  | ||||||
|             self.cpuinfo = target.cpuinfo |  | ||||||
|             self.os = target.os |  | ||||||
|             self.os_version = target.os_version |  | ||||||
|             self.abi = target.abi |  | ||||||
|             self.is_rooted = target.is_rooted |  | ||||||
|             self.kernel_version = target.kernel_version |  | ||||||
|             self.kernel_config = target.config |  | ||||||
|  |  | ||||||
|             if isinstance(target, AndroidTarget): |  | ||||||
|                 self.screen_resolution = target.screen_resolution |  | ||||||
|                 self.prop = target.getprop() |  | ||||||
|                 self.android_id = target.android_id |  | ||||||
|  |  | ||||||
|         else: |  | ||||||
|             self.target = None |  | ||||||
|             self.cpuinfo = None |  | ||||||
|             self.os = None |  | ||||||
|             self.os_version = None |  | ||||||
|             self.abi = None |  | ||||||
|             self.is_rooted = None |  | ||||||
|             self.kernel_version = None |  | ||||||
|             self.kernel_config = None |  | ||||||
|  |  | ||||||
|             # target is None on this branch, so Android-specific attributes |  | ||||||
|             # cannot be detected; default them unconditionally. |  | ||||||
|             self.screen_resolution = None |  | ||||||
|             self.prop = None |  | ||||||
|             self.android_id = None |  | ||||||
|  |  | ||||||
|     def to_pod(self): |  | ||||||
|         pod = {} |  | ||||||
|         pod['target'] = self.target |  | ||||||
|         pod['abi'] = self.abi |  | ||||||
|         pod['cpuinfo'] = self.cpuinfo.sections |  | ||||||
|         pod['os'] = self.os |  | ||||||
|         pod['os_version'] = self.os_version |  | ||||||
|         pod['is_rooted'] = self.is_rooted |  | ||||||
|         pod['kernel_release'] = self.kernel_version.release |  | ||||||
|         pod['kernel_version'] = self.kernel_version.version |  | ||||||
|         pod['kernel_config'] = dict(self.kernel_config.iteritems()) |  | ||||||
|  |  | ||||||
|         if self.target == "AndroidTarget": |  | ||||||
|             pod['screen_resolution'] = self.screen_resolution |  | ||||||
|             pod['prop'] = self.prop |  | ||||||
|             pod['android_id'] = self.android_id |  | ||||||
|  |  | ||||||
|         return pod |  | ||||||
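|  |  | ||||||
|  |  | ||||||
| # Illustrative sketch, not part of the original file: TargetInfo round-trips |  | ||||||
| # through its pod form ("target" here stands for a connected devlib target). |  | ||||||
| # |  | ||||||
| #     info = TargetInfo(target) |  | ||||||
| #     restored = TargetInfo.from_pod(info.to_pod()) |  | ||||||
| #     assert restored.kernel_version.release == info.kernel_version.release |  | ||||||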
|  |  | ||||||
|  |  | ||||||
| class DeviceManager(Plugin): |  | ||||||
|  |  | ||||||
|     kind = "manager" |  | ||||||
|     name = None |  | ||||||
|     target_type = None |  | ||||||
|     platform_type = Platform |  | ||||||
|     has_gpu = None |  | ||||||
|     path_module = None |  | ||||||
|     info = None |  | ||||||
|  |  | ||||||
|     parameters = [ |  | ||||||
|         Parameter('core_names', kind=list_of(caseless_string), |  | ||||||
|                   description=""" |  | ||||||
|                   This is a list of all cpu cores on the device with each |  | ||||||
|                   element being the core type, e.g. ``['a7', 'a7', 'a15']``. The |  | ||||||
|                   order of the cores must match the order they are listed in |  | ||||||
|                   ``'/sys/devices/system/cpu'``. So in this case, ``'cpu0'`` must |  | ||||||
|                   be an A7 core, and ``'cpu2'`` an A15. |  | ||||||
|                   """), |  | ||||||
|         Parameter('core_clusters', kind=list_of_integers, |  | ||||||
|                   description=""" |  | ||||||
|                   This is a list indicating the cluster affinity of the CPU cores, |  | ||||||
|                   each element corresponding to the cluster ID of the core corresponding |  | ||||||
|                   to its index. E.g. ``[0, 0, 1]`` indicates that cpu0 and cpu1 are on |  | ||||||
|                   cluster 0, while cpu2 is on cluster 1. If this is not specified, this |  | ||||||
|                   will be inferred from ``core_names`` if possible (assuming all cores with |  | ||||||
|                   the same name are on the same cluster). |  | ||||||
|                   """), |  | ||||||
|         Parameter('working_directory', |  | ||||||
|                   description=''' |  | ||||||
|                   Working directory to be used by WA. This must be in a location where the specified user |  | ||||||
|                   has write permissions. This will default to /home/<username>/wa (or to /root/wa, if |  | ||||||
|                   username is 'root'). |  | ||||||
|                   '''), |  | ||||||
|         Parameter('binaries_directory', |  | ||||||
|                   description='Location of executable binaries on this device (must be in PATH).'), |  | ||||||
|     ] |  | ||||||
|     modules = [] |  | ||||||
|  |  | ||||||
|     runtime_parameter_managers = [ |  | ||||||
|     ] |  | ||||||
|  |  | ||||||
|     def __init__(self): |  | ||||||
|         super(DeviceManager, self).__init__() |  | ||||||
|         self.runtime_parameter_values = None |  | ||||||
|         self._written_sysfiles = []  # recorded by set_sysfile_values() |  | ||||||
|  |  | ||||||
|     # Framework |  | ||||||
|  |  | ||||||
|     def connect(self): |  | ||||||
|         raise NotImplementedError("connect method must be implemented for device managers") |  | ||||||
|  |  | ||||||
|     def initialize(self, context): |  | ||||||
|         super(DeviceManager, self).initialize(context) |  | ||||||
|         self.info = TargetInfo(self.target) |  | ||||||
|         self.target.setup() |  | ||||||
|  |  | ||||||
|     def start(self): |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     def stop(self): |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     def validate(self): |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     # Runtime Parameters |  | ||||||
|  |  | ||||||
|     def merge_runtime_parameters(self, params): |  | ||||||
|         merged_values = {} |  | ||||||
|         for source, values in params.iteritems(): |  | ||||||
|             for name, value in values: |  | ||||||
|                 for rtpm in self.runtime_parameter_managers: |  | ||||||
|                     if rtpm.match(name): |  | ||||||
|                         rtpm.update_value(name, value, source, merged_values) |  | ||||||
|                         break |  | ||||||
|                 else: |  | ||||||
|                     msg = 'Unknown runtime parameter "{}" in "{}"' |  | ||||||
|                     raise ConfigError(msg.format(name, source)) |  | ||||||
|         return merged_values |  | ||||||
|  |  | ||||||
|     def static_runtime_parameter_validation(self, params): |  | ||||||
|         params = copy(params) |  | ||||||
|         for rtpm in self.runtime_parameter_managers: |  | ||||||
|             rtpm.static_validation(params) |  | ||||||
|         if params: |  | ||||||
|             msg = 'Unknown runtime_parameters for "{}": "{}"' |  | ||||||
|             raise ConfigError(msg.format(self.name, '", "'.join(params.iterkeys()))) |  | ||||||
|  |  | ||||||
|     def dynamic_runtime_parameter_validation(self, params): |  | ||||||
|         for rtpm in self.runtime_parameter_managers: |  | ||||||
|             rtpm.dynamic_validation(params) |  | ||||||
|  |  | ||||||
|     def commit_runtime_parameters(self, params): |  | ||||||
|         params = copy(params) |  | ||||||
|         for rtpm in self.runtime_parameter_managers: |  | ||||||
|             rtpm.commit(params) |  | ||||||
|  |  | ||||||
|     # Runtime parameter getters/setters |  | ||||||
|     def get_sysfile_values(self): |  | ||||||
|         return self._written_sysfiles |  | ||||||
|  |  | ||||||
|     def set_sysfile_values(self, params): |  | ||||||
|         for sysfile, value in params.iteritems(): |  | ||||||
|             verify = not sysfile.endswith('!') |  | ||||||
|             sysfile = sysfile.rstrip('!') |  | ||||||
|             self._written_sysfiles.append((sysfile, value)) |  | ||||||
|             self.target.write_value(sysfile, value, verify=verify) |  | ||||||
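|  |  | ||||||
| # Illustrative usage, not part of the original file: a trailing '!' on a |  | ||||||
| # sysfile path suppresses the write-back verification (paths are examples). |  | ||||||
| # |  | ||||||
| #     dm.set_sysfile_values({ |  | ||||||
| #         '/sys/devices/system/cpu/cpu0/online': 1, |  | ||||||
| #         '/proc/sys/kernel/sched_child_runs_first!': 1, |  | ||||||
| #     }) |  | ||||||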
| @@ -1,108 +0,0 @@ | |||||||
| #    Copyright 2013-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
| import sys |  | ||||||
| import argparse |  | ||||||
| import logging |  | ||||||
| import os |  | ||||||
| import subprocess |  | ||||||
| import warnings |  | ||||||
|  |  | ||||||
| from wlauto.core import pluginloader |  | ||||||
| from wlauto.core.command import init_argument_parser |  | ||||||
| from wlauto.core.configuration import settings |  | ||||||
| from wlauto.core.configuration.manager import ConfigManager |  | ||||||
| from wlauto.core.host import init_user_directory |  | ||||||
| from wlauto.exceptions import WAError, DevlibError, ConfigError |  | ||||||
| from wlauto.utils.doc import format_body |  | ||||||
| from wlauto.utils.log import init_logging |  | ||||||
| from wlauto.utils.misc import get_traceback |  | ||||||
|  |  | ||||||
| warnings.filterwarnings(action='ignore', category=UserWarning, module='zope') |  | ||||||
|  |  | ||||||
|  |  | ||||||
| logger = logging.getLogger('command_line') |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def load_commands(subparsers): |  | ||||||
|     commands = {} |  | ||||||
|     for command in pluginloader.list_commands(): |  | ||||||
|         commands[command.name] = pluginloader.get_command(command.name,  |  | ||||||
|                                                           subparsers=subparsers) |  | ||||||
|     return commands |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def main(): |  | ||||||
|     config = ConfigManager() |  | ||||||
|  |  | ||||||
|     if not os.path.exists(settings.user_directory): |  | ||||||
|         init_user_directory() |  | ||||||
|  |  | ||||||
|     try: |  | ||||||
|  |  | ||||||
|         description = ("Execute automated workloads on a remote device and process " |  | ||||||
|                        "the resulting output.\n\nUse \"wa <subcommand> -h\" to see " |  | ||||||
|                        "help for individual subcommands.") |  | ||||||
|         parser = argparse.ArgumentParser(description=format_body(description, 80), |  | ||||||
|                                          prog='wa', |  | ||||||
|                                          formatter_class=argparse.RawDescriptionHelpFormatter, |  | ||||||
|                                          ) |  | ||||||
|         init_argument_parser(parser) |  | ||||||
|         # each command will add its own subparser |  | ||||||
|         commands = load_commands(parser.add_subparsers(dest='command'))   |  | ||||||
|  |  | ||||||
|         args = parser.parse_args() |  | ||||||
|  |  | ||||||
|         settings.set("verbosity", args.verbose) |  | ||||||
|  |  | ||||||
|         config.load_config_file(settings.user_config_file) |  | ||||||
|         for config_file in args.config: |  | ||||||
|             if not os.path.exists(config_file): |  | ||||||
|                 raise ConfigError("Config file {} not found".format(config_file)) |  | ||||||
|             config.load_config_file(config_file) |  | ||||||
|  |  | ||||||
|         init_logging(settings.verbosity) |  | ||||||
|  |  | ||||||
|         command = commands[args.command] |  | ||||||
|         sys.exit(command.execute(config, args)) |  | ||||||
|  |  | ||||||
|     except KeyboardInterrupt: |  | ||||||
|         logging.info('Got CTRL-C. Aborting.') |  | ||||||
|         sys.exit(3) |  | ||||||
|     except (WAError, DevlibError) as e: |  | ||||||
|         logging.critical(e) |  | ||||||
|         sys.exit(1) |  | ||||||
|     except subprocess.CalledProcessError as e: |  | ||||||
|         tb = get_traceback() |  | ||||||
|         logging.critical(tb) |  | ||||||
|         command = e.cmd |  | ||||||
|         if isinstance(command, list): |  | ||||||
|             command = ' '.join(command) |  | ||||||
|         message = 'Command \'{}\' returned non-zero exit status {}\nOUTPUT:\n{}\n' |  | ||||||
|         logging.critical(message.format(command, e.returncode, e.output)) |  | ||||||
|         sys.exit(2) |  | ||||||
|     except SyntaxError as e: |  | ||||||
|         tb = get_traceback() |  | ||||||
|         logging.critical(tb) |  | ||||||
|         message = 'Syntax Error in {}, line {}, offset {}:' |  | ||||||
|         logging.critical(message.format(e.filename, e.lineno, e.offset)) |  | ||||||
|         logging.critical('\t{}'.format(e.msg)) |  | ||||||
|         sys.exit(2) |  | ||||||
|     except Exception as e:  # pylint: disable=broad-except |  | ||||||
|         tb = get_traceback() |  | ||||||
|         logging.critical(tb) |  | ||||||
|         logging.critical('{}({})'.format(e.__class__.__name__, e)) |  | ||||||
|         sys.exit(2) |  | ||||||
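|  |  | ||||||
| # For reference, the exit codes established by the handlers above: |  | ||||||
| #   0 - the invoked command completed successfully |  | ||||||
| #   1 - a WAError or DevlibError was raised |  | ||||||
| #   2 - a subprocess failure, a SyntaxError, or any other unexpected error |  | ||||||
| #   3 - the run was aborted with CTRL-C |  | ||||||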
| @@ -1,875 +0,0 @@ | |||||||
| #    Copyright 2013-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
| # pylint: disable=no-member |  | ||||||
|  |  | ||||||
| """ |  | ||||||
| This module contains the execution logic for Workload Automation. It defines the |  | ||||||
| following actors: |  | ||||||
|  |  | ||||||
|     WorkloadSpec: Identifies the workload to be run and defines parameters under |  | ||||||
|                   which it should be executed. |  | ||||||
|  |  | ||||||
|     Executor: Responsible for the overall execution process. It instantiates |  | ||||||
|               and/or initialises the other actors, does any necessary validation |  | ||||||
|               and kicks off the whole process. |  | ||||||
|  |  | ||||||
|     Execution Context: Provides information about the current state of run |  | ||||||
|                        execution to instrumentation. |  | ||||||
|  |  | ||||||
|     RunInfo: Information about the current run. |  | ||||||
|  |  | ||||||
|     Runner: This executes workload specs that are passed to it. It goes through |  | ||||||
|             stages of execution, emitting an appropriate signal at each step to |  | ||||||
|             allow instrumentation to do its stuff. |  | ||||||
|  |  | ||||||
| """ |  | ||||||
| import logging |  | ||||||
| import os |  | ||||||
| import random |  | ||||||
| import subprocess |  | ||||||
| import uuid |  | ||||||
| from collections import Counter, defaultdict, OrderedDict |  | ||||||
| from contextlib import contextmanager |  | ||||||
| from copy import copy |  | ||||||
| from datetime import datetime |  | ||||||
| from itertools import izip_longest |  | ||||||
|  |  | ||||||
| import wlauto.core.signal as signal |  | ||||||
| from wlauto.core import instrumentation |  | ||||||
| from wlauto.core import pluginloader |  | ||||||
| from wlauto.core.configuration import settings |  | ||||||
| from wlauto.core.device_manager import TargetInfo |  | ||||||
| from wlauto.core.plugin import Artifact |  | ||||||
| from wlauto.core.resolver import ResourceResolver |  | ||||||
| from wlauto.core.result import ResultManager, IterationResult, RunResult |  | ||||||
| from wlauto.exceptions import (WAError, ConfigError, TimeoutError, InstrumentError, |  | ||||||
|                                DeviceError, DeviceNotRespondingError) |  | ||||||
| from wlauto.utils.misc import (ensure_directory_exists as _d,  |  | ||||||
|                                get_traceback, format_duration) |  | ||||||
| from wlauto.utils.serializer import json |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # The maximum number of reboot attempts for an iteration. |  | ||||||
| MAX_REBOOT_ATTEMPTS = 3 |  | ||||||
|  |  | ||||||
| # If something went wrong during device initialization, wait this |  | ||||||
| # long (in seconds) before retrying. This is necessary, as retrying |  | ||||||
| # immediately may not give the device enough time to recover to be able |  | ||||||
| # to reboot. |  | ||||||
| REBOOT_DELAY = 3 |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ExecutionContext(object): |  | ||||||
|  |  | ||||||
|  |  | ||||||
|     def __init__(self, cm, tm, output): |  | ||||||
|         self.logger = logging.getLogger('ExecContext') |  | ||||||
|         self.cm = cm |  | ||||||
|         self.tm = tm |  | ||||||
|         self.output = output |  | ||||||
|         self.logger.debug('Loading resource discoverers') |  | ||||||
|         self.resolver = ResourceResolver(cm) |  | ||||||
|         self.resolver.load() |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class OldExecutionContext(object): |  | ||||||
|     """ |  | ||||||
|     Provides a context for instrumentation. Keeps track of things like |  | ||||||
|     current workload and iteration. |  | ||||||
|  |  | ||||||
|     This class also provides two status members that can be used by workloads |  | ||||||
|     and instrumentation to keep track of arbitrary state. ``result`` |  | ||||||
|     is reset on each new iteration of a workload; ``run_result`` is maintained |  | ||||||
|     throughout a Workload Automation run. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     # These are the artifacts generated by the core framework. |  | ||||||
|     default_run_artifacts = [ |  | ||||||
|         Artifact('runlog', 'run.log', 'log', mandatory=True, |  | ||||||
|                  description='The log for the entire run.'), |  | ||||||
|     ] |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def current_iteration(self): |  | ||||||
|         if self.current_job: |  | ||||||
|             spec_id = self.current_job.spec.id |  | ||||||
|             return self.job_iteration_counts[spec_id] |  | ||||||
|         else: |  | ||||||
|             return None |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def job_status(self): |  | ||||||
|         if not self.current_job: |  | ||||||
|             return None |  | ||||||
|         return self.current_job.result.status |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def workload(self): |  | ||||||
|         return getattr(self.spec, 'workload', None) |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def spec(self): |  | ||||||
|         return getattr(self.current_job, 'spec', None) |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def result(self): |  | ||||||
|         return getattr(self.current_job, 'result', self.run_result) |  | ||||||
|  |  | ||||||
|     def __init__(self, device_manager, config): |  | ||||||
|         self.device_manager = device_manager |  | ||||||
|         self.device = self.device_manager.target |  | ||||||
|         self.config = config |  | ||||||
|         self.reboot_policy = config.reboot_policy |  | ||||||
|         self.output_directory = None |  | ||||||
|         self.current_job = None |  | ||||||
|         self.resolver = None |  | ||||||
|         self.last_error = None |  | ||||||
|         self.run_info = None |  | ||||||
|         self.run_result = None |  | ||||||
|         self.run_output_directory = self.config.output_directory |  | ||||||
|         self.host_working_directory = self.config.meta_directory |  | ||||||
|         self.iteration_artifacts = None |  | ||||||
|         self.run_artifacts = copy(self.default_run_artifacts) |  | ||||||
|         self.job_iteration_counts = defaultdict(int) |  | ||||||
|         self.aborted = False |  | ||||||
|         self.runner = None |  | ||||||
|         if config.agenda.filepath: |  | ||||||
|             self.run_artifacts.append(Artifact('agenda', |  | ||||||
|                                                os.path.join(self.host_working_directory, |  | ||||||
|                                                             os.path.basename(config.agenda.filepath)), |  | ||||||
|                                                'meta', |  | ||||||
|                                                mandatory=True, |  | ||||||
|                                                description='Agenda for this run.')) |  | ||||||
|         for i, filepath in enumerate(settings.config_paths, 1): |  | ||||||
|             name = 'config_{}'.format(i) |  | ||||||
|             path = os.path.join(self.host_working_directory, |  | ||||||
|                                 name + os.path.splitext(filepath)[1]) |  | ||||||
|             self.run_artifacts.append(Artifact(name, |  | ||||||
|                                                path, |  | ||||||
|                                                kind='meta', |  | ||||||
|                                                mandatory=True, |  | ||||||
|                                                description='Config file used for the run.')) |  | ||||||
|  |  | ||||||
|     def initialize(self): |  | ||||||
|         if not os.path.isdir(self.run_output_directory): |  | ||||||
|             os.makedirs(self.run_output_directory) |  | ||||||
|         self.output_directory = self.run_output_directory |  | ||||||
|         self.resolver = ResourceResolver(self.config) |  | ||||||
|         self.run_info = RunInfo(self.config) |  | ||||||
|         self.run_result = RunResult(self.run_info, self.run_output_directory) |  | ||||||
|  |  | ||||||
|     def next_job(self, job): |  | ||||||
|         """Invoked by the runner when starting a new iteration of workload execution.""" |  | ||||||
|         self.current_job = job |  | ||||||
|         self.job_iteration_counts[self.spec.id] += 1 |  | ||||||
|         if not self.aborted: |  | ||||||
|             outdir_name = '_'.join(map(str, [self.spec.label, self.spec.id, self.current_iteration])) |  | ||||||
|             self.output_directory = _d(os.path.join(self.run_output_directory, outdir_name)) |  | ||||||
|             self.iteration_artifacts = [wa for wa in self.workload.artifacts] |  | ||||||
|         self.current_job.result.iteration = self.current_iteration |  | ||||||
|         self.current_job.result.output_directory = self.output_directory |  | ||||||
|  |  | ||||||
|     def end_job(self): |  | ||||||
|         if self.current_job.result.status == IterationResult.ABORTED: |  | ||||||
|             self.aborted = True |  | ||||||
|         self.current_job = None |  | ||||||
|         self.output_directory = self.run_output_directory |  | ||||||
|  |  | ||||||
|     def add_metric(self, *args, **kwargs): |  | ||||||
|         self.result.add_metric(*args, **kwargs) |  | ||||||
|  |  | ||||||
|     def add_artifact(self, name, path, kind, *args, **kwargs): |  | ||||||
|         if self.current_job is None: |  | ||||||
|             self.add_run_artifact(name, path, kind, *args, **kwargs) |  | ||||||
|         else: |  | ||||||
|             self.add_iteration_artifact(name, path, kind, *args, **kwargs) |  | ||||||
|  |  | ||||||
|     def add_run_artifact(self, name, path, kind, *args, **kwargs): |  | ||||||
|         path = _check_artifact_path(path, self.run_output_directory) |  | ||||||
|         self.run_artifacts.append(Artifact(name, path, kind, Artifact.RUN, *args, **kwargs)) |  | ||||||
|  |  | ||||||
|     def add_iteration_artifact(self, name, path, kind, *args, **kwargs): |  | ||||||
|         path = _check_artifact_path(path, self.output_directory) |  | ||||||
|         self.iteration_artifacts.append(Artifact(name, path, kind, Artifact.ITERATION, *args, **kwargs)) |  | ||||||
|  |  | ||||||
|     def get_artifact(self, name): |  | ||||||
|         if self.iteration_artifacts: |  | ||||||
|             for art in self.iteration_artifacts: |  | ||||||
|                 if art.name == name: |  | ||||||
|                     return art |  | ||||||
|         for art in self.run_artifacts: |  | ||||||
|             if art.name == name: |  | ||||||
|                 return art |  | ||||||
|         return None |  | ||||||
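|  |  | ||||||
|     # A hedged usage sketch: while a job is in flight, add_artifact() records |  | ||||||
|     # against the current iteration; outside of a job (current_job is None) |  | ||||||
|     # it records against the run. Names and paths here are illustrative only. |  | ||||||
|     # |  | ||||||
|     #     context.add_artifact('trace', 'trace.txt', 'data')    # iteration scope |  | ||||||
|     #     context.end_job() |  | ||||||
|     #     context.add_artifact('summary', 'run.csv', 'export')  # run scope |  | ||||||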
|  |  | ||||||
|  |  | ||||||
| def _check_artifact_path(path, rootpath): |  | ||||||
|     if path.startswith(rootpath): |  | ||||||
|         return os.path.abspath(path) |  | ||||||
|     rootpath = os.path.abspath(rootpath) |  | ||||||
|     full_path = os.path.join(rootpath, path) |  | ||||||
|     if not os.path.isfile(full_path): |  | ||||||
|         raise ValueError('Cannot add artifact because {} does not exist.'.format(full_path)) |  | ||||||
|     return full_path |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class FakeTargetManager(object): |  | ||||||
|     # TODO: this is a FAKE |  | ||||||
|  |  | ||||||
|     def __init__(self, name, config): |  | ||||||
|         self.device_name = name |  | ||||||
|         self.device_config = config |  | ||||||
|  |  | ||||||
|         from devlib import LocalLinuxTarget |  | ||||||
|         self.target = LocalLinuxTarget({'unrooted': True}) |  | ||||||
|          |  | ||||||
|     def get_target_info(self): |  | ||||||
|         return TargetInfo(self.target) |  | ||||||
|  |  | ||||||
|     def validate_runtime_parameters(self, params): |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     def merge_runtime_parameters(self, params): |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def init_target_manager(config): |  | ||||||
|     return FakeTargetManager(config.device, config.device_config) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class Executor(object): |  | ||||||
|     """ |  | ||||||
|     The ``Executor``'s job is to set up the execution context and pass it to a |  | ||||||
|     ``Runner`` along with a loaded run specification. Once the ``Runner`` has |  | ||||||
|     done its thing, the ``Executor`` performs some final reporting before |  | ||||||
|     returning. |  | ||||||
|  |  | ||||||
|     The initial context setup involves combining configuration from various |  | ||||||
|     sources, loading of required workloads, loading and installation of |  | ||||||
|     instruments and result processors, etc. Static validation of the combined |  | ||||||
|     configuration is also performed. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|     # pylint: disable=R0915 |  | ||||||
|  |  | ||||||
|     def __init__(self): |  | ||||||
|         self.logger = logging.getLogger('Executor') |  | ||||||
|         self.error_logged = False |  | ||||||
|         self.warning_logged = False |  | ||||||
|         pluginloader = None |  | ||||||
|         self.device_manager = None |  | ||||||
|         self.device = None |  | ||||||
|         self.context = None |  | ||||||
|  |  | ||||||
|     def execute(self, config_manager, output): |  | ||||||
|         """ |  | ||||||
|         Execute the run specified by an agenda. Optionally, selectors may be |  | ||||||
|         used to execute only a subset of the specified agenda. |  | ||||||
|  |  | ||||||
|         Params:: |  | ||||||
|  |  | ||||||
|             :config_manager: a ``ConfigManager`` containing processed |  | ||||||
|                              configuration. |  | ||||||
|             :output: an initialized ``RunOutput`` that will be used to |  | ||||||
|                      store the results. |  | ||||||
|  |  | ||||||
|         """ |  | ||||||
|         signal.connect(self._error_signalled_callback, signal.ERROR_LOGGED) |  | ||||||
|         signal.connect(self._warning_signalled_callback, signal.WARNING_LOGGED) |  | ||||||
|  |  | ||||||
|         self.logger.info('Initializing run') |  | ||||||
|         self.logger.debug('Finalizing run configuration.') |  | ||||||
|         config = config_manager.finalize() |  | ||||||
|         output.write_config(config) |  | ||||||
|  |  | ||||||
|         self.logger.info('Connecting to target') |  | ||||||
|         target_manager = init_target_manager(config.run_config) |  | ||||||
|         output.write_target_info(target_manager.get_target_info()) |  | ||||||
|  |  | ||||||
|         self.logger.info('Initializing execution context') |  | ||||||
|         context = ExecutionContext(config_manager, target_manager, output) |  | ||||||
|  |  | ||||||
|         self.logger.info('Generating jobs') |  | ||||||
|         config_manager.generate_jobs(context) |  | ||||||
|         output.write_job_specs(config_manager.job_specs) |  | ||||||
|  |  | ||||||
|         self.logger.info('Installing instrumentation') |  | ||||||
|         for instrument in config_manager.get_instruments(target_manager.target): |  | ||||||
|             instrumentation.install(instrument) |  | ||||||
|         instrumentation.validate() |  | ||||||
|  |  | ||||||
|     def old_exec(self, agenda, selectors={}): |  | ||||||
|         self.config.set_agenda(agenda, selectors) |  | ||||||
|         self.config.finalize() |  | ||||||
|         config_outfile = os.path.join(self.config.meta_directory, 'run_config.json') |  | ||||||
|         with open(config_outfile, 'w') as wfh: |  | ||||||
|             json.dump(self.config, wfh) |  | ||||||
|  |  | ||||||
|         self.logger.debug('Initialising device configuration.') |  | ||||||
|         if not self.config.device: |  | ||||||
|             raise ConfigError('Make sure a device is specified in the config.') |  | ||||||
|         self.device_manager = pluginloader.get_manager(self.config.device,  |  | ||||||
|                                                        **self.config.device_config) |  | ||||||
|         self.device_manager.validate() |  | ||||||
|         self.device = self.device_manager.target |  | ||||||
|  |  | ||||||
|         self.context = ExecutionContext(self.device_manager, self.config) |  | ||||||
|  |  | ||||||
|         self.logger.debug('Loading resource discoverers.') |  | ||||||
|         self.context.initialize() |  | ||||||
|         self.context.resolver.load() |  | ||||||
|         self.context.add_artifact('run_config', config_outfile, 'meta') |  | ||||||
|  |  | ||||||
|         self.logger.debug('Installing instrumentation') |  | ||||||
|         for name, params in self.config.instrumentation.iteritems(): |  | ||||||
|             instrument = pluginloader.get_instrument(name, self.device, **params) |  | ||||||
|             instrumentation.install(instrument) |  | ||||||
|         instrumentation.validate() |  | ||||||
|  |  | ||||||
|         self.logger.debug('Installing result processors') |  | ||||||
|         result_manager = ResultManager() |  | ||||||
|         for name, params in self.config.result_processors.iteritems(): |  | ||||||
|             processor = pluginloader.get_result_processor(name, **params) |  | ||||||
|             result_manager.install(processor) |  | ||||||
|         result_manager.validate() |  | ||||||
|  |  | ||||||
|         self.logger.debug('Loading workload specs') |  | ||||||
|         for workload_spec in self.config.workload_specs: |  | ||||||
|             workload_spec.load(self.device, pluginloader) |  | ||||||
|             workload_spec.workload.init_resources(self.context) |  | ||||||
|             workload_spec.workload.validate() |  | ||||||
|  |  | ||||||
|         if self.config.flashing_config: |  | ||||||
|             if not self.device.flasher: |  | ||||||
|                 msg = 'flashing_config specified for {} device that does not support flashing.' |  | ||||||
|                 raise ConfigError(msg.format(self.device.name)) |  | ||||||
|             self.logger.debug('Flashing the device') |  | ||||||
|             self.device.flasher.flash(self.device) |  | ||||||
|  |  | ||||||
|         self.logger.info('Running workloads') |  | ||||||
|         runner = self._get_runner(result_manager) |  | ||||||
|         runner.init_queue(self.config.workload_specs) |  | ||||||
|         runner.run() |  | ||||||
|         self.execute_postamble() |  | ||||||
|  |  | ||||||
|     def execute_postamble(self): |  | ||||||
|         """ |  | ||||||
|         This happens after the run has completed. The overall results of the run are |  | ||||||
|         summarised to the user. |  | ||||||
|  |  | ||||||
|         """ |  | ||||||
|         result = self.context.run_result |  | ||||||
|         counter = Counter() |  | ||||||
|         for ir in result.iteration_results: |  | ||||||
|             counter[ir.status] += 1 |  | ||||||
|         self.logger.info('Done.') |  | ||||||
|         self.logger.info('Run duration: {}'.format(format_duration(self.context.run_info.duration))) |  | ||||||
|         status_summary = 'Ran a total of {} iterations: '.format(sum(self.context.job_iteration_counts.values())) |  | ||||||
|         parts = [] |  | ||||||
|         for status in IterationResult.values: |  | ||||||
|             if status in counter: |  | ||||||
|                 parts.append('{} {}'.format(counter[status], status)) |  | ||||||
|         self.logger.info(status_summary + ', '.join(parts)) |  | ||||||
|         self.logger.info('Results can be found in {}'.format(self.config.output_directory)) |  | ||||||
|  |  | ||||||
|         if self.error_logged: |  | ||||||
|             self.logger.warn('There were errors during execution.') |  | ||||||
|             self.logger.warn('Please see {}'.format(self.config.log_file)) |  | ||||||
|         elif self.warning_logged: |  | ||||||
|             self.logger.warn('There were warnings during execution.') |  | ||||||
|             self.logger.warn('Please see {}'.format(self.config.log_file)) |  | ||||||
|  |  | ||||||
|     def _get_runner(self, result_manager): |  | ||||||
|         if not self.config.execution_order or self.config.execution_order == 'by_iteration': |  | ||||||
|             if self.config.reboot_policy == 'each_spec': |  | ||||||
|                 self.logger.info('each_spec reboot policy with the default by_iteration execution order is ' |  | ||||||
|                                  'equivalent to each_iteration policy.') |  | ||||||
|             runnercls = ByIterationRunner |  | ||||||
|         elif self.config.execution_order in ['classic', 'by_spec']: |  | ||||||
|             runnercls = BySpecRunner |  | ||||||
|         elif self.config.execution_order == 'by_section': |  | ||||||
|             runnercls = BySectionRunner |  | ||||||
|         elif self.config.execution_order == 'random': |  | ||||||
|             runnercls = RandomRunner |  | ||||||
|         else: |  | ||||||
|             raise ConfigError('Unexpected execution order: {}'.format(self.config.execution_order)) |  | ||||||
|         return runnercls(self.device_manager, self.context, result_manager) |  | ||||||
|  |  | ||||||
|     def _error_signalled_callback(self): |  | ||||||
|         self.error_logged = True |  | ||||||
|         signal.disconnect(self._error_signalled_callback, signal.ERROR_LOGGED) |  | ||||||
|  |  | ||||||
|     def _warning_signalled_callback(self): |  | ||||||
|         self.warning_logged = True |  | ||||||
|         signal.disconnect(self._warning_signalled_callback, signal.WARNING_LOGGED) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class Runner(object): |  | ||||||
|     """ |  | ||||||
|      |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class RunnerJob(object): |  | ||||||
|     """ |  | ||||||
|     Represents a single execution of a ``RunnerJobDescription``. There will be one created for each iteration |  | ||||||
|     specified by ``RunnerJobDescription.number_of_iterations``. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     def __init__(self, spec, retry=0): |  | ||||||
|         self.spec = spec |  | ||||||
|         self.retry = retry |  | ||||||
|         self.iteration = None |  | ||||||
|         self.result = IterationResult(self.spec) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class OldRunner(object): |  | ||||||
|     """ |  | ||||||
|     This class is responsible for actually performing a workload automation |  | ||||||
|     run. The main responsibility of this class is to emit appropriate signals |  | ||||||
|     at the various stages of the run to allow things like traces and other |  | ||||||
|     instrumentation to hook into the process. |  | ||||||
|  |  | ||||||
|     This is an abstract base class that defines each step of the run, but not |  | ||||||
|     the order in which those steps are executed, which is left to the concrete |  | ||||||
|     derived classes. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|     class _RunnerError(Exception): |  | ||||||
|         """Internal runner error.""" |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def config(self): |  | ||||||
|         return self.context.config |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def current_job(self): |  | ||||||
|         if self.job_queue: |  | ||||||
|             return self.job_queue[0] |  | ||||||
|         return None |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def previous_job(self): |  | ||||||
|         if self.completed_jobs: |  | ||||||
|             return self.completed_jobs[-1] |  | ||||||
|         return None |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def next_job(self): |  | ||||||
|         if self.job_queue: |  | ||||||
|             if len(self.job_queue) > 1: |  | ||||||
|                 return self.job_queue[1] |  | ||||||
|         return None |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def spec_changed(self): |  | ||||||
|         if self.previous_job is None and self.current_job is not None:  # Start of run |  | ||||||
|             return True |  | ||||||
|         if self.previous_job is not None and self.current_job is None:  # End of run |  | ||||||
|             return True |  | ||||||
|         return self.current_job.spec.id != self.previous_job.spec.id |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def spec_will_change(self): |  | ||||||
|         if self.current_job is None and self.next_job is not None:  # Start of run |  | ||||||
|             return True |  | ||||||
|         if self.current_job is not None and self.next_job is None:  # End of run |  | ||||||
|             return True |  | ||||||
|         return self.current_job.spec.id != self.next_job.spec.id |  | ||||||
|  |  | ||||||
|     def __init__(self, device_manager, context, result_manager): |  | ||||||
|         self.device_manager = device_manager |  | ||||||
|         self.device = device_manager.target |  | ||||||
|         self.context = context |  | ||||||
|         self.result_manager = result_manager |  | ||||||
|         self.logger = logging.getLogger('Runner') |  | ||||||
|         self.job_queue = [] |  | ||||||
|         self.completed_jobs = [] |  | ||||||
|         self._initial_reset = True |  | ||||||
|  |  | ||||||
|     def init_queue(self, specs): |  | ||||||
|         raise NotImplementedError() |  | ||||||
|  |  | ||||||
|     def run(self):  # pylint: disable=too-many-branches |  | ||||||
|         self._send(signal.RUN_START) |  | ||||||
|         self._initialize_run() |  | ||||||
|  |  | ||||||
|         try: |  | ||||||
|             while self.job_queue: |  | ||||||
|                 try: |  | ||||||
|                     self._init_job() |  | ||||||
|                     self._run_job() |  | ||||||
|                 except KeyboardInterrupt: |  | ||||||
|                     self.current_job.result.status = IterationResult.ABORTED |  | ||||||
|                     raise |  | ||||||
|                 except Exception, e:  # pylint: disable=broad-except |  | ||||||
|                     self.current_job.result.status = IterationResult.FAILED |  | ||||||
|                     self.current_job.result.add_event(e.message) |  | ||||||
|                     if isinstance(e, DeviceNotRespondingError): |  | ||||||
|                         self.logger.info('Device appears to be unresponsive.') |  | ||||||
|                         if self.context.reboot_policy.can_reboot and self.device.can('reset_power'): |  | ||||||
|                             self.logger.info('Attempting to hard-reset the device...') |  | ||||||
|                             try: |  | ||||||
|                                 self.device.boot(hard=True) |  | ||||||
|                                 self.device.connect() |  | ||||||
|                             except DeviceError:  # hard_boot not implemented for the device. |  | ||||||
|                                 raise e |  | ||||||
|                         else: |  | ||||||
|                             raise e |  | ||||||
|                     else:  # not a DeviceNotRespondingError |  | ||||||
|                         self.logger.error(e) |  | ||||||
|                 finally: |  | ||||||
|                     self._finalize_job() |  | ||||||
|         except KeyboardInterrupt: |  | ||||||
|             self.logger.info('Got CTRL-C. Finalizing run... (CTRL-C again to abort).') |  | ||||||
|             # Skip through the remaining jobs. |  | ||||||
|             while self.job_queue: |  | ||||||
|                 self.context.next_job(self.current_job) |  | ||||||
|                 self.current_job.result.status = IterationResult.ABORTED |  | ||||||
|                 self._finalize_job() |  | ||||||
|         except DeviceNotRespondingError: |  | ||||||
|             self.logger.info('Device unresponsive and recovery not possible. Skipping the rest of the run.') |  | ||||||
|             self.context.aborted = True |  | ||||||
|             while self.job_queue: |  | ||||||
|                 self.context.next_job(self.current_job) |  | ||||||
|                 self.current_job.result.status = IterationResult.SKIPPED |  | ||||||
|                 self._finalize_job() |  | ||||||
|  |  | ||||||
|         instrumentation.enable_all() |  | ||||||
|         self._finalize_run() |  | ||||||
|         self._process_results() |  | ||||||
|  |  | ||||||
|         self.result_manager.finalize(self.context) |  | ||||||
|         self._send(signal.RUN_END) |  | ||||||
|  |  | ||||||
|     def _initialize_run(self): |  | ||||||
|         self.context.runner = self |  | ||||||
|         self.context.run_info.start_time = datetime.utcnow() |  | ||||||
|         self._connect_to_device() |  | ||||||
|         self.logger.info('Initializing device') |  | ||||||
|         self.device_manager.initialize(self.context) |  | ||||||
|  |  | ||||||
|         self.logger.info('Initializing workloads') |  | ||||||
|         for workload_spec in self.context.config.workload_specs: |  | ||||||
|             workload_spec.workload.initialize(self.context) |  | ||||||
|  |  | ||||||
|         self.context.run_info.device_properties = self.device_manager.info |  | ||||||
|         self.result_manager.initialize(self.context) |  | ||||||
|         self._send(signal.RUN_INIT) |  | ||||||
|  |  | ||||||
|         if instrumentation.check_failures(): |  | ||||||
|             raise InstrumentError('Detected failure(s) during instrumentation initialization.') |  | ||||||
|  |  | ||||||
|     def _connect_to_device(self): |  | ||||||
|         if self.context.reboot_policy.perform_initial_boot: |  | ||||||
|             try: |  | ||||||
|                 self.device_manager.connect() |  | ||||||
|             except DeviceError:  # device may be offline |  | ||||||
|                 if self.device.can('reset_power'): |  | ||||||
|                     with self._signal_wrap('INITIAL_BOOT'): |  | ||||||
|                         self.device.boot(hard=True) |  | ||||||
|                 else: |  | ||||||
|                     raise DeviceError('Cannot connect to device for initial reboot, ' |  | ||||||
|                                       'and the device does not support hard reset.') |  | ||||||
|             else:  # successfully connected |  | ||||||
|                 self.logger.info('\tBooting device') |  | ||||||
|                 with self._signal_wrap('INITIAL_BOOT'): |  | ||||||
|                     self._reboot_device() |  | ||||||
|         else: |  | ||||||
|             self.logger.info('Connecting to device') |  | ||||||
|             self.device_manager.connect() |  | ||||||
|  |  | ||||||
|     def _init_job(self): |  | ||||||
|         self.current_job.result.status = IterationResult.RUNNING |  | ||||||
|         self.context.next_job(self.current_job) |  | ||||||
|  |  | ||||||
|     def _run_job(self):   # pylint: disable=too-many-branches |  | ||||||
|         spec = self.current_job.spec |  | ||||||
|         if not spec.enabled: |  | ||||||
|             self.logger.info('Skipping workload %s (iteration %s)', spec, self.context.current_iteration) |  | ||||||
|             self.current_job.result.status = IterationResult.SKIPPED |  | ||||||
|             return |  | ||||||
|  |  | ||||||
|         self.logger.info('Running workload %s (iteration %s)', spec, self.context.current_iteration) |  | ||||||
|         if spec.flash: |  | ||||||
|             if not self.context.reboot_policy.can_reboot: |  | ||||||
|                 raise ConfigError('Cannot flash as reboot_policy does not permit rebooting.') |  | ||||||
|             if not self.device.can('flash'): |  | ||||||
|                 raise DeviceError('Device does not support flashing.') |  | ||||||
|             self._flash_device(spec.flash) |  | ||||||
|         elif not self.completed_jobs: |  | ||||||
|             # Never reboot on the very first job of a run, as we would have done |  | ||||||
|             # the initial reboot if a reboot was needed. |  | ||||||
|             pass |  | ||||||
|         elif self.context.reboot_policy.reboot_on_each_spec and self.spec_changed: |  | ||||||
|             self.logger.debug('Rebooting on spec change.') |  | ||||||
|             self._reboot_device() |  | ||||||
|         elif self.context.reboot_policy.reboot_on_each_iteration: |  | ||||||
|             self.logger.debug('Rebooting on iteration.') |  | ||||||
|             self._reboot_device() |  | ||||||
|  |  | ||||||
|         instrumentation.disable_all() |  | ||||||
|         instrumentation.enable(spec.instrumentation) |  | ||||||
|         self.device_manager.start() |  | ||||||
|  |  | ||||||
|         if self.spec_changed: |  | ||||||
|             self._send(signal.WORKLOAD_SPEC_START) |  | ||||||
|         self._send(signal.ITERATION_START) |  | ||||||
|  |  | ||||||
|         try: |  | ||||||
|             setup_ok = False |  | ||||||
|             with self._handle_errors('Setting up device parameters'): |  | ||||||
|                 self.device_manager.set_runtime_parameters(spec.runtime_parameters) |  | ||||||
|                 setup_ok = True |  | ||||||
|  |  | ||||||
|             if setup_ok: |  | ||||||
|                 with self._handle_errors('running {}'.format(spec.workload.name)): |  | ||||||
|                     self.current_job.result.status = IterationResult.RUNNING |  | ||||||
|                     self._run_workload_iteration(spec.workload) |  | ||||||
|             else: |  | ||||||
|                 self.logger.info('\tSkipping the rest of the iterations for this spec.') |  | ||||||
|                 spec.enabled = False |  | ||||||
|         except KeyboardInterrupt: |  | ||||||
|             self._send(signal.ITERATION_END) |  | ||||||
|             self._send(signal.WORKLOAD_SPEC_END) |  | ||||||
|             raise |  | ||||||
|         else: |  | ||||||
|             self._send(signal.ITERATION_END) |  | ||||||
|             if self.spec_will_change or not spec.enabled: |  | ||||||
|                 self._send(signal.WORKLOAD_SPEC_END) |  | ||||||
|         finally: |  | ||||||
|             self.device_manager.stop() |  | ||||||
|  |  | ||||||
|     def _finalize_job(self): |  | ||||||
|         self.context.run_result.iteration_results.append(self.current_job.result) |  | ||||||
|         job = self.job_queue.pop(0) |  | ||||||
|         job.iteration = self.context.current_iteration |  | ||||||
|         if job.result.status in self.config.retry_on_status: |  | ||||||
|             if job.retry >= self.config.max_retries: |  | ||||||
|                 self.logger.error('Exceeded maximum number of retries. Abandoning job.') |  | ||||||
|             else: |  | ||||||
|                 self.logger.info('Job status was {}. Retrying...'.format(job.result.status)) |  | ||||||
|                 retry_job = RunnerJob(job.spec, job.retry + 1) |  | ||||||
|                 self.job_queue.insert(0, retry_job) |  | ||||||
|         self.completed_jobs.append(job) |  | ||||||
|         self.context.end_job() |  | ||||||
|  |  | ||||||
|     def _finalize_run(self): |  | ||||||
|         self.logger.info('Finalizing workloads') |  | ||||||
|         for workload_spec in self.context.config.workload_specs: |  | ||||||
|             workload_spec.workload.finalize(self.context) |  | ||||||
|  |  | ||||||
|         self.logger.info('Finalizing.') |  | ||||||
|         self._send(signal.RUN_FIN) |  | ||||||
|  |  | ||||||
|         with self._handle_errors('Disconnecting from the device'): |  | ||||||
|             self.device.disconnect() |  | ||||||
|  |  | ||||||
|         info = self.context.run_info |  | ||||||
|         info.end_time = datetime.utcnow() |  | ||||||
|         info.duration = info.end_time - info.start_time |  | ||||||
|  |  | ||||||
|     def _process_results(self): |  | ||||||
|         self.logger.info('Processing overall results') |  | ||||||
|         with self._signal_wrap('OVERALL_RESULTS_PROCESSING'): |  | ||||||
|             if instrumentation.check_failures(): |  | ||||||
|                 self.context.run_result.non_iteration_errors = True |  | ||||||
|             self.result_manager.process_run_result(self.context.run_result, self.context) |  | ||||||
|  |  | ||||||
|     def _run_workload_iteration(self, workload): |  | ||||||
|         self.logger.info('\tSetting up') |  | ||||||
|         with self._signal_wrap('WORKLOAD_SETUP'): |  | ||||||
|             try: |  | ||||||
|                 workload.setup(self.context) |  | ||||||
|             except: |  | ||||||
|                 self.logger.info('\tSkipping the rest of the iterations for this spec.') |  | ||||||
|                 self.current_job.spec.enabled = False |  | ||||||
|                 raise |  | ||||||
|         try: |  | ||||||
|  |  | ||||||
|             self.logger.info('\tExecuting') |  | ||||||
|             with self._handle_errors('Running workload'): |  | ||||||
|                 with self._signal_wrap('WORKLOAD_EXECUTION'): |  | ||||||
|                     workload.run(self.context) |  | ||||||
|  |  | ||||||
|             self.logger.info('\tProcessing result') |  | ||||||
|             self._send(signal.BEFORE_WORKLOAD_RESULT_UPDATE) |  | ||||||
|             try: |  | ||||||
|                 if self.current_job.result.status != IterationResult.FAILED: |  | ||||||
|                     with self._handle_errors('Processing workload result', |  | ||||||
|                                              on_error_status=IterationResult.PARTIAL): |  | ||||||
|                         workload.update_result(self.context) |  | ||||||
|                         self._send(signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE) |  | ||||||
|  |  | ||||||
|                 if self.current_job.result.status == IterationResult.RUNNING: |  | ||||||
|                     self.current_job.result.status = IterationResult.OK |  | ||||||
|             finally: |  | ||||||
|                 self._send(signal.AFTER_WORKLOAD_RESULT_UPDATE) |  | ||||||
|  |  | ||||||
|         finally: |  | ||||||
|             self.logger.info('\tTearing down') |  | ||||||
|             with self._handle_errors('Tearing down workload', |  | ||||||
|                                      on_error_status=IterationResult.NONCRITICAL): |  | ||||||
|                 with self._signal_wrap('WORKLOAD_TEARDOWN'): |  | ||||||
|                     workload.teardown(self.context) |  | ||||||
|             self.result_manager.add_result(self.current_job.result, self.context) |  | ||||||
|  |  | ||||||
|     def _flash_device(self, flashing_params): |  | ||||||
|         with self._signal_wrap('FLASHING'): |  | ||||||
|             self.device.flash(**flashing_params) |  | ||||||
|             self.device.connect() |  | ||||||
|  |  | ||||||
|     def _reboot_device(self): |  | ||||||
|         with self._signal_wrap('BOOT'): |  | ||||||
|             for reboot_attempts in xrange(MAX_REBOOT_ATTEMPTS): |  | ||||||
|                 if reboot_attempts: |  | ||||||
|                     self.logger.info('\tRetrying...') |  | ||||||
|                 with self._handle_errors('Rebooting device'): |  | ||||||
|                     self.device.boot(**self.current_job.spec.boot_parameters) |  | ||||||
|                     break |  | ||||||
|             else: |  | ||||||
|                 raise DeviceError('Could not reboot device; max reboot attempts exceeded.') |  | ||||||
|             self.device.connect() |  | ||||||
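|  |  | ||||||
|     # The for/else above is the standard retry idiom: the else clause runs |  | ||||||
|     # only if the loop finished without a break, i.e. every attempt failed |  | ||||||
|     # (_handle_errors() swallows most per-attempt errors, so the break is |  | ||||||
|     # only reached when boot() succeeds). A minimal standalone sketch, with |  | ||||||
|     # try_reboot() as a hypothetical helper: |  | ||||||
|     # |  | ||||||
|     #     for attempt in xrange(MAX_REBOOT_ATTEMPTS): |  | ||||||
|     #         if try_reboot():  # hypothetical |  | ||||||
|     #             break |  | ||||||
|     #     else: |  | ||||||
|     #         raise DeviceError('max reboot attempts exceeded') |  | ||||||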
|  |  | ||||||
|     def _send(self, s): |  | ||||||
|         signal.send(s, self, self.context) |  | ||||||
|  |  | ||||||
|     def _take_screenshot(self, filename): |  | ||||||
|         if self.context.output_directory: |  | ||||||
|             filepath = os.path.join(self.context.output_directory, filename) |  | ||||||
|         else: |  | ||||||
|             filepath = os.path.join(settings.output_directory, filename) |  | ||||||
|         self.device.capture_screen(filepath) |  | ||||||
|  |  | ||||||
|     @contextmanager |  | ||||||
|     def _handle_errors(self, action, on_error_status=IterationResult.FAILED): |  | ||||||
|         try: |  | ||||||
|             if action is not None: |  | ||||||
|                 self.logger.debug(action) |  | ||||||
|             yield |  | ||||||
|         except (KeyboardInterrupt, DeviceNotRespondingError): |  | ||||||
|             raise |  | ||||||
|         except (WAError, TimeoutError), we: |  | ||||||
|             self.device.check_responsive() |  | ||||||
|             if self.current_job: |  | ||||||
|                 self.current_job.result.status = on_error_status |  | ||||||
|                 self.current_job.result.add_event(str(we)) |  | ||||||
|             try: |  | ||||||
|                 self._take_screenshot('error.png') |  | ||||||
|             except Exception, e:  # pylint: disable=W0703 |  | ||||||
|                 # We're already in error state, so the fact that taking a |  | ||||||
|                 # screenshot failed is not surprising... |  | ||||||
|                 pass |  | ||||||
|             if action: |  | ||||||
|                 action = action[0].lower() + action[1:] |  | ||||||
|             self.logger.error('Error while {}:\n\t{}'.format(action, we)) |  | ||||||
|         except Exception, e:  # pylint: disable=W0703 |  | ||||||
|             error_text = '{}("{}")'.format(e.__class__.__name__, e) |  | ||||||
|             if self.current_job: |  | ||||||
|                 self.current_job.result.status = on_error_status |  | ||||||
|                 self.current_job.result.add_event(error_text) |  | ||||||
|             self.logger.error('Error while {}'.format(action)) |  | ||||||
|             self.logger.error(error_text) |  | ||||||
|             if isinstance(e, subprocess.CalledProcessError): |  | ||||||
|                 self.logger.error('Got:') |  | ||||||
|                 self.logger.error(e.output) |  | ||||||
|             tb = get_traceback() |  | ||||||
|             self.logger.error(tb) |  | ||||||
|  |  | ||||||
|     @contextmanager |  | ||||||
|     def _signal_wrap(self, signal_name): |  | ||||||
|         """Wraps the suite in before/after signals, ensuring |  | ||||||
|         that after signal is always sent.""" |  | ||||||
|         before_signal = getattr(signal, 'BEFORE_' + signal_name) |  | ||||||
|         success_signal = getattr(signal, 'SUCCESSFUL_' + signal_name) |  | ||||||
|         after_signal = getattr(signal, 'AFTER_' + signal_name) |  | ||||||
|         try: |  | ||||||
|             self._send(before_signal) |  | ||||||
|             yield |  | ||||||
|             self._send(success_signal) |  | ||||||
|         finally: |  | ||||||
|             self._send(after_signal) |  | ||||||
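|  |  | ||||||
|     # _signal_wrap() relies on a naming convention: for a name such as |  | ||||||
|     # 'WORKLOAD_SETUP', the signal module is expected to define |  | ||||||
|     # BEFORE_WORKLOAD_SETUP, SUCCESSFUL_WORKLOAD_SETUP and |  | ||||||
|     # AFTER_WORKLOAD_SETUP. Typical use, mirroring the calls earlier in this |  | ||||||
|     # class: |  | ||||||
|     # |  | ||||||
|     #     with self._signal_wrap('WORKLOAD_SETUP'): |  | ||||||
|     #         workload.setup(self.context) |  | ||||||
|     # |  | ||||||
|     # On success all three signals fire; if the body raises, the SUCCESSFUL_ |  | ||||||
|     # signal is skipped but AFTER_ is still sent from the finally block. |  | ||||||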
|  |  | ||||||
|  |  | ||||||
| class BySpecRunner(Runner): |  | ||||||
|     """ |  | ||||||
|     This is that "classic" implementation that executes all iterations of a workload |  | ||||||
|     spec before proceeding onto the next spec. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     def init_queue(self, specs): |  | ||||||
|         jobs = [[RunnerJob(s) for _ in xrange(s.number_of_iterations)] for s in specs]  # pylint: disable=unused-variable |  | ||||||
|         self.job_queue = [j for spec_jobs in jobs for j in spec_jobs] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class BySectionRunner(Runner): |  | ||||||
|     """ |  | ||||||
|     Runs the first iteration for all benchmarks first, before proceeding to the next iteration, |  | ||||||
|     i.e. A1, B1, C1, A2, B2, C2...  instead of  A1, A2, B1, B2, C1, C2... |  | ||||||
|  |  | ||||||
|     If multiple sections were specified in the agenda, this will run all specs for the first section |  | ||||||
|     followed by all specs for the second section, etc. |  | ||||||
|  |  | ||||||
|     e.g. given sections X and Y, and global specs A and B, with 2 iterations, this will run |  | ||||||
|  |  | ||||||
|     X.A1, X.B1, Y.A1, Y.B1, X.A2, X.B2, Y.A2, Y.B2 |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     def init_queue(self, specs): |  | ||||||
|         jobs = [[RunnerJob(s) for _ in xrange(s.number_of_iterations)] for s in specs] |  | ||||||
|         self.job_queue = [j for spec_jobs in izip_longest(*jobs) for j in spec_jobs if j] |  | ||||||
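|  |  | ||||||
| # A standalone sketch of the interleaving performed above: izip_longest pads |  | ||||||
| # the shorter job lists with None, which the final filter then drops. |  | ||||||
| # |  | ||||||
| #     >>> from itertools import izip_longest |  | ||||||
| #     >>> jobs = [['A1', 'A2'], ['B1', 'B2'], ['C1']] |  | ||||||
| #     >>> [j for spec_jobs in izip_longest(*jobs) for j in spec_jobs if j] |  | ||||||
| #     ['A1', 'B1', 'C1', 'A2', 'B2'] |  | ||||||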
|  |  | ||||||
|  |  | ||||||
| class ByIterationRunner(Runner): |  | ||||||
|     """ |  | ||||||
|     Runs the first iteration for all benchmarks first, before proceeding to the next iteration, |  | ||||||
|     i.e. A1, B1, C1, A2, B2, C2...  instead of  A1, A2, B1, B2, C1, C2... |  | ||||||
|  |  | ||||||
|     If multiple sections were specified in the agenda, this will run all sections for the first global |  | ||||||
|     spec first, followed by all sections for the second spec, etc. |  | ||||||
|  |  | ||||||
|     e.g. given sections X and Y, and global specs A and B, with 2 iterations, this will run |  | ||||||
|  |  | ||||||
|     X.A1, Y.A1, X.B1, Y.B1, X.A2, Y.A2, X.B2, Y.B2 |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     def init_queue(self, specs): |  | ||||||
|         sections = OrderedDict() |  | ||||||
|         for s in specs: |  | ||||||
|             if s.section_id not in sections: |  | ||||||
|                 sections[s.section_id] = [] |  | ||||||
|             sections[s.section_id].append(s) |  | ||||||
|         specs = [s for section_specs in izip_longest(*sections.values()) for s in section_specs if s] |  | ||||||
|         jobs = [[RunnerJob(s) for _ in xrange(s.number_of_iterations)] for s in specs] |  | ||||||
|         self.job_queue = [j for spec_jobs in izip_longest(*jobs) for j in spec_jobs if j] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class RandomRunner(Runner): |  | ||||||
|     """ |  | ||||||
|     This will run specs in a random order. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     def init_queue(self, specs): |  | ||||||
|         jobs = [[RunnerJob(s) for _ in xrange(s.number_of_iterations)] for s in specs]  # pylint: disable=unused-variable |  | ||||||
|         all_jobs = [j for spec_jobs in jobs for j in spec_jobs] |  | ||||||
|         random.shuffle(all_jobs) |  | ||||||
|         self.job_queue = all_jobs |  | ||||||
| @@ -1,32 +0,0 @@ | |||||||
| #    Copyright 2014-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # Separate module to avoid circular dependencies |  | ||||||
| from wlauto.core.configuration import settings |  | ||||||
| from wlauto.core.plugin import Plugin |  | ||||||
| from wlauto.utils.misc import load_class |  | ||||||
| from wlauto.core import pluginloader |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def get_plugin_type(ext): |  | ||||||
|     """Given an instance of ``wlauto.core.Plugin``, return a string representing |  | ||||||
|     the type of the plugin (e.g. ``'workload'`` for a Workload subclass instance).""" |  | ||||||
|     if not isinstance(ext, Plugin): |  | ||||||
|         raise ValueError('{} is not an instance of Plugin'.format(ext)) |  | ||||||
|     for name, cls in pluginloader.kind_map.iteritems(): |  | ||||||
|         if isinstance(ext, cls): |  | ||||||
|             return name |  | ||||||
|     raise ValueError('Unknown plugin type: {}'.format(ext.__class__.__name__)) |  | ||||||
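|  |  | ||||||
|  |  | ||||||
| # Hedged usage sketch (the loading call below is illustrative; only |  | ||||||
| # get_plugin_type() itself is defined in this module): |  | ||||||
| # |  | ||||||
| #     >>> workload = pluginloader.get_plugin('dhrystone', kind='workload') |  | ||||||
| #     >>> get_plugin_type(workload) |  | ||||||
| #     'workload' |  | ||||||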
| @@ -1,33 +0,0 @@ | |||||||
| import os |  | ||||||
| import shutil |  | ||||||
|  |  | ||||||
| from wlauto.core.configuration import settings |  | ||||||
|  |  | ||||||
| def init_user_directory(overwrite_existing=False):  # pylint: disable=R0914 |  | ||||||
|     """ |  | ||||||
|     Initialise a fresh user directory.  |  | ||||||
|     """ |  | ||||||
|     if os.path.exists(settings.user_directory): |  | ||||||
|         if not overwrite_existing: |  | ||||||
|             raise RuntimeError('Environment {} already exists.'.format(settings.user_directory)) |  | ||||||
|         shutil.rmtree(settings.user_directory) |  | ||||||
|  |  | ||||||
|     os.makedirs(settings.user_directory) |  | ||||||
|     os.makedirs(settings.dependencies_directory) |  | ||||||
|     os.makedirs(settings.plugins_directory) |  | ||||||
|  |  | ||||||
|     # TODO: generate default config.yaml here |  | ||||||
|  |  | ||||||
|     if os.getenv('USER') == 'root': |  | ||||||
|         # If running with sudo on POSIX, change the ownership to the real user. |  | ||||||
|         real_user = os.getenv('SUDO_USER') |  | ||||||
|         if real_user: |  | ||||||
|             import pwd  # done here as module won't import on win32 |  | ||||||
|             user_entry = pwd.getpwnam(real_user) |  | ||||||
|             uid, gid = user_entry.pw_uid, user_entry.pw_gid |  | ||||||
|             os.chown(settings.user_directory, uid, gid) |  | ||||||
|             # why, oh why isn't there a recursive=True option for os.chown? |  | ||||||
|             for root, dirs, files in os.walk(settings.user_directory): |  | ||||||
|                 for d in dirs: |  | ||||||
|                     os.chown(os.path.join(root, d), uid, gid) |  | ||||||
|                 for f in files:  |  | ||||||
|                     os.chown(os.path.join(root, f), uid, gid) |  | ||||||
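|  |  | ||||||
|  |  | ||||||
| # A reusable sketch of the recursive chown performed above (os.chown has no |  | ||||||
| # recursive flag, so the tree must be walked by hand): |  | ||||||
| def _chown_recursive(path, uid, gid): |  | ||||||
|     os.chown(path, uid, gid) |  | ||||||
|     for root, dirs, files in os.walk(path): |  | ||||||
|         for entry in dirs + files: |  | ||||||
|             os.chown(os.path.join(root, entry), uid, gid) |  | ||||||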
| @@ -1,399 +0,0 @@ | |||||||
| #    Copyright 2013-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
| """ |  | ||||||
| Adding New Instrument |  | ||||||
| ===================== |  | ||||||
|  |  | ||||||
| Any new instrument should be a subclass of Instrument and it must have a name. |  | ||||||
| When a new instrument is added to Workload Automation, the methods of the new |  | ||||||
| instrument will be found automatically and hooked up to the supported signals. |  | ||||||
| Once a signal is broadcast, the corresponding registered method is invoked. |  | ||||||
|  |  | ||||||
| Each method in Instrument must take two arguments: self and context. |  | ||||||
| Supported signals can be found in [... link to signals ...] To keep |  | ||||||
| implementations simple and consistent, the basic steps to add a new instrument |  | ||||||
| are similar to the steps to add a new workload. |  | ||||||
|  |  | ||||||
| Hence, implementing the following methods is sufficient to add a new |  | ||||||
| instrument: |  | ||||||
|  |  | ||||||
|     - setup: This method is invoked after the workload is set up. All the |  | ||||||
|        necessary setup should go inside this method: operations like pushing |  | ||||||
|        files to the target device, installing them, clearing logs, etc. |  | ||||||
|     - start: It is invoked just before the workload starts execution. This is |  | ||||||
|        where the instrument's measurements start being registered/taken. |  | ||||||
|     - stop: It is invoked just after the workload execution stops. The |  | ||||||
|        measurements should stop being taken/registered. |  | ||||||
|     - update_result: It is invoked after the workload has updated its result. |  | ||||||
|        update_result is where the taken measurements are added to the result so |  | ||||||
|        they can be processed by Workload Automation. |  | ||||||
|     - teardown: It is invoked after the workload is torn down. It is a good |  | ||||||
|        place to clean up any logs generated by the instrument. |  | ||||||
|  |  | ||||||
| For example, to add an instrument which will trace device errors, we subclass |  | ||||||
| Instrument and override the ``name`` attribute:: |  | ||||||
|  |  | ||||||
|         BINARY_FILE = os.path.join(os.path.dirname(__file__), 'trace') |  | ||||||
|  |  | ||||||
|         class TraceErrorsInstrument(Instrument): |  | ||||||
|  |  | ||||||
|             name = 'trace-errors' |  | ||||||
|  |  | ||||||
|             def __init__(self, device): |  | ||||||
|                 super(TraceErrorsInstrument, self).__init__(device) |  | ||||||
|                 self.trace_on_device = os.path.join(self.device.working_directory, 'trace') |  | ||||||
|  |  | ||||||
| We then declare and implement the aforementioned methods. For the setup method, |  | ||||||
| we want to push the file to the target device and then change the file mode to |  | ||||||
| 755 :: |  | ||||||
|  |  | ||||||
|     def setup(self, context): |  | ||||||
|         self.device.push(BINARY_FILE, self.device.working_directory) |  | ||||||
|         self.device.execute('chmod 755 {}'.format(self.trace_on_device)) |  | ||||||
|  |  | ||||||
| Then we implement the start method, which will simply run the file to start |  | ||||||
| tracing. :: |  | ||||||
|  |  | ||||||
|     def start(self, context): |  | ||||||
|         self.device.execute('{} start'.format(self.trace_on_device)) |  | ||||||
|  |  | ||||||
| Lastly, we need to stop tracing once the workload stops and this happens in the |  | ||||||
| stop method:: |  | ||||||
|  |  | ||||||
|     def stop(self, context): |  | ||||||
|         self.device.execute('{} stop'.format(self.trace_on_device)) |  | ||||||
|  |  | ||||||
| The generated result can be updated inside update_result, or, if it is a trace, |  | ||||||
| we just pull the file to the host. context has a result variable which |  | ||||||
| has an add_metric method. It can be used to add the instrument's metrics |  | ||||||
| to the final result for the workload. The method takes four parameters: the |  | ||||||
| metric key, value, unit and lower_is_better, which is a boolean. :: |  | ||||||
|  |  | ||||||
|     def update_result(self, context): |  | ||||||
|         # pull the trace file from the device to the host |  | ||||||
|         result = os.path.join(self.device.working_directory, 'trace.txt') |  | ||||||
|         self.device.pull(result, context.working_directory) |  | ||||||
|  |  | ||||||
|         # parse the file if it needs to be parsed, or add the result to |  | ||||||
|         # context.result |  | ||||||
|  |  | ||||||
| At the end, we might want to delete any files generated by the instrument, and |  | ||||||
| the code to clean up these files goes in the teardown method. :: |  | ||||||
|  |  | ||||||
|     def teardown(self, context): |  | ||||||
|         self.device.remove(os.path.join(self.device.working_directory, 'trace.txt')) |  | ||||||
|  |  | ||||||
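| Putting it all together, a minimal sketch of the complete instrument might look |  | ||||||
| like the following (the import path and the host-side location of the ``trace`` |  | ||||||
| binary are assumptions made for the sake of the example):: |  | ||||||
|  |  | ||||||
|     import os |  | ||||||
|  |  | ||||||
|     from wlauto import Instrument |  | ||||||
|  |  | ||||||
|     BINARY_FILE = os.path.join(os.path.dirname(__file__), 'trace') |  | ||||||
|  |  | ||||||
|     class TraceErrorsInstrument(Instrument): |  | ||||||
|  |  | ||||||
|         name = 'trace-errors' |  | ||||||
|  |  | ||||||
|         def __init__(self, device): |  | ||||||
|             super(TraceErrorsInstrument, self).__init__(device) |  | ||||||
|             self.trace_on_device = os.path.join(self.device.working_directory, 'trace') |  | ||||||
|  |  | ||||||
|         def setup(self, context): |  | ||||||
|             # push the tracing binary and make it executable |  | ||||||
|             self.device.push(BINARY_FILE, self.device.working_directory) |  | ||||||
|             self.device.execute('chmod 755 {}'.format(self.trace_on_device)) |  | ||||||
|  |  | ||||||
|         def start(self, context): |  | ||||||
|             self.device.execute('{} start'.format(self.trace_on_device)) |  | ||||||
|  |  | ||||||
|         def stop(self, context): |  | ||||||
|             self.device.execute('{} stop'.format(self.trace_on_device)) |  | ||||||
|  |  | ||||||
|         def update_result(self, context): |  | ||||||
|             # pull the trace file from the device into the output directory |  | ||||||
|             result = os.path.join(self.device.working_directory, 'trace.txt') |  | ||||||
|             self.device.pull(result, context.working_directory) |  | ||||||
|  |  | ||||||
|         def teardown(self, context): |  | ||||||
|             self.device.remove(os.path.join(self.device.working_directory, 'trace.txt')) |  | ||||||
|  |  | ||||||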
| """ |  | ||||||
|  |  | ||||||
| import logging |  | ||||||
| import inspect |  | ||||||
| from collections import OrderedDict |  | ||||||
|  |  | ||||||
| import wlauto.core.signal as signal |  | ||||||
| from wlauto.core.plugin import Plugin |  | ||||||
| from wlauto.exceptions import WAError, DeviceNotRespondingError, TimeoutError |  | ||||||
| from wlauto.utils.misc import get_traceback, isiterable |  | ||||||
| from wlauto.utils.types import identifier |  | ||||||
|  |  | ||||||
|  |  | ||||||
| logger = logging.getLogger('instrumentation') |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # Maps method names onto signals they should be registered to. |  | ||||||
| # Note: the begin/end signals are paired -- if a begin_ signal is sent, |  | ||||||
| #       then the corresponding end_ signal is guaranteed to also be sent. |  | ||||||
| # Note: using OrderedDict to preserve logical ordering for the table generated |  | ||||||
| #       in the documentation |  | ||||||
| SIGNAL_MAP = OrderedDict([ |  | ||||||
|     # Below are "aliases" for some of the more common signals to allow |  | ||||||
|     # instrumentation to have similar structure to workloads |  | ||||||
|     ('initialize', signal.RUN_INIT), |  | ||||||
|     ('setup', signal.SUCCESSFUL_WORKLOAD_SETUP), |  | ||||||
|     ('start', signal.BEFORE_WORKLOAD_EXECUTION), |  | ||||||
|     ('stop', signal.AFTER_WORKLOAD_EXECUTION), |  | ||||||
|     ('process_workload_result', signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE), |  | ||||||
|     ('update_result', signal.AFTER_WORKLOAD_RESULT_UPDATE), |  | ||||||
|     ('teardown', signal.AFTER_WORKLOAD_TEARDOWN), |  | ||||||
|     ('finalize', signal.RUN_FIN), |  | ||||||
|  |  | ||||||
|     ('on_run_start', signal.RUN_START), |  | ||||||
|     ('on_run_end', signal.RUN_END), |  | ||||||
|     ('on_workload_spec_start', signal.WORKLOAD_SPEC_START), |  | ||||||
|     ('on_workload_spec_end', signal.WORKLOAD_SPEC_END), |  | ||||||
|     ('on_iteration_start', signal.ITERATION_START), |  | ||||||
|     ('on_iteration_end', signal.ITERATION_END), |  | ||||||
|  |  | ||||||
|     ('before_initial_boot', signal.BEFORE_INITIAL_BOOT), |  | ||||||
|     ('on_successful_initial_boot', signal.SUCCESSFUL_INITIAL_BOOT), |  | ||||||
|     ('after_initial_boot', signal.AFTER_INITIAL_BOOT), |  | ||||||
|     ('before_first_iteration_boot', signal.BEFORE_FIRST_ITERATION_BOOT), |  | ||||||
|     ('on_successful_first_iteration_boot', signal.SUCCESSFUL_FIRST_ITERATION_BOOT), |  | ||||||
|     ('after_first_iteration_boot', signal.AFTER_FIRST_ITERATION_BOOT), |  | ||||||
|     ('before_boot', signal.BEFORE_BOOT), |  | ||||||
|     ('on_successful_boot', signal.SUCCESSFUL_BOOT), |  | ||||||
|     ('after_boot', signal.AFTER_BOOT), |  | ||||||
|  |  | ||||||
|     ('on_spec_init', signal.SPEC_INIT), |  | ||||||
|     ('on_run_init', signal.RUN_INIT), |  | ||||||
|     ('on_iteration_init', signal.ITERATION_INIT), |  | ||||||
|  |  | ||||||
|     ('before_workload_setup', signal.BEFORE_WORKLOAD_SETUP), |  | ||||||
|     ('on_successful_workload_setup', signal.SUCCESSFUL_WORKLOAD_SETUP), |  | ||||||
|     ('after_workload_setup', signal.AFTER_WORKLOAD_SETUP), |  | ||||||
|     ('before_workload_execution', signal.BEFORE_WORKLOAD_EXECUTION), |  | ||||||
|     ('on_successful_workload_execution', signal.SUCCESSFUL_WORKLOAD_EXECUTION), |  | ||||||
|     ('after_workload_execution', signal.AFTER_WORKLOAD_EXECUTION), |  | ||||||
|     ('before_workload_result_update', signal.BEFORE_WORKLOAD_RESULT_UPDATE), |  | ||||||
|     ('on_successful_workload_result_update', signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE), |  | ||||||
|     ('after_workload_result_update', signal.AFTER_WORKLOAD_RESULT_UPDATE), |  | ||||||
|     ('before_workload_teardown', signal.BEFORE_WORKLOAD_TEARDOWN), |  | ||||||
|     ('on_successful_workload_teardown', signal.SUCCESSFUL_WORKLOAD_TEARDOWN), |  | ||||||
|     ('after_workload_teardown', signal.AFTER_WORKLOAD_TEARDOWN), |  | ||||||
|  |  | ||||||
|     ('before_overall_results_processing', signal.BEFORE_OVERALL_RESULTS_PROCESSING), |  | ||||||
|     ('on_successful_overall_results_processing', signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING), |  | ||||||
|     ('after_overall_results_processing', signal.AFTER_OVERALL_RESULTS_PROCESSING), |  | ||||||
|  |  | ||||||
|     ('on_error', signal.ERROR_LOGGED), |  | ||||||
|     ('on_warning', signal.WARNING_LOGGED), |  | ||||||
| ]) |  | ||||||
|  |  | ||||||
| PRIORITY_MAP = OrderedDict([ |  | ||||||
|     ('very_fast_', 20), |  | ||||||
|     ('fast_', 10), |  | ||||||
|     ('normal_', 0), |  | ||||||
|     ('slow_', -10), |  | ||||||
|     ('very_slow_', -20), |  | ||||||
| ]) |  | ||||||
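|  |  | ||||||
| # For illustration (a sketch, not part of the original module): the priority |  | ||||||
| # prefix of an instrument method's name is stripped before the SIGNAL_MAP |  | ||||||
| # lookup, so a method named 'very_fast_stop' would be connected to |  | ||||||
| # AFTER_WORKLOAD_EXECUTION with priority 20, e.g.: |  | ||||||
| # |  | ||||||
| #     class MyInstrument(Instrument): |  | ||||||
| # |  | ||||||
| #         name = 'my-instrument' |  | ||||||
| # |  | ||||||
| #         def very_fast_stop(self, context): |  | ||||||
| #             # intended to be invoked ahead of normal-priority stop callbacks |  | ||||||
| #             pass |  | ||||||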
|  |  | ||||||
| installed = [] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def is_installed(instrument): |  | ||||||
|     if isinstance(instrument, Instrument): |  | ||||||
|         if instrument in installed: |  | ||||||
|             return True |  | ||||||
|         if instrument.name in [i.name for i in installed]: |  | ||||||
|             return True |  | ||||||
|     elif isinstance(instrument, type): |  | ||||||
|         if instrument in [i.__class__ for i in installed]: |  | ||||||
|             return True |  | ||||||
|     else:  # assume string |  | ||||||
|         if identifier(instrument) in [identifier(i.name) for i in installed]: |  | ||||||
|             return True |  | ||||||
|     return False |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def is_enabled(instrument): |  | ||||||
|     if isinstance(instrument, Instrument) or isinstance(instrument, type): |  | ||||||
|         name = instrument.name |  | ||||||
|     else:  # assume string |  | ||||||
|         name = instrument |  | ||||||
|     try: |  | ||||||
|         installed_instrument = get_instrument(name) |  | ||||||
|         return installed_instrument.is_enabled |  | ||||||
|     except ValueError: |  | ||||||
|         return False |  | ||||||
|  |  | ||||||
|  |  | ||||||
| failures_detected = False |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def reset_failures(): |  | ||||||
|     global failures_detected  # pylint: disable=W0603 |  | ||||||
|     failures_detected = False |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def check_failures(): |  | ||||||
|     result = failures_detected |  | ||||||
|     reset_failures() |  | ||||||
|     return result |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ManagedCallback(object): |  | ||||||
|     """ |  | ||||||
|     This wraps instruments' callbacks to ensure that errors do not interfere |  | ||||||
|     with run execution. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     def __init__(self, instrument, callback): |  | ||||||
|         self.instrument = instrument |  | ||||||
|         self.callback = callback |  | ||||||
|  |  | ||||||
|     def __call__(self, context): |  | ||||||
|         if self.instrument.is_enabled: |  | ||||||
|             try: |  | ||||||
|                 self.callback(context) |  | ||||||
|             except (KeyboardInterrupt, DeviceNotRespondingError, TimeoutError):  # pylint: disable=W0703 |  | ||||||
|                 raise |  | ||||||
|             except Exception as e:  # pylint: disable=W0703 |  | ||||||
|                 logger.error('Error in instrument {}'.format(self.instrument.name)) |  | ||||||
|                 global failures_detected  # pylint: disable=W0603 |  | ||||||
|                 failures_detected = True |  | ||||||
|                 if isinstance(e, WAError): |  | ||||||
|                     logger.error(e) |  | ||||||
|                 else: |  | ||||||
|                     tb = get_traceback() |  | ||||||
|                     logger.error(tb) |  | ||||||
|                     logger.error('{}({})'.format(e.__class__.__name__, e)) |  | ||||||
|                 if not context.current_iteration: |  | ||||||
|                     # Error occurred outside of an iteration (most likely |  | ||||||
|                     # during initial setup or teardown). Since this would affect |  | ||||||
|                     # the rest of the run, mark the instrument as broken so that |  | ||||||
|                     # it doesn't get re-enabled for subsequent iterations. |  | ||||||
|                     self.instrument.is_broken = True |  | ||||||
|                 disable(self.instrument) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # Need this to keep track of callbacks, because the dispatcher only keeps |  | ||||||
| # weak references, so if the callbacks aren't referenced elsewhere, they will |  | ||||||
| # be deallocated before they've had a chance to be invoked. |  | ||||||
| _callbacks = [] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def install(instrument): |  | ||||||
|     """ |  | ||||||
|     This will look for methods (or any callable members) with specific names |  | ||||||
|     in the instrument and hook them up to the corresponding signals. |  | ||||||
|  |  | ||||||
|     :param instrument: Instrument instance to install. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|     logger.debug('Installing instrument %s.', instrument) |  | ||||||
|     if is_installed(instrument): |  | ||||||
|         raise ValueError('Instrument {} is already installed.'.format(instrument.name)) |  | ||||||
|     for attr_name in dir(instrument): |  | ||||||
|         priority = 0 |  | ||||||
|         stripped_attr_name = attr_name |  | ||||||
|         for key, value in PRIORITY_MAP.iteritems(): |  | ||||||
|             if attr_name.startswith(key): |  | ||||||
|                 stripped_attr_name = attr_name[len(key):] |  | ||||||
|                 priority = value |  | ||||||
|                 break |  | ||||||
|         if stripped_attr_name in SIGNAL_MAP: |  | ||||||
|             attr = getattr(instrument, attr_name) |  | ||||||
|             if not callable(attr): |  | ||||||
|                 raise ValueError('Attribute {} not callable in {}.'.format(attr_name, instrument)) |  | ||||||
|             argspec = inspect.getargspec(attr) |  | ||||||
|             arg_num = len(argspec.args) |  | ||||||
|             # Instrument callbacks will be passed exactly two arguments: self |  | ||||||
|             # (the instrument instance to which the callback is bound) and |  | ||||||
|             # context. However, we also allow callbacks to capture the context |  | ||||||
|             # in variable arguments (declared as "*args" in the definition). |  | ||||||
|             if arg_num > 2 or (arg_num < 2 and argspec.varargs is None): |  | ||||||
|                 message = '{} must take exactly 2 positional arguments; {} given.' |  | ||||||
|                 raise ValueError(message.format(attr_name, arg_num)) |  | ||||||
|  |  | ||||||
|             logger.debug('\tConnecting %s to %s', attr.__name__, SIGNAL_MAP[stripped_attr_name]) |  | ||||||
|             mc = ManagedCallback(instrument, attr) |  | ||||||
|             _callbacks.append(mc) |  | ||||||
|             signal.connect(mc, SIGNAL_MAP[stripped_attr_name], priority=priority) |  | ||||||
|     installed.append(instrument) |  | ||||||
|  |  | ||||||
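| # Typical usage of the module-level API (a sketch; MyInstrument is |  | ||||||
| # hypothetical): |  | ||||||
| # |  | ||||||
| #     instrument = MyInstrument(device) |  | ||||||
| #     install(instrument) |  | ||||||
| #     validate() |  | ||||||
| #     ...  # run the workload(s) |  | ||||||
| #     uninstall(instrument) |  | ||||||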
|  |  | ||||||
| def uninstall(instrument): |  | ||||||
|     instrument = get_instrument(instrument) |  | ||||||
|     installed.remove(instrument) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def validate(): |  | ||||||
|     for instrument in installed: |  | ||||||
|         instrument.validate() |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def get_instrument(inst): |  | ||||||
|     if isinstance(inst, Instrument): |  | ||||||
|         return inst |  | ||||||
|     for installed_inst in installed: |  | ||||||
|         if identifier(installed_inst.name) == identifier(inst): |  | ||||||
|             return installed_inst |  | ||||||
|     raise ValueError('Instrument {} is not installed'.format(inst)) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def disable_all(): |  | ||||||
|     for instrument in installed: |  | ||||||
|         _disable_instrument(instrument) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def enable_all(): |  | ||||||
|     for instrument in installed: |  | ||||||
|         _enable_instrument(instrument) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def enable(to_enable): |  | ||||||
|     if isiterable(to_enable): |  | ||||||
|         for inst in to_enable: |  | ||||||
|             _enable_instrument(inst) |  | ||||||
|     else: |  | ||||||
|         _enable_instrument(to_enable) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def disable(to_disable): |  | ||||||
|     if isiterable(to_disable): |  | ||||||
|         for inst in to_disable: |  | ||||||
|             _disable_instrument(inst) |  | ||||||
|     else: |  | ||||||
|         _disable_instrument(to_disable) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def _enable_instrument(inst): |  | ||||||
|     inst = get_instrument(inst) |  | ||||||
|     if not inst.is_broken: |  | ||||||
|         logger.debug('Enabling instrument {}'.format(inst.name)) |  | ||||||
|         inst.is_enabled = True |  | ||||||
|     else: |  | ||||||
|         logger.debug('Not enabling broken instrument {}'.format(inst.name)) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def _disable_instrument(inst): |  | ||||||
|     inst = get_instrument(inst) |  | ||||||
|     if inst.is_enabled: |  | ||||||
|         logger.debug('Disabling instrument {}'.format(inst.name)) |  | ||||||
|         inst.is_enabled = False |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def get_enabled(): |  | ||||||
|     return [i for i in installed if i.is_enabled] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def get_disabled(): |  | ||||||
|     return [i for i in installed if not i.is_enabled] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class Instrument(Plugin): |  | ||||||
|     """ |  | ||||||
|     Base class for instrumentation implementations. |  | ||||||
|     """ |  | ||||||
|     kind = "instrument" |  | ||||||
|  |  | ||||||
|     def __init__(self, target, **kwargs): |  | ||||||
|         super(Instrument, self).__init__(**kwargs) |  | ||||||
|         self.target = target |  | ||||||
|         self.is_enabled = True |  | ||||||
|         self.is_broken = False |  | ||||||
|  |  | ||||||
|     def initialize(self, context): |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     def finalize(self, context): |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     def __str__(self): |  | ||||||
|         return self.name |  | ||||||
|  |  | ||||||
|     def __repr__(self): |  | ||||||
|         return 'Instrument({})'.format(self.name) |  | ||||||
| @@ -1,188 +0,0 @@ | |||||||
| import logging |  | ||||||
| import os |  | ||||||
| import shutil |  | ||||||
| import string |  | ||||||
| import sys |  | ||||||
| import uuid |  | ||||||
| from copy import copy |  | ||||||
|  |  | ||||||
| from wlauto.core.configuration.configuration import JobSpec |  | ||||||
| from wlauto.core.configuration.manager import ConfigManager |  | ||||||
| from wlauto.core.device_manager import TargetInfo |  | ||||||
| from wlauto.utils.misc import touch |  | ||||||
| from wlauto.utils.serializer import write_pod, read_pod |  | ||||||
|  |  | ||||||
|  |  | ||||||
| logger = logging.getLogger('output') |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class RunInfo(object): |  | ||||||
|     """ |  | ||||||
|     Information about the current run, such as its unique ID, run |  | ||||||
|     time, etc. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|     @staticmethod |  | ||||||
|     def from_pod(pod): |  | ||||||
|         uid = pod.pop('uuid') |  | ||||||
|         if uid is not None: |  | ||||||
|             uid = uuid.UUID(uid) |  | ||||||
|         instance = RunInfo(**pod) |  | ||||||
|         instance.uuid = uid |  | ||||||
|         return instance |  | ||||||
|  |  | ||||||
|     def __init__(self, run_name=None, project=None, project_stage=None, |  | ||||||
|                  start_time=None, end_time=None, duration=None): |  | ||||||
|         self.uuid = uuid.uuid4() |  | ||||||
|         self.run_name = run_name |  | ||||||
|         self.project = project |  | ||||||
|         self.project_stage = project_stage |  | ||||||
|         self.start_time = start_time |  | ||||||
|         self.end_time = end_time |  | ||||||
|         self.duration = duration |  | ||||||
|  |  | ||||||
|     def to_pod(self): |  | ||||||
|         d = copy(self.__dict__) |  | ||||||
|         d['uuid'] = str(self.uuid) |  | ||||||
|         return d |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class RunState(object): |  | ||||||
|     """ |  | ||||||
|     Represents the state of a WA run. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|     @staticmethod |  | ||||||
|     def from_pod(pod): |  | ||||||
|         return RunState() |  | ||||||
|  |  | ||||||
|     def __init__(self): |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     def to_pod(self): |  | ||||||
|         return {} |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class RunOutput(object): |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def logfile(self): |  | ||||||
|         return os.path.join(self.basepath, 'run.log') |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def metadir(self): |  | ||||||
|         return os.path.join(self.basepath, '__meta') |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def infofile(self): |  | ||||||
|         return os.path.join(self.metadir, 'run_info.json') |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def statefile(self): |  | ||||||
|         return os.path.join(self.basepath, '.run_state.json') |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def configfile(self): |  | ||||||
|         return os.path.join(self.metadir, 'config.json') |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def targetfile(self): |  | ||||||
|         return os.path.join(self.metadir, 'target_info.json') |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def jobsfile(self): |  | ||||||
|         return os.path.join(self.metadir, 'jobs.json') |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def raw_config_dir(self): |  | ||||||
|         return os.path.join(self.metadir, 'raw_config') |  | ||||||
|  |  | ||||||
|     def __init__(self, path): |  | ||||||
|         self.basepath = path |  | ||||||
|         self.info = None |  | ||||||
|         self.state = None |  | ||||||
|         if (not os.path.isfile(self.statefile) or |  | ||||||
|                 not os.path.isfile(self.infofile)): |  | ||||||
|             msg = '"{}" does not exist or is not a valid WA output directory.' |  | ||||||
|             raise ValueError(msg.format(self.basepath)) |  | ||||||
|         self.reload() |  | ||||||
|  |  | ||||||
|     def reload(self): |  | ||||||
|         self.info = RunInfo.from_pod(read_pod(self.infofile)) |  | ||||||
|         self.state = RunState.from_pod(read_pod(self.statefile)) |  | ||||||
|  |  | ||||||
|     def write_info(self): |  | ||||||
|         write_pod(self.info.to_pod(), self.infofile) |  | ||||||
|  |  | ||||||
|     def write_state(self): |  | ||||||
|         write_pod(self.state.to_pod(), self.statefile) |  | ||||||
|  |  | ||||||
|     def write_config(self, config): |  | ||||||
|         write_pod(config.to_pod(), self.configfile) |  | ||||||
|  |  | ||||||
|     def read_config(self): |  | ||||||
|         if not os.path.isfile(self.configfile): |  | ||||||
|             return None |  | ||||||
|         return ConfigManager.from_pod(read_pod(self.configfile)) |  | ||||||
|  |  | ||||||
|     def write_target_info(self, ti): |  | ||||||
|         write_pod(ti.to_pod(), self.targetfile) |  | ||||||
|  |  | ||||||
|     def read_target_info(self): |  | ||||||
|         if not os.path.isfile(self.targetfile): |  | ||||||
|             return None |  | ||||||
|         return TargetInfo.from_pod(read_pod(self.targetfile)) |  | ||||||
|  |  | ||||||
|     def write_job_specs(self, job_specs): |  | ||||||
|         js_pod = {'jobs': [js.to_pod() for js in job_specs]} |  | ||||||
|         write_pod(js_pod, self.jobsfile) |  | ||||||
|  |  | ||||||
|     def read_job_specs(self): |  | ||||||
|         if not os.path.isfile(self.jobsfile): |  | ||||||
|             return None |  | ||||||
|         pod = read_pod(self.jobsfile) |  | ||||||
|         return [JobSpec.from_pod(jp) for jp in pod['jobs']] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def init_wa_output(path, wa_state, force=False): |  | ||||||
|     if os.path.exists(path): |  | ||||||
|         if force: |  | ||||||
|             logger.info('Removing existing output directory.') |  | ||||||
|             shutil.rmtree(os.path.abspath(path)) |  | ||||||
|         else: |  | ||||||
|             raise RuntimeError('path exists: {}'.format(path)) |  | ||||||
|  |  | ||||||
|     logger.info('Creating output directory.') |  | ||||||
|     os.makedirs(path) |  | ||||||
|     meta_dir = os.path.join(path, '__meta') |  | ||||||
|     os.makedirs(meta_dir) |  | ||||||
|     _save_raw_config(meta_dir, wa_state) |  | ||||||
|     touch(os.path.join(path, 'run.log')) |  | ||||||
|  |  | ||||||
|     info = RunInfo( |  | ||||||
|             run_name=wa_state.run_config.run_name, |  | ||||||
|             project=wa_state.run_config.project, |  | ||||||
|             project_stage=wa_state.run_config.project_stage, |  | ||||||
|            ) |  | ||||||
|     write_pod(info.to_pod(), os.path.join(meta_dir, 'run_info.json')) |  | ||||||
|      |  | ||||||
|     with open(os.path.join(path, '.run_state.json'), 'w') as wfh: |  | ||||||
|         wfh.write('{}') |  | ||||||
|  |  | ||||||
|     return RunOutput(path) |  | ||||||
|  |  | ||||||
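| # For reference, the directory layout produced by init_wa_output() (as implied |  | ||||||
| # by the code above and the RunOutput properties) is: |  | ||||||
| # |  | ||||||
| #     <path>/ |  | ||||||
| #         run.log |  | ||||||
| #         .run_state.json |  | ||||||
| #         __meta/ |  | ||||||
| #             run_info.json |  | ||||||
| #             raw_config/ |  | ||||||
| #                 cfg0-<basename>, cfg1-<basename>, ... |  | ||||||
| # |  | ||||||
| # config.json, jobs.json, and target_info.json are written into __meta/ later, |  | ||||||
| # via the write_* methods above. |  | ||||||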
|  |  | ||||||
| def _save_raw_config(meta_dir, state): |  | ||||||
|     raw_config_dir = os.path.join(meta_dir, 'raw_config') |  | ||||||
|     os.makedirs(raw_config_dir) |  | ||||||
|  |  | ||||||
|     for i, source in enumerate(state.loaded_config_sources): |  | ||||||
|         if not os.path.isfile(source): |  | ||||||
|             continue |  | ||||||
|         basename = os.path.basename(source) |  | ||||||
|         dest_path = os.path.join(raw_config_dir, 'cfg{}-{}'.format(i, basename)) |  | ||||||
|         shutil.copy(source, dest_path) |  | ||||||
|  |  | ||||||
| @@ -1,793 +0,0 @@ | |||||||
| #    Copyright 2013-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # pylint: disable=E1101 |  | ||||||
| import os |  | ||||||
| import sys |  | ||||||
| import inspect |  | ||||||
| import imp |  | ||||||
| import string |  | ||||||
| import logging |  | ||||||
| from collections import OrderedDict, defaultdict |  | ||||||
| from itertools import chain |  | ||||||
| from copy import copy |  | ||||||
|  |  | ||||||
| from wlauto.exceptions import NotFoundError, LoaderError, ValidationError, ConfigError, HostError |  | ||||||
| from wlauto.utils.misc import (ensure_directory_exists as _d, |  | ||||||
|                                walk_modules, load_class, merge_dicts_simple, get_article) |  | ||||||
| from wlauto.core.configuration import settings |  | ||||||
| from wlauto.utils.types import identifier, boolean |  | ||||||
| from wlauto.core.configuration.configuration import ConfigurationPoint as Parameter |  | ||||||
|  |  | ||||||
|  |  | ||||||
| MODNAME_TRANS = string.maketrans(':/\\.', '____') |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class AttributeCollection(object): |  | ||||||
|     """ |  | ||||||
|     Accumulator for plugin attribute objects (such as Parameters or Artifacts). This will |  | ||||||
|     replace any class member list accumulating such attributes through the magic of |  | ||||||
|     metaprogramming\ [*]_. |  | ||||||
|  |  | ||||||
|     .. [*] which is totally safe and not going to backfire in any way... |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def values(self): |  | ||||||
|         return self._attrs.values() |  | ||||||
|  |  | ||||||
|     def __init__(self, attrcls): |  | ||||||
|         self._attrcls = attrcls |  | ||||||
|         self._attrs = OrderedDict() |  | ||||||
|  |  | ||||||
|     def add(self, p): |  | ||||||
|         p = self._to_attrcls(p) |  | ||||||
|         if p.name in self._attrs: |  | ||||||
|             if p.override: |  | ||||||
|                 newp = copy(self._attrs[p.name]) |  | ||||||
|                 for a, v in p.__dict__.iteritems(): |  | ||||||
|                     if v is not None: |  | ||||||
|                         setattr(newp, a, v) |  | ||||||
|                 if not hasattr(newp, "_overridden"): |  | ||||||
|                     newp._overridden = p._owner |  | ||||||
|                 self._attrs[p.name] = newp |  | ||||||
|             else: |  | ||||||
|                 # Duplicate attribute condition is checked elsewhere. |  | ||||||
|                 pass |  | ||||||
|         else: |  | ||||||
|             self._attrs[p.name] = p |  | ||||||
|  |  | ||||||
|     append = add |  | ||||||
|  |  | ||||||
|     def __str__(self): |  | ||||||
|         return 'AC({})'.format(map(str, self._attrs.values())) |  | ||||||
|  |  | ||||||
|     __repr__ = __str__ |  | ||||||
|  |  | ||||||
|     def _to_attrcls(self, p): |  | ||||||
|         old_owner = getattr(p, "_owner", None) |  | ||||||
|         if isinstance(p, basestring): |  | ||||||
|             p = self._attrcls(p) |  | ||||||
|         elif isinstance(p, tuple) or isinstance(p, list): |  | ||||||
|             p = self._attrcls(*p) |  | ||||||
|         elif isinstance(p, dict): |  | ||||||
|             p = self._attrcls(**p) |  | ||||||
|         elif not isinstance(p, self._attrcls): |  | ||||||
|             raise ValueError('Invalid parameter value: {}'.format(p)) |  | ||||||
|         if (p.name in self._attrs and not p.override and |  | ||||||
|                 p.name != 'modules'):  # TODO: HACK due to "diamond dependency" in workloads... |  | ||||||
|             raise ValueError('Attribute {} has already been defined.'.format(p.name)) |  | ||||||
|         p._owner = old_owner |  | ||||||
|         return p |  | ||||||
|  |  | ||||||
|     def __iadd__(self, other): |  | ||||||
|         for p in other: |  | ||||||
|             self.add(p) |  | ||||||
|         return self |  | ||||||
|  |  | ||||||
|     def __iter__(self): |  | ||||||
|         return iter(self.values) |  | ||||||
|  |  | ||||||
|     def __contains__(self, p): |  | ||||||
|         return p in self._attrs |  | ||||||
|  |  | ||||||
|     def __getitem__(self, i): |  | ||||||
|         return self._attrs[i] |  | ||||||
|  |  | ||||||
|     def __len__(self): |  | ||||||
|         return len(self._attrs) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class AliasCollection(AttributeCollection): |  | ||||||
|  |  | ||||||
|     def __init__(self): |  | ||||||
|         super(AliasCollection, self).__init__(Alias) |  | ||||||
|  |  | ||||||
|     def _to_attrcls(self, p): |  | ||||||
|         if isinstance(p, tuple) or isinstance(p, list): |  | ||||||
|             # must be in the form (name, {param: value, ...}) |  | ||||||
|             p = self._attrcls(p[0], **p[1]) |  | ||||||
|         elif not isinstance(p, self._attrcls): |  | ||||||
|             raise ValueError('Invalid parameter value: {}'.format(p)) |  | ||||||
|         if p.name in self._attrs: |  | ||||||
|             raise ValueError('Attribute {} has already been defined.'.format(p.name)) |  | ||||||
|         return p |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ListCollection(list): |  | ||||||
|  |  | ||||||
|     def __init__(self, attrcls):  # pylint: disable=unused-argument |  | ||||||
|         super(ListCollection, self).__init__() |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class Artifact(object): |  | ||||||
|     """ |  | ||||||
|     This is an artifact generated during execution/post-processing of a workload. |  | ||||||
|     Unlike metrics, this represents an actual artifact, such as a file, that has |  | ||||||
|     been generated. This may be a "result", such as a trace, or "metadata", such |  | ||||||
|     as logs. |  | ||||||
|     These are distinguished using the ``kind`` attribute, which also helps WA decide |  | ||||||
|     how it should be handled. Currently supported kinds are: |  | ||||||
|  |  | ||||||
|         :log: A log file. Not part of "results" as such, but contains information |  | ||||||
|               about the run/workload execution that may be useful for diagnostics/meta analysis. |  | ||||||
|         :meta: A file containing metadata. This is not part of "results", but contains |  | ||||||
|                information that may be necessary to reproduce the results (contrast with |  | ||||||
|                ``log`` artifacts which are *not* necessary). |  | ||||||
|         :data: This file contains new data, not available otherwise and should be considered |  | ||||||
|                part of the "results" generated by WA. Most traces would fall into this category. |  | ||||||
|         :export: Exported version of results or some other artifact. This signifies that |  | ||||||
|                  this artifact does not contain any new data that is not available |  | ||||||
|                  elsewhere and that it may be safely discarded without losing information. |  | ||||||
|         :raw: Signifies that this is a raw dump/log that is normally processed to extract |  | ||||||
|               useful information and is then discarded. In a sense, it is the opposite of |  | ||||||
|               ``export``, but in general may also be discarded. |  | ||||||
|  |  | ||||||
|               .. note:: whether a file is marked as ``log``/``data`` or ``raw`` depends on |  | ||||||
|                         how important it is to preserve this file, e.g. when archiving, vs |  | ||||||
|                         how much space it takes up. Unlike ``export`` artifacts which are |  | ||||||
|                         (almost) always ignored by other exporters as that would never result |  | ||||||
|                         in data loss, ``raw`` files *may* be processed by exporters if they |  | ||||||
|                         decided that the risk of losing potentially (though unlikely) useful |  | ||||||
|                         data is greater than the time/space cost of handling the artifact (e.g. |  | ||||||
|                         a database uploader may choose to ignore ``raw`` artifacts, where as a |  | ||||||
|                         network filer archiver may choose to archive them). |  | ||||||
|  |  | ||||||
|         .. note: The kind parameter is intended to represent the logical function of a particular |  | ||||||
|                  artifact, not its intended means of processing -- this is left entirely up to the |  | ||||||
|                  result processors. |  | ||||||
|  |  | ||||||
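|     For illustration, a plugin might declare an artifact like this (the names |  | ||||||
|     and values are hypothetical):: |  | ||||||
|  |  | ||||||
|         artifacts = [ |  | ||||||
|             Artifact('trace', 'trace.txt', kind='data', level='iteration', |  | ||||||
|                      mandatory=False, description='Raw trace pulled from the device.'), |  | ||||||
|         ] |  | ||||||
|  |  | ||||||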
|     """ |  | ||||||
|  |  | ||||||
|     RUN = 'run' |  | ||||||
|     ITERATION = 'iteration' |  | ||||||
|  |  | ||||||
|     valid_kinds = ['log', 'meta', 'data', 'export', 'raw'] |  | ||||||
|  |  | ||||||
|     def __init__(self, name, path, kind, level=RUN, mandatory=False, description=None): |  | ||||||
|         """" |  | ||||||
|         :param name: Name that uniquely identifies this artifact. |  | ||||||
|         :param path: The *relative* path of the artifact. Depending on the ``level`` |  | ||||||
|                      must be either relative to the run or iteration output directory. |  | ||||||
|                      Note: this path *must* be delimited using ``/`` irrespective of the |  | ||||||
|                      operating system. |  | ||||||
|         :param kind: The type of the artifact this is (e.g. log file, result, etc.); this |  | ||||||
|                      will be used as a hint to result processors. This must be one of ``'log'``, |  | ||||||
|                      ``'meta'``, ``'data'``, ``'export'``, ``'raw'``. |  | ||||||
|         :param level: The level at which the artifact will be generated. Must be either |  | ||||||
|                       ``'iteration'`` or ``'run'``. |  | ||||||
|         :param mandatory: Boolean value indicating whether this artifact must be present |  | ||||||
|                           at the end of result processing for its level. |  | ||||||
|         :param description: A free-form description of what this artifact is. |  | ||||||
|  |  | ||||||
|         """ |  | ||||||
|         if kind not in self.valid_kinds: |  | ||||||
|             raise ValueError('Invalid Artifact kind: {}; must be in {}'.format(kind, self.valid_kinds)) |  | ||||||
|         self.name = name |  | ||||||
|         self.path = path.replace('/', os.sep) if path is not None else path |  | ||||||
|         self.kind = kind |  | ||||||
|         self.level = level |  | ||||||
|         self.mandatory = mandatory |  | ||||||
|         self.description = description |  | ||||||
|  |  | ||||||
|     def exists(self, context): |  | ||||||
|         """Returns ``True`` if artifact exists within the specified context, and |  | ||||||
|         ``False`` otherwise.""" |  | ||||||
|         fullpath = os.path.join(context.output_directory, self.path) |  | ||||||
|         return os.path.exists(fullpath) |  | ||||||
|  |  | ||||||
|     def to_dict(self): |  | ||||||
|         return copy(self.__dict__) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class Alias(object): |  | ||||||
|     """ |  | ||||||
|     This represents a configuration alias for a plugin, mapping an alternative name to |  | ||||||
|     a set of parameter values, effectively providing an alternative set of default values. |  | ||||||
|  |  | ||||||
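|     For example (a sketch with hypothetical names and parameters), a plugin |  | ||||||
|     might declare:: |  | ||||||
|  |  | ||||||
|         class MyWorkload(Workload): |  | ||||||
|  |  | ||||||
|             name = 'mywl' |  | ||||||
|             aliases = [Alias('mywl-quick', duration=5)] |  | ||||||
|  |  | ||||||
|     which allows ``mywl-quick`` to be used in configuration in place of |  | ||||||
|     ``mywl``, with ``duration`` defaulting to ``5``. |  | ||||||
|  |  | ||||||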
|     """ |  | ||||||
|  |  | ||||||
|     def __init__(self, name, **kwargs): |  | ||||||
|         self.name = name |  | ||||||
|         self.params = kwargs |  | ||||||
|         self.plugin_name = None  # gets set by the MetaClass |  | ||||||
|  |  | ||||||
|     def validate(self, ext): |  | ||||||
|         ext_params = set(p.name for p in ext.parameters) |  | ||||||
|         for param in self.params: |  | ||||||
|             if param not in ext_params: |  | ||||||
|                 # Raising config error because aliases might have come through |  | ||||||
|                 # the config. |  | ||||||
|                 msg = 'Parameter {} (defined in alias {}) is invalid for {}' |  | ||||||
|                 raise ConfigError(msg.format(param, self.name, ext.name)) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class PluginMeta(type): |  | ||||||
|     """ |  | ||||||
|     This basically adds some magic to plugins to make implementing new plugins, |  | ||||||
|     such as workloads, less complicated. |  | ||||||
|  |  | ||||||
|     It ensures that certain class attributes (specified by the ``to_propagate`` |  | ||||||
|     attribute of the metaclass) get propagated down the inheritance hierarchy. The assumption |  | ||||||
|     is that the values of the attributes specified in the class are iterable; if that is not met, |  | ||||||
|     Bad Things (tm) will happen. |  | ||||||
|  |  | ||||||
|     This also provides virtual method implementation, similar to those in C-derived OO languages, |  | ||||||
|     and alias specifications. |  | ||||||
|  |  | ||||||
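|     For illustration (hypothetical classes), the propagation of ``parameters`` |  | ||||||
|     works like this:: |  | ||||||
|  |  | ||||||
|         class BaseWorkload(Plugin): |  | ||||||
|             parameters = [Parameter('duration', kind=int, default=10)] |  | ||||||
|  |  | ||||||
|         class MyWorkload(BaseWorkload): |  | ||||||
|             parameters = [Parameter('iterations', kind=int, default=1)] |  | ||||||
|  |  | ||||||
|         # MyWorkload.parameters now contains both 'duration' and 'iterations'. |  | ||||||
|  |  | ||||||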
|     """ |  | ||||||
|  |  | ||||||
|     to_propagate = [ |  | ||||||
|         ('parameters', Parameter, AttributeCollection), |  | ||||||
|         ('artifacts', Artifact, AttributeCollection), |  | ||||||
|         ('core_modules', str, ListCollection), |  | ||||||
|     ] |  | ||||||
|  |  | ||||||
|     virtual_methods = ['validate', 'initialize', 'finalize'] |  | ||||||
|     global_virtuals = ['initialize', 'finalize'] |  | ||||||
|  |  | ||||||
|     def __new__(mcs, clsname, bases, attrs): |  | ||||||
|         mcs._propagate_attributes(bases, attrs, clsname) |  | ||||||
|         cls = type.__new__(mcs, clsname, bases, attrs) |  | ||||||
|         mcs._setup_aliases(cls) |  | ||||||
|         mcs._implement_virtual(cls, bases) |  | ||||||
|         return cls |  | ||||||
|  |  | ||||||
|     @classmethod |  | ||||||
|     def _propagate_attributes(mcs, bases, attrs, clsname): |  | ||||||
|         """ |  | ||||||
|         For attributes specified by to_propagate, their values will be a union of |  | ||||||
|         that specified for cls and its bases (cls values overriding those of bases |  | ||||||
|         in case of conflicts). |  | ||||||
|  |  | ||||||
|         """ |  | ||||||
|         for prop_attr, attr_cls, attr_collector_cls in mcs.to_propagate: |  | ||||||
|             should_propagate = False |  | ||||||
|             propagated = attr_collector_cls(attr_cls) |  | ||||||
|             for base in bases: |  | ||||||
|                 if hasattr(base, prop_attr): |  | ||||||
|                     propagated += getattr(base, prop_attr) or [] |  | ||||||
|                     should_propagate = True |  | ||||||
|             if prop_attr in attrs: |  | ||||||
|                 pattrs = attrs[prop_attr] or [] |  | ||||||
|                 for pa in pattrs: |  | ||||||
|                     if not isinstance(pa, basestring): |  | ||||||
|                         pa._owner = clsname |  | ||||||
|                 propagated += pattrs |  | ||||||
|                 should_propagate = True |  | ||||||
|             if should_propagate: |  | ||||||
|                 for p in propagated: |  | ||||||
|                     override = bool(getattr(p, "override", None)) |  | ||||||
|                     overridden = bool(getattr(p, "_overridden", None)) |  | ||||||
|                     if override != overridden: |  | ||||||
|                         msg = "Overriding non existing parameter '{}' inside '{}'" |  | ||||||
|                         raise ValueError(msg.format(p.name, p._owner)) |  | ||||||
|                 attrs[prop_attr] = propagated |  | ||||||
|  |  | ||||||
|     @classmethod |  | ||||||
|     def _setup_aliases(mcs, cls): |  | ||||||
|         if hasattr(cls, 'aliases'): |  | ||||||
|             aliases, cls.aliases = cls.aliases, AliasCollection() |  | ||||||
|             for alias in aliases: |  | ||||||
|                 if isinstance(alias, basestring): |  | ||||||
|                     alias = Alias(alias) |  | ||||||
|                 alias.validate(cls) |  | ||||||
|                 alias.plugin_name = cls.name |  | ||||||
|                 cls.aliases.add(alias) |  | ||||||
|  |  | ||||||
|     @classmethod |  | ||||||
|     def _implement_virtual(mcs, cls, bases): |  | ||||||
|         """ |  | ||||||
|         This implements automatic method propagation to the bases, so |  | ||||||
|         that you don't have to do something like |  | ||||||
|  |  | ||||||
|             super(cls, self).vmname() |  | ||||||
|  |  | ||||||
|         This also ensures that the methods that have been identified as |  | ||||||
|         "globally virtual" are executed exactly once per WA execution, even if |  | ||||||
|         invoked through instances of different subclasses. |  | ||||||
|  |  | ||||||
|         """ |  | ||||||
|         methods = {} |  | ||||||
|         called_globals = set() |  | ||||||
|         for vmname in mcs.virtual_methods: |  | ||||||
|             clsmethod = getattr(cls, vmname, None) |  | ||||||
|             if clsmethod: |  | ||||||
|                 basemethods = [getattr(b, vmname) for b in bases if hasattr(b, vmname)] |  | ||||||
|                 methods[vmname] = [bm for bm in basemethods if bm != clsmethod] |  | ||||||
|                 methods[vmname].append(clsmethod) |  | ||||||
|  |  | ||||||
|                 def generate_method_wrapper(vname):  # pylint: disable=unused-argument |  | ||||||
|                     # this creates a closure with the method name so that it |  | ||||||
|                     # does not need to be passed to the wrapper as an argument, |  | ||||||
|                     # leaving the wrapper to accept exactly the same set of |  | ||||||
|                     # arguments as the method it is wrapping. |  | ||||||
|                     name__ = vmname  # pylint: disable=cell-var-from-loop |  | ||||||
|  |  | ||||||
|                     def wrapper(self, *args, **kwargs): |  | ||||||
|                         for dm in methods[name__]: |  | ||||||
|                             if name__ in mcs.global_virtuals: |  | ||||||
|                                 if dm not in called_globals: |  | ||||||
|                                     dm(self, *args, **kwargs) |  | ||||||
|                                     called_globals.add(dm) |  | ||||||
|                             else: |  | ||||||
|                                 dm(self, *args, **kwargs) |  | ||||||
|                     return wrapper |  | ||||||
|  |  | ||||||
|                 setattr(cls, vmname, generate_method_wrapper(vmname)) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class Plugin(object): |  | ||||||
|     """ |  | ||||||
|     Base class for all WA plugins. A plugin is basically a plug-in. |  | ||||||
|     It extends the functionality of WA in some way. Plugins are discovered |  | ||||||
|     and loaded dynamically by the plugin loader upon invocation of WA scripts. |  | ||||||
|     Adding a plugin is a matter of placing a class that implements an appropriate |  | ||||||
|     interface somewhere it would be discovered by the loader. That "somewhere" is |  | ||||||
|     typically one of the plugin subdirectories under ``~/.workload_automation/``. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|     __metaclass__ = PluginMeta |  | ||||||
|  |  | ||||||
|     kind = None |  | ||||||
|     name = None |  | ||||||
|     parameters = [ |  | ||||||
|         Parameter('modules', kind=list, |  | ||||||
|                   description=""" |  | ||||||
|                   Lists the modules to be loaded by this plugin. A module is a plug-in that |  | ||||||
|                   further extends the functionality of a plugin. |  | ||||||
|                   """), |  | ||||||
|     ] |  | ||||||
|     artifacts = [] |  | ||||||
|     aliases = [] |  | ||||||
|     core_modules = [] |  | ||||||
|  |  | ||||||
|     @classmethod |  | ||||||
|     def get_default_config(cls): |  | ||||||
|         return {p.name: p.default for p in cls.parameters} |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def dependencies_directory(self): |  | ||||||
|         return _d(os.path.join(settings.dependencies_directory, self.name)) |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def _classname(self): |  | ||||||
|         return self.__class__.__name__ |  | ||||||
|  |  | ||||||
|     def __init__(self, **kwargs): |  | ||||||
|         self.logger = logging.getLogger(self._classname) |  | ||||||
|         self._modules = [] |  | ||||||
|         self.capabilities = getattr(self.__class__, 'capabilities', []) |  | ||||||
|         for param in self.parameters: |  | ||||||
|             param.set_value(self, kwargs.get(param.name)) |  | ||||||
|         for key in kwargs: |  | ||||||
|             if key not in self.parameters: |  | ||||||
|                 message = 'Unexpected parameter "{}" for {}' |  | ||||||
|                 raise ConfigError(message.format(key, self.name)) |  | ||||||
|  |  | ||||||
|     def get_config(self): |  | ||||||
|         """ |  | ||||||
|         Returns current configuration (i.e. parameter values) of this plugin. |  | ||||||
|  |  | ||||||
|         """ |  | ||||||
|         config = {} |  | ||||||
|         for param in self.parameters: |  | ||||||
|             config[param.name] = getattr(self, param.name, None) |  | ||||||
|         return config |  | ||||||
|  |  | ||||||
|     def validate(self): |  | ||||||
|         """ |  | ||||||
|         Perform basic validation to ensure that this plugin is capable of running. |  | ||||||
|         This is intended as an early check to ensure the plugin has not been mis-configured, |  | ||||||
|         rather than a comprehensive check (that may, e.g., require access to the execution |  | ||||||
|         context). |  | ||||||
|  |  | ||||||
|         This method may also be used to enforce (i.e. set as well as check) inter-parameter |  | ||||||
|         constraints for the plugin (e.g. if valid values for parameter A depend on the value |  | ||||||
|         of parameter B -- something that is not possible to enforce using ``Parameter``\ 's |  | ||||||
|         ``constraint`` attribute). |  | ||||||
|  |  | ||||||
|         """ |  | ||||||
|         if self.name is None: |  | ||||||
|             raise ValidationError('Name not set for {}'.format(self._classname)) |  | ||||||
|         for param in self.parameters: |  | ||||||
|             param.validate(self) |  | ||||||
|  |  | ||||||
|     def initialize(self, context): |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     def finalize(self, context): |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     def check_artifacts(self, context, level): |  | ||||||
|         """ |  | ||||||
|         Make sure that all mandatory artifacts have been generated. |  | ||||||
|  |  | ||||||
|         """ |  | ||||||
|         for artifact in self.artifacts: |  | ||||||
|             if artifact.level != level or not artifact.mandatory: |  | ||||||
|                 continue |  | ||||||
|             fullpath = os.path.join(context.output_directory, artifact.path) |  | ||||||
|             if not os.path.exists(fullpath): |  | ||||||
|                 message = 'Mandatory "{}" has not been generated for {}.' |  | ||||||
|                 raise ValidationError(message.format(artifact.path, self.name)) |  | ||||||
|  |  | ||||||
|     def __getattr__(self, name): |  | ||||||
|         if name == '_modules': |  | ||||||
|             raise ValueError('_modules accessed too early!') |  | ||||||
|         for module in self._modules: |  | ||||||
|             if hasattr(module, name): |  | ||||||
|                 return getattr(module, name) |  | ||||||
|         raise AttributeError(name) |  | ||||||
|  |  | ||||||
|     def load_modules(self, loader): |  | ||||||
|         """ |  | ||||||
|         Load the modules specified by the "modules" Parameter using the provided loader. A loader |  | ||||||
|         can be any object that has an attribute called "get_module" that implements the following |  | ||||||
|         signature:: |  | ||||||
|  |  | ||||||
|             get_module(name, owner, **kwargs) |  | ||||||
|  |  | ||||||
|         and returns an instance of :class:`wlauto.core.plugin.Module`. If the module with the |  | ||||||
|         specified name is not found, the loader must raise an appropriate exception. |  | ||||||
|  |  | ||||||
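|         Module specs may be given either as plain names, or as one-key dicts |  | ||||||
|         mapping the module name to its parameters, e.g. (hypothetical names):: |  | ||||||
|  |  | ||||||
|             modules = ['mod_a', {'mod_b': {'some_param': 'value'}}] |  | ||||||
|  |  | ||||||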
|         """ |  | ||||||
|         modules = list(reversed(self.core_modules)) + list(reversed(self.modules or [])) |  | ||||||
|         if not modules: |  | ||||||
|             return |  | ||||||
|         for module_spec in modules: |  | ||||||
|             if not module_spec: |  | ||||||
|                 continue |  | ||||||
|             module = self._load_module(loader, module_spec) |  | ||||||
|             self._install_module(module) |  | ||||||
|  |  | ||||||
|     def has(self, capability): |  | ||||||
|         """Check if this plugin has the specified capability. The alternative method ``can`` is |  | ||||||
|         identical to this. Which to use is up to the caller depending on what makes semantic sense |  | ||||||
|         in the context of the capability, e.g. ``can('hard_reset')`` vs  ``has('active_cooling')``.""" |  | ||||||
|         return capability in self.capabilities |  | ||||||
|  |  | ||||||
|     can = has |  | ||||||
|  |  | ||||||
|     def _load_module(self, loader, module_spec): |  | ||||||
|         if isinstance(module_spec, basestring): |  | ||||||
|             name = module_spec |  | ||||||
|             params = {} |  | ||||||
|         elif isinstance(module_spec, dict): |  | ||||||
|             if len(module_spec) != 1: |  | ||||||
|                 message = 'Invalid module spec: {}; dict must have exactly one key -- the module name.' |  | ||||||
|                 raise ValueError(message.format(module_spec)) |  | ||||||
|             name, params = module_spec.items()[0] |  | ||||||
|         else: |  | ||||||
|             message = 'Invalid module spec: {}; must be a string or a one-key dict.' |  | ||||||
|             raise ValueError(message.format(module_spec)) |  | ||||||
|  |  | ||||||
|         if not isinstance(params, dict): |  | ||||||
|             message = 'Invalid module spec: {}; dict value must also be a dict.' |  | ||||||
|             raise ValueError(message.format(module_spec)) |  | ||||||
|  |  | ||||||
|         module = loader.get_module(name, owner=self, **params) |  | ||||||
|         module.initialize(None) |  | ||||||
|         return module |  | ||||||
|  |  | ||||||
|     def _install_module(self, module): |  | ||||||
|         for capability in module.capabilities: |  | ||||||
|             if capability not in self.capabilities: |  | ||||||
|                 self.capabilities.append(capability) |  | ||||||
|         self._modules.append(module) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class PluginLoaderItem(object): |  | ||||||
|  |  | ||||||
|     def __init__(self, ext_tuple): |  | ||||||
|         self.name = ext_tuple.name |  | ||||||
|         self.default_package = ext_tuple.default_package |  | ||||||
|         self.default_path = ext_tuple.default_path |  | ||||||
|         self.cls = load_class(ext_tuple.cls) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class PluginLoader(object): |  | ||||||
|     """ |  | ||||||
|     Discovers, enumerates and loads available devices, configs, etc. |  | ||||||
|     The loader will attempt to discover things on construction by looking |  | ||||||
|     in a predetermined set of locations defined by default_paths. Optionally, |  | ||||||
|     additional locations may be specified through the paths parameter, which must |  | ||||||
|     be a list of additional Python module paths (i.e. dot-delimited). |  | ||||||
|  |  | ||||||
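|     A minimal usage sketch (the package, path, and plugin names below are |  | ||||||
|     illustrative):: |  | ||||||
|  |  | ||||||
|         loader = PluginLoader(packages=['my_plugins'], |  | ||||||
|                               paths=['/home/user/.workload_automation/plugins']) |  | ||||||
|         workload_cls = loader.get_plugin_class('my_workload', kind='workload') |  | ||||||
|         workload = loader.get_plugin('my_workload', kind='workload') |  | ||||||
|  |  | ||||||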
|     """ |  | ||||||
|  |  | ||||||
|     def __init__(self, packages=None, paths=None, ignore_paths=None, keep_going=False): |  | ||||||
|         """ |  | ||||||
|         params:: |  | ||||||
|  |  | ||||||
|             :packages: List of packages to load plugins from. |  | ||||||
|             :paths: List of paths to be searched for Python modules containing |  | ||||||
|                     WA plugins. |  | ||||||
|             :ignore_paths: List of paths to ignore when search for WA plugins (these would |  | ||||||
|                            typically be subdirectories of one or more locations listed in |  | ||||||
|                            ``paths`` parameter). |  | ||||||
|             :keep_going: Specifies whether to keep going if an error occurs while loading |  | ||||||
|                          plugins. |  | ||||||
|         """ |  | ||||||
|         self.logger = logging.getLogger('pluginloader') |  | ||||||
|         self.keep_going = keep_going |  | ||||||
|         self.packages = packages or [] |  | ||||||
|         self.paths = paths or [] |  | ||||||
|         self.ignore_paths = ignore_paths or [] |  | ||||||
|         self.plugins = {} |  | ||||||
|         self.kind_map = defaultdict(dict) |  | ||||||
|         self.aliases = {} |  | ||||||
|         self.global_param_aliases = {} |  | ||||||
|         self._discover_from_packages(self.packages) |  | ||||||
|         self._discover_from_paths(self.paths, self.ignore_paths) |  | ||||||
|  |  | ||||||
|     def update(self, packages=None, paths=None, ignore_paths=None): |  | ||||||
|         """ Load plugins from the specified paths/packages |  | ||||||
|         without clearing or reloading existing plugin. """ |  | ||||||
|         msg = 'Updating from: packages={} paths={}' |  | ||||||
|         self.logger.debug(msg.format(packages, paths)) |  | ||||||
|         if packages: |  | ||||||
|             self.packages.extend(packages) |  | ||||||
|             self._discover_from_packages(packages) |  | ||||||
|         if paths: |  | ||||||
|             self.paths.extend(paths) |  | ||||||
|             self.ignore_paths.extend(ignore_paths or []) |  | ||||||
|             self._discover_from_paths(paths, ignore_paths or []) |  | ||||||
|  |  | ||||||
|     def clear(self): |  | ||||||
|         """ Clear all discovered items. """ |  | ||||||
|         self.plugins = {}  # plugins is a dict keyed by name (see __init__) |  | ||||||
|         self.aliases = {}  # also drop aliases so reload() does not see stale entries |  | ||||||
|         self.kind_map.clear() |  | ||||||
|  |  | ||||||
|     def reload(self): |  | ||||||
|         """ Clear all discovered items and re-run the discovery. """ |  | ||||||
|         self.logger.debug('Reloading') |  | ||||||
|         self.clear() |  | ||||||
|         self._discover_from_packages(self.packages) |  | ||||||
|         self._discover_from_paths(self.paths, self.ignore_paths) |  | ||||||
|  |  | ||||||
|     def get_plugin_class(self, name, kind=None): |  | ||||||
|         """ |  | ||||||
|         Return the class for the specified plugin. Raises ``NotFoundError`` if |  | ||||||
|         the plugin cannot be found, or ``ValueError`` if ``kind`` was specified |  | ||||||
|         but is not a known plugin type. |  | ||||||
|  |  | ||||||
|         """ |  | ||||||
|         name, _ = self.resolve_alias(name) |  | ||||||
|         if kind is None: |  | ||||||
|             try: |  | ||||||
|                 return self.plugins[name] |  | ||||||
|             except KeyError: |  | ||||||
|                 raise NotFoundError('plugin {} not found.'.format(name)) |  | ||||||
|         if kind not in self.kind_map: |  | ||||||
|             raise ValueError('Unknown plugin type: {}'.format(kind)) |  | ||||||
|         store = self.kind_map[kind] |  | ||||||
|         if name not in store: |  | ||||||
|             msg = 'plugin {} is not {} {}.' |  | ||||||
|             raise NotFoundError(msg.format(name, get_article(kind), kind)) |  | ||||||
|         return store[name] |  | ||||||
|  |  | ||||||
|     def get_plugin(self, name=None, kind=None, *args, **kwargs): |  | ||||||
|         """ |  | ||||||
|         Return a plugin of the specified kind with the specified name. Any |  | ||||||
|         additional parameters will be passed to the plugin's __init__. |  | ||||||
|  |  | ||||||
|         """ |  | ||||||
|         name, base_kwargs = self.resolve_alias(name) |  | ||||||
|         kwargs = OrderedDict(chain(base_kwargs.iteritems(), kwargs.iteritems())) |  | ||||||
|         cls = self.get_plugin_class(name, kind) |  | ||||||
|         plugin = cls(*args, **kwargs) |  | ||||||
|         return plugin |  | ||||||
|  |  | ||||||
|     def get_default_config(self, name): |  | ||||||
|         """ |  | ||||||
|         Returns the default configuration for the specified plugin name. The |  | ||||||
|         name may be an alias, in which case, the returned config will be |  | ||||||
|         augmented with appropriate alias overrides. |  | ||||||
|  |  | ||||||
|         """ |  | ||||||
|         real_name, alias_config = self.resolve_alias(name) |  | ||||||
|         base_default_config = self.get_plugin_class(real_name).get_default_config() |  | ||||||
|         return merge_dicts_simple(base_default_config, alias_config) |  | ||||||
|  |  | ||||||
|     def list_plugins(self, kind=None): |  | ||||||
|         """ |  | ||||||
|         List discovered plugin classes. Optionally, only list plugins of a |  | ||||||
|         particular type. |  | ||||||
|  |  | ||||||
|         """ |  | ||||||
|         if kind is None: |  | ||||||
|             return self.plugins.values() |  | ||||||
|         if kind not in self.kind_map: |  | ||||||
|             raise ValueError('Unknown plugin type: {}'.format(kind)) |  | ||||||
|         return self.kind_map[kind].values() |  | ||||||
|  |  | ||||||
|     def has_plugin(self, name, kind=None): |  | ||||||
|         """ |  | ||||||
|         Returns ``True`` if a plugin with the specified ``name`` has been |  | ||||||
|         discovered by the loader. If ``kind`` was specified, only returns ``True`` |  | ||||||
|         if the plugin has been found *and* it is of the specified kind. |  | ||||||
|  |  | ||||||
|         """ |  | ||||||
|         try: |  | ||||||
|             self.get_plugin_class(name, kind) |  | ||||||
|             return True |  | ||||||
|         except NotFoundError: |  | ||||||
|             return False |  | ||||||
|  |  | ||||||
|     def resolve_alias(self, alias_name): |  | ||||||
|         """ |  | ||||||
|         Try to resolve the specified name as a plugin alias. Returns a |  | ||||||
|         two-tuple, the first value of which is the actual plugin name, and the |  | ||||||
|         second is a dict of parameter values for this alias. If the name passed |  | ||||||
|         is already a plugin name, then the result is ``(alias_name, {})``. |  | ||||||
|  |  | ||||||
|         """ |  | ||||||
|         alias_name = identifier(alias_name.lower()) |  | ||||||
|         if alias_name in self.plugins: |  | ||||||
|             return (alias_name, {}) |  | ||||||
|         if alias_name in self.aliases: |  | ||||||
|             alias = self.aliases[alias_name] |  | ||||||
|             return (alias.plugin_name, alias.params) |  | ||||||
|         raise NotFoundError('Could not find plugin or alias "{}"'.format(alias_name)) |  | ||||||
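|  |  | ||||||
|     # Example (editor's sketch; the alias and its parameters are |  | ||||||
|     # hypothetical): |  | ||||||
|     # |  | ||||||
|     #     loader.resolve_alias('memcpy')        # -> ('memcpy', {}) |  | ||||||
|     #     loader.resolve_alias('memcpy-large')  # -> ('memcpy', {'size': 1 << 24}) |  | ||||||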
|  |  | ||||||
|     # Internal methods. |  | ||||||
|  |  | ||||||
|     def __getattr__(self, name): |  | ||||||
|         """ |  | ||||||
|         This resolves methods for specific plugin types based on the |  | ||||||
|         corresponding generic plugin methods. So it's possible to say things like :: |  | ||||||
|  |  | ||||||
|             loader.get_device('foo') |  | ||||||
|  |  | ||||||
|         instead of :: |  | ||||||
|  |  | ||||||
|             loader.get_plugin('foo', kind='device') |  | ||||||
|  |  | ||||||
|         """ |  | ||||||
|         if name.startswith('get_'): |  | ||||||
|             name = name.replace('get_', '', 1) |  | ||||||
|             if name in self.kind_map: |  | ||||||
|                 def __wrapper(pname, *args, **kwargs): |  | ||||||
|                     return self.get_plugin(pname, name, *args, **kwargs) |  | ||||||
|                 return __wrapper |  | ||||||
|         if name.startswith('list_'): |  | ||||||
|             name = name.replace('list_', '', 1).rstrip('s') |  | ||||||
|             if name in self.kind_map: |  | ||||||
|                 def __wrapper(*args, **kwargs):  # pylint: disable=E0102 |  | ||||||
|                     return self.list_plugins(name, *args, **kwargs) |  | ||||||
|                 return __wrapper |  | ||||||
|         if name.startswith('has_'): |  | ||||||
|             name = name.replace('has_', '', 1) |  | ||||||
|             if name in self.kind_map: |  | ||||||
|                 def __wrapper(pname, *args, **kwargs):  # pylint: disable=E0102 |  | ||||||
|                     return self.has_plugin(pname, name, *args, **kwargs) |  | ||||||
|                 return __wrapper |  | ||||||
|         raise AttributeError(name) |  | ||||||
|  |  | ||||||
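|     # Editor's sketch of the dynamically resolved methods (assuming a |  | ||||||
|     # 'device' plugin kind has been discovered; the name 'juno' is purely |  | ||||||
|     # illustrative): |  | ||||||
|     # |  | ||||||
|     #     loader.get_device('juno')   # == loader.get_plugin('juno', kind='device') |  | ||||||
|     #     loader.list_devices()       # == loader.list_plugins(kind='device') |  | ||||||
|     #     loader.has_device('juno')   # == loader.has_plugin('juno', kind='device') |  | ||||||
|  |  | ||||||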
|     def _discover_from_packages(self, packages): |  | ||||||
|         self.logger.debug('Discovering plugins in packages') |  | ||||||
|         try: |  | ||||||
|             for package in packages: |  | ||||||
|                 for module in walk_modules(package): |  | ||||||
|                     self._discover_in_module(module) |  | ||||||
|         except HostError as e: |  | ||||||
|             message = 'Problem loading plugins from {}: {}' |  | ||||||
|             raise LoaderError(message.format(e.module, str(e.orig_exc))) |  | ||||||
|  |  | ||||||
|     def _discover_from_paths(self, paths, ignore_paths): |  | ||||||
|         paths = paths or [] |  | ||||||
|         ignore_paths = ignore_paths or [] |  | ||||||
|  |  | ||||||
|         self.logger.debug('Discovering plugins in paths') |  | ||||||
|         for path in paths: |  | ||||||
|             self.logger.debug('Checking path %s', path) |  | ||||||
|             if os.path.isfile(path): |  | ||||||
|                 self._discover_from_file(path) |  | ||||||
|             for root, _, files in os.walk(path, followlinks=True): |  | ||||||
|                 should_skip = False |  | ||||||
|                 for igpath in ignore_paths: |  | ||||||
|                     if root.startswith(igpath): |  | ||||||
|                         should_skip = True |  | ||||||
|                         break |  | ||||||
|                 if should_skip: |  | ||||||
|                     continue |  | ||||||
|                 for fname in files: |  | ||||||
|                     if os.path.splitext(fname)[1].lower() != '.py': |  | ||||||
|                         continue |  | ||||||
|                     filepath = os.path.join(root, fname) |  | ||||||
|                     self._discover_from_file(filepath) |  | ||||||
|  |  | ||||||
|     def _discover_from_file(self, filepath): |  | ||||||
|         try: |  | ||||||
|             modname = os.path.splitext(filepath[1:])[0].translate(MODNAME_TRANS) |  | ||||||
|             module = imp.load_source(modname, filepath) |  | ||||||
|             self._discover_in_module(module) |  | ||||||
|         except (SystemExit, ImportError), e: |  | ||||||
|             if self.keep_going: |  | ||||||
|                 self.logger.warning('Failed to load {}'.format(filepath)) |  | ||||||
|                 self.logger.warning('Got: {}'.format(e)) |  | ||||||
|             else: |  | ||||||
|                 raise LoaderError('Failed to load {}'.format(filepath), sys.exc_info()) |  | ||||||
|         except Exception as e: |  | ||||||
|             message = 'Problem loading plugins from {}: {}' |  | ||||||
|             raise LoaderError(message.format(filepath, e)) |  | ||||||
|  |  | ||||||
|     def _discover_in_module(self, module):  # NOQA pylint: disable=too-many-branches |  | ||||||
|         self.logger.debug('Checking module %s', module.__name__) |  | ||||||
|         #log.indent() |  | ||||||
|         try: |  | ||||||
|             for obj in vars(module).itervalues(): |  | ||||||
|                 if inspect.isclass(obj): |  | ||||||
|                     if not issubclass(obj, Plugin): |  | ||||||
|                         continue |  | ||||||
|                     if not obj.kind: |  | ||||||
|                         message = 'Skipping plugin {} as it does not define a kind' |  | ||||||
|                         self.logger.debug(message.format(obj.__name__)) |  | ||||||
|                         continue |  | ||||||
|                     if not obj.name: |  | ||||||
|                         message = 'Skipping {} {} as it does not define a name' |  | ||||||
|                         self.logger.debug(message.format(obj.kind, obj.__name__)) |  | ||||||
|                         continue |  | ||||||
|                     try: |  | ||||||
|                         self._add_found_plugin(obj) |  | ||||||
|                     except LoaderError as e: |  | ||||||
|                         if self.keep_going: |  | ||||||
|                             self.logger.warning(e) |  | ||||||
|                         else: |  | ||||||
|                             raise e |  | ||||||
|         finally: |  | ||||||
|             # log.dedent() |  | ||||||
|             pass |  | ||||||
|  |  | ||||||
|     def _add_found_plugin(self, obj): |  | ||||||
|         """ |  | ||||||
|             :obj: the plugin class that has been found. |  | ||||||
|         """ |  | ||||||
|         self.logger.debug('Adding %s %s', obj.kind, obj.name) |  | ||||||
|         key = identifier(obj.name.lower()) |  | ||||||
|         if key in self.plugins or key in self.aliases: |  | ||||||
|             raise LoaderError('{} "{}" already exists.'.format(obj.kind, obj.name)) |  | ||||||
|         # Plugins are tracked both in a common plugins dict and in a |  | ||||||
|         # per-kind dict, as retrieving plugins by kind is a common use |  | ||||||
|         # case. |  | ||||||
|         self.plugins[key] = obj |  | ||||||
|         self.kind_map[obj.kind][key] = obj |  | ||||||
|  |  | ||||||
|         for alias in obj.aliases: |  | ||||||
|             alias_id = identifier(alias.name.lower()) |  | ||||||
|             if alias_id in self.plugins or alias_id in self.aliases: |  | ||||||
|                 raise LoaderError('{} "{}" already exists.'.format(obj.kind, obj.name)) |  | ||||||
|             self.aliases[alias_id] = alias |  | ||||||
| @@ -1,89 +0,0 @@ | |||||||
| #    Copyright 2013-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
| import sys |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class __LoaderWrapper(object): |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def kinds(self): |  | ||||||
|         if not self._loader: |  | ||||||
|             self.reset() |  | ||||||
|         return self._loader.kind_map.keys() |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def kind_map(self): |  | ||||||
|         if not self._loader: |  | ||||||
|             self.reset() |  | ||||||
|         return self._loader.kind_map |  | ||||||
|  |  | ||||||
|     def __init__(self): |  | ||||||
|         self._loader = None |  | ||||||
|  |  | ||||||
|     def reset(self): |  | ||||||
|         # These imports cannot be done at top level, because of |  | ||||||
|         # sys.modules manipulation below |  | ||||||
|         from wlauto.core.plugin import PluginLoader |  | ||||||
|         from wlauto.core.configuration import settings |  | ||||||
|         self._loader = PluginLoader(settings.plugin_packages, |  | ||||||
|                                     [settings.plugins_directory], []) |  | ||||||
|  |  | ||||||
|     def update(self, packages=None, paths=None, ignore_paths=None): |  | ||||||
|         if not self._loader: |  | ||||||
|             self.reset() |  | ||||||
|         self._loader.update(packages, paths, ignore_paths) |  | ||||||
|  |  | ||||||
|     def reload(self): |  | ||||||
|         if not self._loader: |  | ||||||
|             self.reset() |  | ||||||
|         self._loader.reload() |  | ||||||
|  |  | ||||||
|     def list_plugins(self, kind=None): |  | ||||||
|         if not self._loader: |  | ||||||
|             self.reset() |  | ||||||
|         return self._loader.list_plugins(kind) |  | ||||||
|  |  | ||||||
|     def has_plugin(self, name, kind=None): |  | ||||||
|         if not self._loader: |  | ||||||
|             self.reset() |  | ||||||
|         return self._loader.has_plugin(name, kind) |  | ||||||
|  |  | ||||||
|     def get_plugin_class(self, name, kind=None): |  | ||||||
|         if not self._loader: |  | ||||||
|             self.reset() |  | ||||||
|         return self._loader.get_plugin_class(name, kind) |  | ||||||
|  |  | ||||||
|     def get_plugin(self, name=None, kind=None, *args, **kwargs): |  | ||||||
|         if not self._loader: |  | ||||||
|             self.reset() |  | ||||||
|         return self._loader.get_plugin(name=name, kind=kind, *args, **kwargs) |  | ||||||
|  |  | ||||||
|     def get_default_config(self, name): |  | ||||||
|         if not self._loader: |  | ||||||
|             self.reset() |  | ||||||
|         return self._loader.get_default_config(name) |  | ||||||
|  |  | ||||||
|     def resolve_alias(self, name): |  | ||||||
|         if not self._loader: |  | ||||||
|             self.reset() |  | ||||||
|         return self._loader.resolve_alias(name) |  | ||||||
|  |  | ||||||
|     def __getattr__(self, name): |  | ||||||
|         if not self._loader: |  | ||||||
|             self.reset() |  | ||||||
|         return getattr(self._loader, name) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| sys.modules[__name__] = __LoaderWrapper() |  | ||||||
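|  |  | ||||||
| # Editor's note: assigning the wrapper instance into sys.modules means that |  | ||||||
| # importing this module yields the *instance*, so every attribute access goes |  | ||||||
| # through __getattr__ above and lazily constructs the real loader on first |  | ||||||
| # use. A sketch of the resulting call-site usage (plugin name illustrative): |  | ||||||
| # |  | ||||||
| #     from wlauto.core import pluginloader |  | ||||||
| #     cls = pluginloader.get_plugin_class('trace-cmd') |  | ||||||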
| @@ -1,111 +0,0 @@ | |||||||
| #    Copyright 2013-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
| """ |  | ||||||
| Defines infrastructure for resource resolution. This is used to find |  | ||||||
| various dependencies/assets/etc that WA objects rely on in a flexible way. |  | ||||||
|  |  | ||||||
| """ |  | ||||||
| import logging |  | ||||||
| from collections import defaultdict |  | ||||||
|  |  | ||||||
| # Note: prioritylist does not exist in vanilla louie; here it is imported |  | ||||||
| #       from WA's own utils (the modified louie lives in wlauto/external). |  | ||||||
| from wlauto.utils.types import prioritylist  # pylint: disable=E0611,F0401 |  | ||||||
|  |  | ||||||
| from wlauto.exceptions import ResourceError |  | ||||||
| from wlauto.core import pluginloader |  | ||||||
|  |  | ||||||
| class ResourceResolver(object): |  | ||||||
|     """ |  | ||||||
|     Discovers and registers getters, and then handles requests for |  | ||||||
|     resources using registered getters. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     def __init__(self, config): |  | ||||||
|         self.logger = logging.getLogger(self.__class__.__name__) |  | ||||||
|         self.getters = defaultdict(prioritylist) |  | ||||||
|         self.config = config |  | ||||||
|  |  | ||||||
|     def load(self): |  | ||||||
|         """ |  | ||||||
|         Discover available resource getters via the plugin loader and |  | ||||||
|         register them with this resolver. |  | ||||||
|  |  | ||||||
|         """ |  | ||||||
|  |  | ||||||
|         for rescls in pluginloader.list_resource_getters(): |  | ||||||
|             getter = self.config.get_plugin(name=rescls.name, kind="resource_getter", resolver=self) |  | ||||||
|             getter.register() |  | ||||||
|  |  | ||||||
|     def get(self, resource, strict=True, *args, **kwargs): |  | ||||||
|         """ |  | ||||||
|         Uses registered getters to attempt to discover a resource of the specified |  | ||||||
|         kind and matching the specified criteria. Returns path to the resource that |  | ||||||
|         has been discovered. If a resource has not been discovered, this will raise |  | ||||||
|         a ``ResourceError`` or, if ``strict`` has been set to ``False``, will return |  | ||||||
|         ``None``. |  | ||||||
|  |  | ||||||
|         """ |  | ||||||
|         self.logger.debug('Resolving {}'.format(resource)) |  | ||||||
|         for getter in self.getters[resource.name]: |  | ||||||
|             self.logger.debug('Trying {}'.format(getter)) |  | ||||||
|             result = getter.get(resource, *args, **kwargs) |  | ||||||
|             if result is not None: |  | ||||||
|                 self.logger.debug('Resource {} found using {}:'.format(resource, getter)) |  | ||||||
|                 self.logger.debug('\t{}'.format(result)) |  | ||||||
|                 return result |  | ||||||
|         if strict: |  | ||||||
|             raise ResourceError('{} could not be found'.format(resource)) |  | ||||||
|         self.logger.debug('Resource {} not found.'.format(resource)) |  | ||||||
|         return None |  | ||||||
|  |  | ||||||
|     def register(self, getter, kind, priority=0): |  | ||||||
|         """ |  | ||||||
|         Register the specified resource getter as being able to discover a resource |  | ||||||
|         of the specified kind with the specified priority. |  | ||||||
|  |  | ||||||
|         This method would typically be invoked by a getter from inside its |  | ||||||
|         ``register()`` method. The idea is that getters register themselves for |  | ||||||
|         resources they know they can discover. |  | ||||||
|  |  | ||||||
|         *priorities* |  | ||||||
|  |  | ||||||
|         Getters that are registered with the highest priority will be invoked first. |  | ||||||
|         If multiple getters are registered under the same priority, they will be |  | ||||||
|         invoked in the order they were registered (i.e. in the order they were |  | ||||||
|         discovered); as discovery order is not guaranteed, this is essentially |  | ||||||
|         non-deterministic. |  | ||||||
|  |  | ||||||
|         Generally getters that are more likely to find a resource, or would find a |  | ||||||
|         "better" version of the resource should register with higher (positive) priorities. |  | ||||||
|         Fall-back getters that should only be invoked if a resource is not found by usual |  | ||||||
|         means should register with lower (negative) priorities. |  | ||||||
|  |  | ||||||
|         """ |  | ||||||
|         self.logger.debug('Registering {} for {} resources'.format(getter.name, kind)) |  | ||||||
|         self.getters[kind].add(getter, priority) |  | ||||||
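|  |  | ||||||
|     # Example (editor's sketch; the getters, resource kind and priorities |  | ||||||
|     # are illustrative): |  | ||||||
|     # |  | ||||||
|     #     resolver.register(cached_getter, 'apk', priority=20) |  | ||||||
|     #     resolver.register(remote_getter, 'apk', priority=-5) |  | ||||||
|     #     resolver.get(apk_resource)  # tries cached_getter before remote_getter |  | ||||||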
|  |  | ||||||
|     def unregister(self, getter, kind): |  | ||||||
|         """ |  | ||||||
|         Unregister a getter that has been registered earlier. |  | ||||||
|  |  | ||||||
|         """ |  | ||||||
|         self.logger.debug('Unregistering {}'.format(getter.name)) |  | ||||||
|         try: |  | ||||||
|             self.getters[kind].remove(getter) |  | ||||||
|         except ValueError: |  | ||||||
|             raise ValueError('Resource getter {} is not installed.'.format(getter.name)) |  | ||||||
| @@ -1,185 +0,0 @@ | |||||||
| #    Copyright 2013-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
| from wlauto.core.configuration import settings |  | ||||||
| from wlauto.core.plugin import Plugin |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class GetterPriority(object): |  | ||||||
|     """ |  | ||||||
|     Enumerates standard ResourceGetter priorities. In general, getters should register |  | ||||||
|     under one of these, rather than specifying other priority values. |  | ||||||
|  |  | ||||||
|  |  | ||||||
|     :cached: The cached version of the resource. Look here first. This priority also implies |  | ||||||
|              that the resource at this location is a "cache" and is not the only version of the |  | ||||||
|              resource, so it may be cleared without losing access to the resource. |  | ||||||
|     :preferred: Take this resource in favour of the environment resource. |  | ||||||
|     :environment: Found somewhere under ~/.workload_automation/ or equivalent, or |  | ||||||
|                     from environment variables, external configuration files, etc. |  | ||||||
|                     These will override resources supplied with the package. |  | ||||||
|     :external_package: Resource provided by another package. |  | ||||||
|     :package: Resource provided with the package. |  | ||||||
|     :remote: Resource will be downloaded from a remote location (such as an HTTP server |  | ||||||
|                 or a samba share). Try this only if no other getter was successful. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|     cached = 20 |  | ||||||
|     preferred = 10 |  | ||||||
|     remote = 5 |  | ||||||
|     environment = 0 |  | ||||||
|     external_package = -5 |  | ||||||
|     package = -10 |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class Resource(object): |  | ||||||
|     """ |  | ||||||
|     Represents a resource that needs to be resolved. This can be pretty much |  | ||||||
|     anything: a file, environment variable, a Python object, etc. The only thing |  | ||||||
|     a resource *has* to have is an owner (which would normally be the |  | ||||||
|     Workload/Instrument/Device/etc object that needs the resource). In addition, |  | ||||||
|     a resource may have any number of attributes to identify it, but all of |  | ||||||
|     them are resource-type specific. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     name = None |  | ||||||
|  |  | ||||||
|     def __init__(self, owner): |  | ||||||
|         self.owner = owner |  | ||||||
|  |  | ||||||
|     def delete(self, instance): |  | ||||||
|         """ |  | ||||||
|         Delete an instance of this resource type. This must be implemented by the concrete |  | ||||||
|         subclasses based on what the resource looks like, e.g. deleting a file or a directory |  | ||||||
|         tree, or removing an entry from a database. |  | ||||||
|  |  | ||||||
|         :note: Implementation should *not* contain any logic for deciding whether or not |  | ||||||
|                a resource should be deleted, only the actual deletion. The assumption is |  | ||||||
|                that if this method is invoked, then the decision has already been made. |  | ||||||
|  |  | ||||||
|         """ |  | ||||||
|         raise NotImplementedError() |  | ||||||
|  |  | ||||||
|     def __str__(self): |  | ||||||
|         return '<{}\'s {}>'.format(self.owner, self.name) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ResourceGetter(Plugin): |  | ||||||
|     """ |  | ||||||
|     Base class for implementing resource getters. Defines the getter interface. |  | ||||||
|     Getters are responsible for discovering resources (such as particular kinds of |  | ||||||
|     files) they know about based on the parameters that are passed to them. Each |  | ||||||
|     getter also has a dict of attributes that describe its operation, and that may |  | ||||||
|     be used to determine which getters get invoked. There is no pre-defined set of |  | ||||||
|     attributes and getters may define their own. |  | ||||||
|  |  | ||||||
|     Class attributes: |  | ||||||
|  |  | ||||||
|     :name: Name that uniquely identifies this getter. Must be set by any concrete subclass. |  | ||||||
|     :resource_type: Identifies resource type(s) that this getter can handle. This must |  | ||||||
|                     be either a string (for a single type) or a list of strings for |  | ||||||
|                     multiple resource types. This must be set by any concrete subclass. |  | ||||||
|     :priority: Priority with which this getter will be invoked. This should be one of |  | ||||||
|                 the standard priorities specified in the ``GetterPriority`` enumeration. If not |  | ||||||
|                 set, this will default to ``GetterPriority.environment``. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     kind = "resource_getter" |  | ||||||
|     name = None |  | ||||||
|     resource_type = None |  | ||||||
|     priority = GetterPriority.environment |  | ||||||
|  |  | ||||||
|     def __init__(self, resolver=None, **kwargs): |  | ||||||
|         super(ResourceGetter, self).__init__(**kwargs) |  | ||||||
|         self.resolver = resolver |  | ||||||
|  |  | ||||||
|     def register(self): |  | ||||||
|         """ |  | ||||||
|         Registers with a resource resolver. The default implementation invokes |  | ||||||
|         ``self.resolver.register()`` to register ``self`` for each resource type |  | ||||||
|         listed in ``self.resource_type``; subclasses may override this to customise |  | ||||||
|         registration. |  | ||||||
|  |  | ||||||
|         """ |  | ||||||
|         if self.resource_type is None: |  | ||||||
|             raise ValueError('No resource type specified for {}'.format(self.name)) |  | ||||||
|         elif isinstance(self.resource_type, list): |  | ||||||
|             for rt in self.resource_type: |  | ||||||
|                 self.resolver.register(self, rt, self.priority) |  | ||||||
|         else: |  | ||||||
|             self.resolver.register(self, self.resource_type, self.priority) |  | ||||||
|  |  | ||||||
|     def unregister(self): |  | ||||||
|         """Unregister from a resource resolver.""" |  | ||||||
|         if self.resource_type is None: |  | ||||||
|             raise ValueError('No resource type specified for {}'.format(self.name)) |  | ||||||
|         elif isinstance(self.resource_type, list): |  | ||||||
|             for rt in self.resource_type: |  | ||||||
|                 self.resolver.unregister(self, rt) |  | ||||||
|         else: |  | ||||||
|             self.resolver.unregister(self, self.resource_type) |  | ||||||
|  |  | ||||||
|     def get(self, resource, **kwargs): |  | ||||||
|         """ |  | ||||||
|         This will get invoked by the resolver when attempting to resolve a resource, passing |  | ||||||
|         in the resource to be resolved as the first parameter. Any additional parameters would |  | ||||||
|         be specific to a particular resource type. |  | ||||||
|  |  | ||||||
|         This method will only be invoked for resource types that the getter has registered for. |  | ||||||
|  |  | ||||||
|         :param resource: an instance of :class:`wlauto.core.resource.Resource`. |  | ||||||
|  |  | ||||||
|         :returns: Implementations of this method must return either the discovered resource or |  | ||||||
|                   ``None`` if the resource could not be discovered. |  | ||||||
|  |  | ||||||
|         """ |  | ||||||
|         raise NotImplementedError() |  | ||||||
|  |  | ||||||
|     def delete(self, resource, *args, **kwargs): |  | ||||||
|         """ |  | ||||||
|         Delete the resource if it is discovered. All arguments are passed to a call |  | ||||||
|         to ``self.get()``. If that call returns a resource, it is deleted. |  | ||||||
|  |  | ||||||
|         :returns: ``True`` if the specified resource has been discovered and deleted, |  | ||||||
|                   and ``False`` otherwise. |  | ||||||
|  |  | ||||||
|         """ |  | ||||||
|         discovered = self.get(resource, *args, **kwargs) |  | ||||||
|         if discovered: |  | ||||||
|             resource.delete(discovered) |  | ||||||
|             return True |  | ||||||
|         else: |  | ||||||
|             return False |  | ||||||
|  |  | ||||||
|     def __str__(self): |  | ||||||
|         return '<ResourceGetter {}>'.format(self.name) |  | ||||||
|  |  | ||||||
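| # Editor's sketch of a minimal concrete getter (the name, resource type and |  | ||||||
| # the ``resource.path`` attribute are hypothetical; register()/unregister() |  | ||||||
| # are inherited from the base class): |  | ||||||
| # |  | ||||||
| #     import os |  | ||||||
| # |  | ||||||
| #     class HomeDirGetter(ResourceGetter): |  | ||||||
| # |  | ||||||
| #         name = 'home_dir' |  | ||||||
| #         resource_type = 'file' |  | ||||||
| #         priority = GetterPriority.environment |  | ||||||
| # |  | ||||||
| #         def get(self, resource, **kwargs): |  | ||||||
| #             path = os.path.join(os.path.expanduser('~'), resource.path) |  | ||||||
| #             return path if os.path.exists(path) else None |  | ||||||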
|  |  | ||||||
| class __NullOwner(object): |  | ||||||
|     """Represents an owner for a resource not owned by anyone.""" |  | ||||||
|  |  | ||||||
|     name = 'noone' |  | ||||||
|     dependencies_directory = settings.dependencies_directory |  | ||||||
|  |  | ||||||
|     def __getattr__(self, name): |  | ||||||
|         return None |  | ||||||
|  |  | ||||||
|     def __str__(self): |  | ||||||
|         return 'no-one' |  | ||||||
|  |  | ||||||
|     __repr__ = __str__ |  | ||||||
|  |  | ||||||
|  |  | ||||||
| NO_ONE = __NullOwner() |  | ||||||
| @@ -1,319 +0,0 @@ | |||||||
| #    Copyright 2013-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
| # pylint: disable=no-member |  | ||||||
|  |  | ||||||
| """ |  | ||||||
| This module defines the classes used to handle result |  | ||||||
| processing inside Workload Automation. There will be an |  | ||||||
| :class:`IterationResult` object generated for |  | ||||||
| every workload iteration executed. This object will have a list of |  | ||||||
| :class:`Metric` objects. This list will be |  | ||||||
| populated by the workload itself and may also be updated by instrumentation |  | ||||||
| (e.g. to add power measurements).  Once the result object has been fully |  | ||||||
| populated, it will be passed into the ``process_iteration_result`` method of |  | ||||||
| :class:`ResultProcessor`. Once the entire run has completed, a list containing |  | ||||||
| result objects from all iterations will be passed into the ``process_run_result`` |  | ||||||
| method of :class:`ResultProcessor`. |  | ||||||
|  |  | ||||||
| Which result processors will be active is defined by the ``result_processors`` |  | ||||||
| list in ``~/.workload_automation/config.py``. Only the result processors |  | ||||||
| whose names appear in this list will be used. |  | ||||||
|  |  | ||||||
| A :class:`ResultManager` keeps track of active result processors. |  | ||||||
|  |  | ||||||
| """ |  | ||||||
| import logging |  | ||||||
| import traceback |  | ||||||
| from copy import copy |  | ||||||
| from contextlib import contextmanager |  | ||||||
| from datetime import datetime |  | ||||||
|  |  | ||||||
| from wlauto.core.plugin import Plugin |  | ||||||
| from wlauto.core.configuration.configuration import ITERATION_STATUS |  | ||||||
| from wlauto.exceptions import WAError |  | ||||||
| from wlauto.utils.types import numeric |  | ||||||
| from wlauto.utils.misc import enum_metaclass, merge_dicts_simple |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ResultManager(object): |  | ||||||
|     """ |  | ||||||
|     Keeps track of result processors and passes the results on to the individual processors. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     def __init__(self): |  | ||||||
|         self.logger = logging.getLogger('ResultsManager') |  | ||||||
|         self.processors = [] |  | ||||||
|         self._bad = [] |  | ||||||
|  |  | ||||||
|     def install(self, processor): |  | ||||||
|         self.logger.debug('Installing results processor %s', processor.name) |  | ||||||
|         self.processors.append(processor) |  | ||||||
|  |  | ||||||
|     def uninstall(self, processor): |  | ||||||
|         if processor in self.processors: |  | ||||||
|             self.logger.debug('Uninstalling results processor %s', processor.name) |  | ||||||
|             self.processors.remove(processor) |  | ||||||
|         else: |  | ||||||
|             self.logger.warning('Attempting to uninstall results processor %s, which is not installed.', |  | ||||||
|                                 processor.name) |  | ||||||
|  |  | ||||||
|     def initialize(self, context): |  | ||||||
|         # Errors aren't handled at this stage, because this gets executed |  | ||||||
|         # before workload execution starts and we just want to propagate them |  | ||||||
|         # and terminate (so that the error can be corrected and WA restarted). |  | ||||||
|         for processor in self.processors: |  | ||||||
|             processor.initialize(context) |  | ||||||
|  |  | ||||||
|     def add_result(self, result, context): |  | ||||||
|         with self._manage_processors(context): |  | ||||||
|             for processor in self.processors: |  | ||||||
|                 with self._handle_errors(processor): |  | ||||||
|                     processor.process_iteration_result(result, context) |  | ||||||
|             for processor in self.processors: |  | ||||||
|                 with self._handle_errors(processor): |  | ||||||
|                     processor.export_iteration_result(result, context) |  | ||||||
|  |  | ||||||
|     def process_run_result(self, result, context): |  | ||||||
|         with self._manage_processors(context): |  | ||||||
|             for processor in self.processors: |  | ||||||
|                 with self._handle_errors(processor): |  | ||||||
|                     processor.process_run_result(result, context) |  | ||||||
|             for processor in self.processors: |  | ||||||
|                 with self._handle_errors(processor): |  | ||||||
|                     processor.export_run_result(result, context) |  | ||||||
|  |  | ||||||
|     def finalize(self, context): |  | ||||||
|         with self._manage_processors(context): |  | ||||||
|             for processor in self.processors: |  | ||||||
|                 with self._handle_errors(processor): |  | ||||||
|                     processor.finalize(context) |  | ||||||
|  |  | ||||||
|     def validate(self): |  | ||||||
|         for processor in self.processors: |  | ||||||
|             processor.validate() |  | ||||||
|  |  | ||||||
|     @contextmanager |  | ||||||
|     def _manage_processors(self, context, finalize_bad=True): |  | ||||||
|         yield |  | ||||||
|         for processor in self._bad: |  | ||||||
|             if finalize_bad: |  | ||||||
|                 processor.finalize(context) |  | ||||||
|             self.uninstall(processor) |  | ||||||
|         self._bad = [] |  | ||||||
|  |  | ||||||
|     @contextmanager |  | ||||||
|     def _handle_errors(self, processor): |  | ||||||
|         try: |  | ||||||
|             yield |  | ||||||
|         except KeyboardInterrupt: |  | ||||||
|             raise  # re-raise as-is so the original traceback is preserved |  | ||||||
|         except WAError, we: |  | ||||||
|             self.logger.error('"{}" result processor has encountered an error'.format(processor.name)) |  | ||||||
|             self.logger.error('{}("{}")'.format(we.__class__.__name__, we.message)) |  | ||||||
|             self._bad.append(processor) |  | ||||||
|         except Exception, e:  # pylint: disable=W0703 |  | ||||||
|             self.logger.error('"{}" result processor has encountered an error'.format(processor.name)) |  | ||||||
|             self.logger.error('{}("{}")'.format(e.__class__.__name__, e)) |  | ||||||
|             self.logger.error(traceback.format_exc()) |  | ||||||
|             self._bad.append(processor) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ResultProcessor(Plugin): |  | ||||||
|     """ |  | ||||||
|     Base class for result processors. Defines an interface that should be implemented |  | ||||||
|     by the subclasses. A result processor can be used to do any kind of post-processing |  | ||||||
|     of the results, from writing them out to a file, to uploading them to a database, |  | ||||||
|     performing calculations, generating plots, etc. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|     kind = "result_processor" |  | ||||||
|     def initialize(self, context): |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     def process_iteration_result(self, result, context): |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     def export_iteration_result(self, result, context): |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     def process_run_result(self, result, context): |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     def export_run_result(self, result, context): |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     def finalize(self, context): |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class RunResult(object): |  | ||||||
|     """ |  | ||||||
|     Contains overall results for a run. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     __metaclass__ = enum_metaclass('values', return_name=True) |  | ||||||
|  |  | ||||||
|     values = [ |  | ||||||
|         'OK', |  | ||||||
|         'OKISH', |  | ||||||
|         'PARTIAL', |  | ||||||
|         'FAILED', |  | ||||||
|         'UNKNOWN', |  | ||||||
|     ] |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def status(self): |  | ||||||
|         if not self.iteration_results or all([s.status == IterationResult.FAILED for s in self.iteration_results]): |  | ||||||
|             return self.FAILED |  | ||||||
|         elif any([s.status == IterationResult.FAILED for s in self.iteration_results]): |  | ||||||
|             return self.PARTIAL |  | ||||||
|         elif any([s.status == IterationResult.ABORTED for s in self.iteration_results]): |  | ||||||
|             return self.PARTIAL |  | ||||||
|         elif (any([s.status == IterationResult.PARTIAL for s in self.iteration_results]) or |  | ||||||
|                 self.non_iteration_errors): |  | ||||||
|             return self.OKISH |  | ||||||
|         elif all([s.status == IterationResult.OK for s in self.iteration_results]): |  | ||||||
|             return self.OK |  | ||||||
|         else: |  | ||||||
|             return self.UNKNOWN  # should never happen |  | ||||||
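|  |  | ||||||
|     # Editor's note: the checks above establish a precedence of FAILED |  | ||||||
|     # (everything failed, or nothing ran), then PARTIAL (some failures or |  | ||||||
|     # aborts), then OKISH (partial iterations or non-iteration errors), |  | ||||||
|     # then OK (everything passed). |  | ||||||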
|  |  | ||||||
|     def __init__(self, run_info, output_directory=None): |  | ||||||
|         self.info = run_info |  | ||||||
|         self.iteration_results = [] |  | ||||||
|         self.artifacts = [] |  | ||||||
|         self.events = [] |  | ||||||
|         self.non_iteration_errors = False |  | ||||||
|         self.output_directory = output_directory |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class RunEvent(object): |  | ||||||
|     """ |  | ||||||
|     An event that occurred during a run. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|     def __init__(self, message): |  | ||||||
|         self.timestamp = datetime.utcnow() |  | ||||||
|         self.message = message |  | ||||||
|  |  | ||||||
|     def to_dict(self): |  | ||||||
|         return copy(self.__dict__) |  | ||||||
|  |  | ||||||
|     def __str__(self): |  | ||||||
|         return '{} {}'.format(self.timestamp, self.message) |  | ||||||
|  |  | ||||||
|     __repr__ = __str__ |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class IterationResult(object): |  | ||||||
|     """ |  | ||||||
|     Contains the result of running a single iteration of a workload. It is the |  | ||||||
|     responsibility of a workload to instantiate an IterationResult, populate it, |  | ||||||
|     and return it from its get_result() method. |  | ||||||
|  |  | ||||||
|     Status explanations: |  | ||||||
|  |  | ||||||
|        :NOT_STARTED: This iteration has not yet started. |  | ||||||
|        :RUNNING: This iteration is currently running and no errors have been detected. |  | ||||||
|        :OK: This iteration has completed and no errors have been detected. |  | ||||||
|        :PARTIAL: One or more instruments have failed (the iteration may still be running). |  | ||||||
|        :FAILED: The workload itself has failed. |  | ||||||
|        :ABORTED: The user interrupted the workload. |  | ||||||
|        :SKIPPED: The iteration was skipped due to a previous failure. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     __metaclass__ = enum_metaclass('values', return_name=True) |  | ||||||
|  |  | ||||||
|     values = ITERATION_STATUS |  | ||||||
|  |  | ||||||
|     def __init__(self, spec): |  | ||||||
|         self.spec = spec |  | ||||||
|         self.id = spec.id |  | ||||||
|         self.workload = spec.workload |  | ||||||
|         self.classifiers = copy(spec.classifiers) |  | ||||||
|         self.iteration = None |  | ||||||
|         self.status = self.NOT_STARTED |  | ||||||
|         self.output_directory = None |  | ||||||
|         self.events = [] |  | ||||||
|         self.metrics = [] |  | ||||||
|         self.artifacts = [] |  | ||||||
|  |  | ||||||
|     def add_metric(self, name, value, units=None, lower_is_better=False, classifiers=None): |  | ||||||
|         self.metrics.append(Metric(name, value, units, lower_is_better, |  | ||||||
|                                    merge_dicts_simple(self.classifiers, classifiers))) |  | ||||||
|  |  | ||||||
|     def has_metric(self, name): |  | ||||||
|         for metric in self.metrics: |  | ||||||
|             if metric.name == name: |  | ||||||
|                 return True |  | ||||||
|         return False |  | ||||||
|  |  | ||||||
|     def add_event(self, message): |  | ||||||
|         self.events.append(RunEvent(message)) |  | ||||||
|  |  | ||||||
|     def to_dict(self): |  | ||||||
|         d = copy(self.__dict__) |  | ||||||
|         d['events'] = [e.to_dict() for e in self.events] |  | ||||||
|         return d |  | ||||||
|  |  | ||||||
|     def __iter__(self): |  | ||||||
|         return iter(self.metrics) |  | ||||||
|  |  | ||||||
|     def __getitem__(self, name): |  | ||||||
|         for metric in self.metrics: |  | ||||||
|             if metric.name == name: |  | ||||||
|                 return metric |  | ||||||
|         raise KeyError('Metric {} not found.'.format(name)) |  | ||||||
|  |  | ||||||
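| # Example (editor's sketch; the metric name/value are illustrative and |  | ||||||
| # ``spec`` is assumed to be a populated workload spec): |  | ||||||
| # |  | ||||||
| #     result = IterationResult(spec) |  | ||||||
| #     result.status = result.RUNNING |  | ||||||
| #     result.add_metric('frames_dropped', 3, lower_is_better=True) |  | ||||||
| #     result['frames_dropped'].value   # -> 3 |  | ||||||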
|  |  | ||||||
| class Metric(object): |  | ||||||
|     """ |  | ||||||
|     This is a single metric collected from executing a workload. |  | ||||||
|  |  | ||||||
|     :param name: the name of the metric. Uniquely identifies the metric |  | ||||||
|                  within the results. |  | ||||||
|     :param value: The numerical value of the metric for this execution of |  | ||||||
|                   a workload. This can be either an int or a float. |  | ||||||
|     :param units: Units for the collected value. Can be None if the value |  | ||||||
|                   has no units (e.g. it's a count or a standardised score). |  | ||||||
|     :param lower_is_better: Boolean flag indicating whether lower values are |  | ||||||
|                             better than higher ones. Defaults to False. |  | ||||||
|     :param classifiers: A set of key-value pairs to further classify this metric |  | ||||||
|                         beyond current iteration (e.g. this can be used to identify |  | ||||||
|                         sub-tests). |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     def __init__(self, name, value, units=None, lower_is_better=False, classifiers=None): |  | ||||||
|         self.name = name |  | ||||||
|         self.value = numeric(value) |  | ||||||
|         self.units = units |  | ||||||
|         self.lower_is_better = lower_is_better |  | ||||||
|         self.classifiers = classifiers or {} |  | ||||||
|  |  | ||||||
|     def to_dict(self): |  | ||||||
|         return self.__dict__ |  | ||||||
|  |  | ||||||
|     def __str__(self): |  | ||||||
|         result = '{}: {}'.format(self.name, self.value) |  | ||||||
|         if self.units: |  | ||||||
|             result += ' ' + self.units |  | ||||||
|         result += ' ({})'.format('-' if self.lower_is_better else '+') |  | ||||||
|         return '<{}>'.format(result) |  | ||||||
|  |  | ||||||
|     __repr__ = __str__ |  | ||||||
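|  |  | ||||||
| # Example (editor's sketch) of constructing a metric and its rendering: |  | ||||||
| # |  | ||||||
| #     m = Metric('execution_time', '12.3', units='seconds', lower_is_better=True) |  | ||||||
| #     str(m)  # -> '<execution_time: 12.3 seconds (-)>' |  | ||||||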
| @@ -1,272 +0,0 @@ | |||||||
| #    Copyright 2013-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
| """ |  | ||||||
| This module wraps the louie signalling mechanism. It relies on a modified |  | ||||||
| version of louie that adds prioritization to handler invocation. |  | ||||||
|  |  | ||||||
| """ |  | ||||||
| import logging |  | ||||||
| from contextlib import contextmanager |  | ||||||
|  |  | ||||||
| from louie import dispatcher |  | ||||||
|  |  | ||||||
| from wlauto.utils.types import prioritylist |  | ||||||
|  |  | ||||||
|  |  | ||||||
| logger = logging.getLogger('dispatcher') |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class Signal(object): |  | ||||||
|     """ |  | ||||||
|     This class implements the signals to be used for notifying callbacks |  | ||||||
|     registered to respond to different states and stages of the execution of workload |  | ||||||
|     automation. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     def __init__(self, name, description='no description', invert_priority=False): |  | ||||||
|         """ |  | ||||||
|         Instantiates a Signal. |  | ||||||
|  |  | ||||||
|             :param name: name is the identifier of the Signal object. Signal instances with |  | ||||||
|                         the same name refer to the same execution state/stage. |  | ||||||
|             :param invert_priority: boolean parameter that determines whether multiple |  | ||||||
|                                     callbacks for the same signal should be ordered with |  | ||||||
|                                     ascending or descending priorities. Typically this flag |  | ||||||
|                                     should be set to True if the Signal is triggered AFTER |  | ||||||
|                                     a state/stage has been reached. That way callbacks with |  | ||||||
|                                     high priorities will be called right after the event has |  | ||||||
|                                     occurred. |  | ||||||
|         """ |  | ||||||
|         self.name = name |  | ||||||
|         self.description = description |  | ||||||
|         self.invert_priority = invert_priority |  | ||||||
|  |  | ||||||
|     def __str__(self): |  | ||||||
|         return self.name |  | ||||||
|  |  | ||||||
|     __repr__ = __str__ |  | ||||||
|  |  | ||||||
|     def __hash__(self): |  | ||||||
|         return id(self.name) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # These are paired events -- if the before_event is sent, the after_ signal is |  | ||||||
| # guaranteed to also be sent. In particular, the after_ signals will be sent |  | ||||||
| # even if there is an error, so you cannot assume in the handler that the |  | ||||||
| # device has booted successfully. In most cases, you should instead use the |  | ||||||
| # non-paired signals below. |  | ||||||
| BEFORE_FLASHING = Signal('before-flashing-signal', invert_priority=True) |  | ||||||
| SUCCESSFUL_FLASHING = Signal('successful-flashing-signal') |  | ||||||
| AFTER_FLASHING = Signal('after-flashing-signal') |  | ||||||
|  |  | ||||||
| BEFORE_BOOT = Signal('before-boot-signal', invert_priority=True) |  | ||||||
| SUCCESSFUL_BOOT = Signal('successful-boot-signal') |  | ||||||
| AFTER_BOOT = Signal('after-boot-signal') |  | ||||||
|  |  | ||||||
| BEFORE_INITIAL_BOOT = Signal('before-initial-boot-signal', invert_priority=True) |  | ||||||
| SUCCESSFUL_INITIAL_BOOT = Signal('successful-initial-boot-signal') |  | ||||||
| AFTER_INITIAL_BOOT = Signal('after-initial-boot-signal') |  | ||||||
|  |  | ||||||
| BEFORE_FIRST_ITERATION_BOOT = Signal('before-first-iteration-boot-signal', invert_priority=True) |  | ||||||
| SUCCESSFUL_FIRST_ITERATION_BOOT = Signal('successful-first-iteration-boot-signal') |  | ||||||
| AFTER_FIRST_ITERATION_BOOT = Signal('after-first-iteration-boot-signal') |  | ||||||
|  |  | ||||||
| BEFORE_WORKLOAD_SETUP = Signal('before-workload-setup-signal', invert_priority=True) |  | ||||||
| SUCCESSFUL_WORKLOAD_SETUP = Signal('successful-workload-setup-signal') |  | ||||||
| AFTER_WORKLOAD_SETUP = Signal('after-workload-setup-signal') |  | ||||||
|  |  | ||||||
| BEFORE_WORKLOAD_EXECUTION = Signal('before-workload-execution-signal', invert_priority=True) |  | ||||||
| SUCCESSFUL_WORKLOAD_EXECUTION = Signal('successful-workload-execution-signal') |  | ||||||
| AFTER_WORKLOAD_EXECUTION = Signal('after-workload-execution-signal') |  | ||||||
|  |  | ||||||
| BEFORE_WORKLOAD_RESULT_UPDATE = Signal('before-iteration-result-update-signal', invert_priority=True) |  | ||||||
| SUCCESSFUL_WORKLOAD_RESULT_UPDATE = Signal('successful-iteration-result-update-signal') |  | ||||||
| AFTER_WORKLOAD_RESULT_UPDATE = Signal('after-iteration-result-update-signal') |  | ||||||
|  |  | ||||||
| BEFORE_WORKLOAD_TEARDOWN = Signal('before-workload-teardown-signal', invert_priority=True) |  | ||||||
| SUCCESSFUL_WORKLOAD_TEARDOWN = Signal('successful-workload-teardown-signal') |  | ||||||
| AFTER_WORKLOAD_TEARDOWN = Signal('after-workload-teardown-signal') |  | ||||||
|  |  | ||||||
| BEFORE_OVERALL_RESULTS_PROCESSING = Signal('before-overall-results-process-signal', invert_priority=True) |  | ||||||
| SUCCESSFUL_OVERALL_RESULTS_PROCESSING = Signal('successful-overall-results-process-signal') |  | ||||||
| AFTER_OVERALL_RESULTS_PROCESSING = Signal('after-overall-results-process-signal') |  | ||||||
|  |  | ||||||
| # These are the not-paired signals; they are emitted independently. E.g. the |  | ||||||
| # fact that RUN_START was emitted does not mean run end will be. |  | ||||||
| RUN_START = Signal('start-signal', invert_priority=True) |  | ||||||
| RUN_END = Signal('end-signal') |  | ||||||
| WORKLOAD_SPEC_START = Signal('workload-spec-start-signal', invert_priority=True) |  | ||||||
| WORKLOAD_SPEC_END = Signal('workload-spec-end-signal') |  | ||||||
| ITERATION_START = Signal('iteration-start-signal', invert_priority=True) |  | ||||||
| ITERATION_END = Signal('iteration-end-signal') |  | ||||||
|  |  | ||||||
| RUN_INIT = Signal('run-init-signal') |  | ||||||
| SPEC_INIT = Signal('spec-init-signal') |  | ||||||
| ITERATION_INIT = Signal('iteration-init-signal') |  | ||||||
|  |  | ||||||
| RUN_FIN = Signal('run-fin-signal') |  | ||||||
|  |  | ||||||
| # These signals are used by the LoggerFilter to tell about logging events |  | ||||||
| ERROR_LOGGED = Signal('error_logged') |  | ||||||
| WARNING_LOGGED = Signal('warning_logged') |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class CallbackPriority(object): |  | ||||||
|  |  | ||||||
|     EXTREMELY_HIGH = 30 |  | ||||||
|     VERY_HIGH = 20 |  | ||||||
|     HIGH = 10 |  | ||||||
|     NORMAL = 0 |  | ||||||
|     LOW = -10 |  | ||||||
|     VERY_LOW = -20 |  | ||||||
|     EXTREMELY_LOW = -30 |  | ||||||
|  |  | ||||||
|     def __init__(self): |  | ||||||
|         raise ValueError('Cannot instantiate') |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class _prioritylist_wrapper(prioritylist): |  | ||||||
|     """ |  | ||||||
|     This adds a NOP append() method so that when louie invokes it to add the |  | ||||||
|     handler to receivers, nothing will happen; the handler is actually added inside |  | ||||||
|     the connect() below according to priority, before louie's connect() gets invoked. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     def append(self, *args, **kwargs): |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def connect(handler, signal, sender=dispatcher.Any, priority=0): |  | ||||||
|     """ |  | ||||||
|     Connects a callback to a signal, so that the callback will be automatically invoked |  | ||||||
|     when that signal is sent. |  | ||||||
|  |  | ||||||
|     Parameters: |  | ||||||
|  |  | ||||||
|         :handler: This can be any callable that takes the right arguments for |  | ||||||
|                   the signal. For most signals this means a single argument that |  | ||||||
|                   will be an ``ExecutionContext`` instance. But please see documentation |  | ||||||
|                   for individual signals in the :ref:`signals reference <instrumentation_method_map>`. |  | ||||||
|         :signal: The signal to which the handler will be subscribed. Please see |  | ||||||
|                  :ref:`signals reference <instrumentation_method_map>` for the list of standard WA |  | ||||||
|                  signals. |  | ||||||
|  |  | ||||||
|                  .. note:: There is nothing to prevent instrumentation from sending its |  | ||||||
|                            own signals that are not part of the standard set. However the signal |  | ||||||
|                            must always be an :class:`wlauto.core.signal.Signal` instance. |  | ||||||
|  |  | ||||||
|         :sender: The handler will be invoked only for the signals emitted by this sender. By |  | ||||||
|                  default, this is set to :class:`louie.dispatcher.Any`, so the handler will |  | ||||||
|                  be invoked for signals from any sender. |  | ||||||
|         :priority: An integer (positive or negative) that specifies the priority of the handler. |  | ||||||
|                    Handlers with higher priority will be called before handlers with lower |  | ||||||
|                    priority. The call order of handlers with the same priority is not specified. |  | ||||||
|                    Defaults to 0. |  | ||||||
|  |  | ||||||
|                    .. note:: Priorities for some signals are inverted (so highest priority |  | ||||||
|                              handlers get executed last). Please see :ref:`signals reference <instrumentation_method_map>` |  | ||||||
|                              for details. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|     if getattr(signal, 'invert_priority', False): |  | ||||||
|         priority = -priority |  | ||||||
|     senderkey = id(sender) |  | ||||||
|     if senderkey in dispatcher.connections: |  | ||||||
|         signals = dispatcher.connections[senderkey] |  | ||||||
|     else: |  | ||||||
|         dispatcher.connections[senderkey] = signals = {} |  | ||||||
|     if signal in signals: |  | ||||||
|         receivers = signals[signal] |  | ||||||
|     else: |  | ||||||
|         receivers = signals[signal] = _prioritylist_wrapper() |  | ||||||
|     receivers.add(handler, priority) |  | ||||||
|     dispatcher.connect(handler, signal, sender) |  | ||||||
|  |  | ||||||
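For illustration, a minimal sketch of subscribing a handler to one of the signals above; the handler body is made up, and the import path assumes this module is importable as wlauto.core.signal (as its docstrings suggest):

    from wlauto.core import signal

    def on_run_start(context):  # hypothetical handler body
        print 'run is starting'

    # RUN_START was declared with invert_priority=True, so a positive
    # priority here means the handler actually runs *after* priority-0 ones.
    signal.connect(on_run_start, signal.RUN_START,
                   priority=signal.CallbackPriority.HIGH)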
|  |  | ||||||
| def disconnect(handler, signal, sender=dispatcher.Any): |  | ||||||
|     """ |  | ||||||
|     Disconnect a previously connected handler from the specified signal, optionally only |  | ||||||
|     for the specified sender. |  | ||||||
|  |  | ||||||
|     Parameters: |  | ||||||
|  |  | ||||||
|         :handler: The callback to be disconnected. |  | ||||||
|         :signal: The signal the handler is to be disconnected from. It will |  | ||||||
|                  be an :class:`wlauto.core.signal.Signal` instance. |  | ||||||
|         :sender: If specified, the handler will only be disconnected from the signal |  | ||||||
|                 sent by this sender. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|     dispatcher.disconnect(handler, signal, sender) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def send(signal, sender=dispatcher.Anonymous, *args, **kwargs): |  | ||||||
|     """ |  | ||||||
|     Sends a signal, causing connected handlers to be invoked. |  | ||||||
|  |  | ||||||
|     Parameters: |  | ||||||
|  |  | ||||||
|         :signal: Signal to be sent. This must be an instance of :class:`wlauto.core.signal.Signal` |  | ||||||
|                  or its subclasses. |  | ||||||
|         :sender: The sender of the signal (typically, this would be ``self``). Some handlers may only |  | ||||||
|                  be subscribed to signals from a particular sender. |  | ||||||
|  |  | ||||||
|         The rest of the parameters will be passed on as arguments to the handler. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|     return dispatcher.send(signal, sender, *args, **kwargs) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # This will normally be set to log_error() by init_logging(); see wa/framework/log.py. |  | ||||||
| # Done this way to prevent a circular import dependency. |  | ||||||
| log_error_func = logger.error |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def safe_send(signal, sender=dispatcher.Anonymous, |  | ||||||
|               propagate=[KeyboardInterrupt], *args, **kwargs): |  | ||||||
|     """ |  | ||||||
|     Same as ``send``, except this will catch and log all exceptions raised |  | ||||||
|     by handlers, except those specified in the ``propagate`` argument (defaults |  | ||||||
|     to just ``[KeyboardInterrupt]``). |  | ||||||
|     """ |  | ||||||
|     try: |  | ||||||
|         send(signal, sender, *args, **kwargs) |  | ||||||
|     except Exception as e: |  | ||||||
|         if any(isinstance(e, p) for p in propagate): |  | ||||||
|             raise e |  | ||||||
|         log_error_func(e) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| @contextmanager |  | ||||||
| def wrap(signal_name, sender=dispatcher.Anonymous, safe=False, *args, **kwargs): |  | ||||||
|     """Wraps the suite in before/after signals, ensuring |  | ||||||
|     that the after signal is always sent.""" |  | ||||||
|     signal_name = signal_name.upper().replace('-', '_') |  | ||||||
|     send_func = safe_send if safe else send |  | ||||||
|     try: |  | ||||||
|         before_signal = globals()['BEFORE_' + signal_name] |  | ||||||
|         success_signal = globals()['SUCCESSFUL_' + signal_name] |  | ||||||
|         after_signal = globals()['AFTER_' + signal_name] |  | ||||||
|     except KeyError: |  | ||||||
|         raise ValueError('Invalid wrapped signal name: {}'.format(signal_name)) |  | ||||||
|     try: |  | ||||||
|         send_func(before_signal, sender, *args, **kwargs) |  | ||||||
|         yield |  | ||||||
|         send_func(success_signal, sender, *args, **kwargs) |  | ||||||
|     finally: |  | ||||||
|         send_func(after_signal, sender, *args, **kwargs) |  | ||||||
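A sketch of how the paired signals defined above are driven through wrap(); the wrapped operation and its arguments are illustrative:

    from wlauto.core import signal

    with signal.wrap('workload-setup'):  # resolves to the *_WORKLOAD_SETUP signals
        workload.setup(context)          # hypothetical wrapped operation
    # BEFORE_WORKLOAD_SETUP is sent on entry, SUCCESSFUL_WORKLOAD_SETUP only if
    # no exception was raised, and AFTER_WORKLOAD_SETUP always, via the finally.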
| @@ -1,26 +0,0 @@ | |||||||
| #    Copyright 2014-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
| from collections import namedtuple |  | ||||||
|  |  | ||||||
| VersionTuple = namedtuple('Version', ['major', 'minor', 'revision']) |  | ||||||
|  |  | ||||||
| version = VersionTuple(2, 4, 0) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def get_wa_version(): |  | ||||||
|     version_string = '{}.{}.{}'.format(version.major, version.minor, version.revision) |  | ||||||
|     return version_string |  | ||||||
| @@ -1,104 +0,0 @@ | |||||||
| #    Copyright 2014-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
| """ |  | ||||||
| A workload is the unit of execution. It represents a set of activities that are performed |  | ||||||
| and measured together, as well as the necessary setup and teardown procedures. A single |  | ||||||
| execution of a workload produces one :class:`wlauto.core.result.WorkloadResult` that is populated with zero or more |  | ||||||
| :class:`wlauto.core.result.WorkloadMetric`\ s and/or |  | ||||||
| :class:`wlauto.core.result.Artifact`\s by the workload and active instrumentation. |  | ||||||
|  |  | ||||||
| """ |  | ||||||
| from wlauto.core.plugin import Plugin |  | ||||||
| from wlauto.exceptions import WorkloadError |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class Workload(Plugin): |  | ||||||
|     """ |  | ||||||
|     This is the base class for the workloads executed by the framework. |  | ||||||
|     Each of the methods throwing NotImplementedError *must* be implemented |  | ||||||
|     by the derived classes. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|     kind = "workload" |  | ||||||
|     supported_devices = [] |  | ||||||
|     supported_platforms = [] |  | ||||||
|     summary_metrics = [] |  | ||||||
|  |  | ||||||
|     def __init__(self, device, **kwargs): |  | ||||||
|         """ |  | ||||||
|         Creates a new Workload. |  | ||||||
|  |  | ||||||
|         :param device: the Device on which the workload will be executed. |  | ||||||
|         """ |  | ||||||
|         super(Workload, self).__init__(**kwargs) |  | ||||||
|         if self.supported_devices and device.name not in self.supported_devices: |  | ||||||
|             raise WorkloadError('Workload {} does not support device {}'.format(self.name, device.name)) |  | ||||||
|  |  | ||||||
|         if self.supported_platforms and device.os not in self.supported_platforms: |  | ||||||
|             raise WorkloadError('Workload {} does not support platform {}'.format(self.name, device.os)) |  | ||||||
|         self.device = device |  | ||||||
|  |  | ||||||
|     def init_resources(self, context): |  | ||||||
|         """ |  | ||||||
|         This method may be used to perform early resource discovery and initialization. This is invoked |  | ||||||
|         during the initial loading stage and before the device is ready, so cannot be used for any |  | ||||||
|         device-dependent initialization. This method is invoked before the workload instance is |  | ||||||
|         validated. |  | ||||||
|  |  | ||||||
|         """ |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     def initialize(self, context): |  | ||||||
|         """ |  | ||||||
|         This method should be used to perform once-per-run initialization of a workload instance, i.e., |  | ||||||
|         unlike ``setup()`` it will not be invoked on each iteration. |  | ||||||
|  |  | ||||||
|         """ |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     def setup(self, context): |  | ||||||
|         """ |  | ||||||
|         Perform the setup necessary to run the workload, such as copying the necessary files |  | ||||||
|         to the device, configuring the environments, etc. |  | ||||||
|  |  | ||||||
|         This is also the place to perform any on-device checks prior to attempting to execute |  | ||||||
|         the workload. |  | ||||||
|  |  | ||||||
|         """ |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     def run(self, context): |  | ||||||
|         """Execute the workload. This is the method that performs the actual "work" of the""" |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     def update_result(self, context): |  | ||||||
|         """ |  | ||||||
|         Update the result within the specified execution context with the metrics |  | ||||||
|         from this workload iteration. |  | ||||||
|  |  | ||||||
|         """ |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     def teardown(self, context): |  | ||||||
|         """ Perform any final clean up for the Workload. """ |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     def finalize(self, context): |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     def __str__(self): |  | ||||||
|         return '<Workload {}>'.format(self.name) |  | ||||||
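For illustration, a minimal sketch of a concrete subclass; the workload name, the sleep-based "work", and the add_metric() result call are assumptions, not part of this module:

    import time

    from wlauto.core.workload import Workload

    class IdleWorkload(Workload):
        """Hypothetical workload that simply idles for a fixed period."""

        name = 'idle'

        def run(self, context):
            time.sleep(30)  # the "work": do nothing for 30 seconds

        def update_result(self, context):
            # assumes the context's result object exposes add_metric()
            context.result.add_metric('duration', 30, 'seconds')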
| @@ -1,162 +0,0 @@ | |||||||
| #    Copyright 2013-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
| from wlauto.utils.misc import get_traceback |  | ||||||
|  |  | ||||||
| from devlib.exception import DevlibError, HostError, TargetError, TimeoutError |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class WAError(Exception): |  | ||||||
|     """Base class for all Workload Automation exceptions.""" |  | ||||||
|     pass |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class NotFoundError(WAError): |  | ||||||
|     """Raised when the specified item is not found.""" |  | ||||||
|     pass |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ValidationError(WAError): |  | ||||||
|     """Raised on failure to validate an plugin.""" |  | ||||||
|     pass |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class DeviceError(WAError): |  | ||||||
|     """General Device error.""" |  | ||||||
|     pass |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class DeviceNotRespondingError(WAError): |  | ||||||
|     """The device is not responding.""" |  | ||||||
|  |  | ||||||
|     def __init__(self, device): |  | ||||||
|         super(DeviceNotRespondingError, self).__init__('Device {} is not responding.'.format(device)) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class WorkloadError(WAError): |  | ||||||
|     """General Workload error.""" |  | ||||||
|     pass |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class HostError(WAError): |  | ||||||
|     """Problem with the host on which WA is running.""" |  | ||||||
|     pass |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ModuleError(WAError): |  | ||||||
|     """ |  | ||||||
|     Problem with a module. |  | ||||||
|  |  | ||||||
|     .. note:: Modules for specific plugin types should raise exceptions |  | ||||||
|               appropriate to that plugin. E.g. a ``Device`` module should raise |  | ||||||
|               ``DeviceError``. This is intended for situations where a module is |  | ||||||
|               unsure (and/or doesn't care) what its owner is. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|     pass |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class InstrumentError(WAError): |  | ||||||
|     """General Instrument error.""" |  | ||||||
|     pass |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ResultProcessorError(WAError): |  | ||||||
|     """General ResultProcessor error.""" |  | ||||||
|     pass |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ResourceError(WAError): |  | ||||||
|     """General Resolver error.""" |  | ||||||
|     pass |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class CommandError(WAError): |  | ||||||
|     """Raised by commands when they have encountered an error condition |  | ||||||
|     during execution.""" |  | ||||||
|     pass |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ToolError(WAError): |  | ||||||
|     """Raised by tools when they have encountered an error condition |  | ||||||
|     during execution.""" |  | ||||||
|     pass |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class LoaderError(WAError): |  | ||||||
|     """Raised when there is an error loading an plugin or |  | ||||||
|     an external resource. Apart form the usual message, the __init__ |  | ||||||
|     takes an exc_info parameter which should be the result of |  | ||||||
|     sys.exc_info() for the original exception (if any) that |  | ||||||
|     caused the error.""" |  | ||||||
|  |  | ||||||
|     def __init__(self, message, exc_info=None): |  | ||||||
|         super(LoaderError, self).__init__(message) |  | ||||||
|         self.exc_info = exc_info |  | ||||||
|  |  | ||||||
|     def __str__(self): |  | ||||||
|         if self.exc_info: |  | ||||||
|             orig = self.exc_info[1] |  | ||||||
|             orig_name = type(orig).__name__ |  | ||||||
|             if isinstance(orig, WAError): |  | ||||||
|                 reason = 'because of:\n{}: {}'.format(orig_name, orig) |  | ||||||
|             else: |  | ||||||
|                 reason = 'because of:\n{}\n{}: {}'.format(get_traceback(self.exc_info), orig_name, orig) |  | ||||||
|             return '\n'.join([self.message, reason]) |  | ||||||
|         else: |  | ||||||
|             return self.message |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ConfigError(WAError): |  | ||||||
|     """Raised when configuration provided is invalid. This error suggests that |  | ||||||
|     the user should modify their config and try again.""" |  | ||||||
|     pass |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class WorkerThreadError(WAError): |  | ||||||
|     """ |  | ||||||
|     This should get raised in the main thread if a non-WAError-derived exception occurs on |  | ||||||
|     a worker/background thread. If a WAError-derived exception is raised in the worker, then |  | ||||||
|     that exception should be re-raised on the main thread directly -- the main point of this is |  | ||||||
|     to preserve the backtrace in the output, as backtraces do not get output for WAErrors. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     def __init__(self, thread, exc_info): |  | ||||||
|         self.thread = thread |  | ||||||
|         self.exc_info = exc_info |  | ||||||
|         orig = self.exc_info[1] |  | ||||||
|         orig_name = type(orig).__name__ |  | ||||||
|         message = 'Exception of type {} occurred on thread {}:\n'.format(orig_name, thread) |  | ||||||
|         message += '{}\n{}: {}'.format(get_traceback(self.exc_info), orig_name, orig) |  | ||||||
|         super(WorkerThreadError, self).__init__(message) |  | ||||||
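A minimal sketch of the intended pattern; the Worker class and its payload are made up for illustration:

    import sys
    import threading

    class Worker(threading.Thread):

        def __init__(self, payload):
            super(Worker, self).__init__()
            self.payload = payload    # callable that does the actual work
            self.exc_info = None

        def run(self):
            try:
                self.payload()
            except Exception:  # capture for re-raising on the main thread
                self.exc_info = sys.exc_info()

    worker = Worker(lambda: 1 / 0)    # payload that will blow up
    worker.start()
    worker.join()
    if worker.exc_info:
        if isinstance(worker.exc_info[1], WAError):
            raise worker.exc_info[1]  # WA errors are re-raised directly
        raise WorkerThreadError(worker.name, worker.exc_info)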
|  |  | ||||||
|  |  | ||||||
| class SerializerSyntaxError(Exception): |  | ||||||
|     """ |  | ||||||
|     Error loading a serialized structure from/to a file handle. |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     def __init__(self, message, line=None, column=None): |  | ||||||
|         super(SerializerSyntaxError, self).__init__(message) |  | ||||||
|         self.line = line |  | ||||||
|         self.column = column |  | ||||||
|  |  | ||||||
|     def __str__(self): |  | ||||||
|         linestring = ' on line {}'.format(self.line) if self.line else '' |  | ||||||
|         colstring = ' in column {}'.format(self.column) if self.column else '' |  | ||||||
|         message = 'Syntax Error{}: {}' |  | ||||||
|         return message.format(''.join([linestring, colstring]), self.message) |  | ||||||
							
								
								
									
wlauto/external/README (74 lines, vendored)
| @@ -1,74 +0,0 @@ | |||||||
| This directory contains external libraries and standalone utilities which have |  | ||||||
| been written/modified to work with Workload Automation (and thus need to be |  | ||||||
| included with WA rather than obtained from original sources). |  | ||||||
|  |  | ||||||
|  |  | ||||||
| bbench_server |  | ||||||
| ============= |  | ||||||
|  |  | ||||||
| This is a small server that is used to detect when the ``bbench`` workload has completed. |  | ||||||
| ``bbench`` navigates through a bunch of web pages in a browser using javascript. |  | ||||||
| It will cause the browser to send a GET request to the port the bbench_server is |  | ||||||
| listening on, indicating the end of the workload. |  | ||||||
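For example, with the server running (it is started with a timeout argument and, per the source below, listens on port 3030), the completion request can be simulated from the host; the forwarding setup is an assumption:

    import urllib2

    # assumes bbench_server was started (e.g. "bbench_server 300") and that
    # port 3030 is reachable from the host (e.g. via adb forward)
    response = urllib2.urlopen('http://localhost:3030/', timeout=5)
    print response.read()   # the "Connection Accepted" page from the server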
|  |  | ||||||
|  |  | ||||||
| daq_server |  | ||||||
| ========== |  | ||||||
|  |  | ||||||
| Contains the DAQ server files that will run on a Windows machine. Please refer to |  | ||||||
| the daq instrument documentation. |  | ||||||
|  |  | ||||||
|  |  | ||||||
| louie (third party) |  | ||||||
| ===== |  | ||||||
|  |  | ||||||
| Python package that is itself a fork of (and now a replacement for) pydispatcher. |  | ||||||
| This library provides a signal dispatching mechanism. It has been modified for |  | ||||||
| WA to add prioritization to callbacks. |  | ||||||
|  |  | ||||||
|  |  | ||||||
| pmu_logger |  | ||||||
| ========== |  | ||||||
|  |  | ||||||
| Source for the kernel driver that enables the logging of CCI counters to ftrace |  | ||||||
| on a periodic basis. This driver is required by the ``cci_pmu_logger`` instrument. |  | ||||||
|  |  | ||||||
|  |  | ||||||
| readenergy |  | ||||||
| ========== |  | ||||||
|  |  | ||||||
| Outputs Juno internal energy/power/voltage/current measurements by reading APB |  | ||||||
| registers from memory. This is used by the ``juno_energy`` instrument. |  | ||||||
|  |  | ||||||
|  |  | ||||||
| revent |  | ||||||
| ====== |  | ||||||
|  |  | ||||||
| This is a tool that is used to both record and playback key press and screen tap |  | ||||||
| events. It is used to record UI manipulation for some workloads (such as games) |  | ||||||
| where it is not possible to use the Android UI Automator. |  | ||||||
|  |  | ||||||
| The tool is also included in binary form in wlauto/common/. In order to build |  | ||||||
| it from source, you will need to have the Android NDK in your PATH. |  | ||||||
|  |  | ||||||
|  |  | ||||||
| stacktracer.py (third party) |  | ||||||
| ============== |  | ||||||
|  |  | ||||||
| A module based on an ActiveState recipe that allows tracing thread stacks during |  | ||||||
| execution of a Python program. This is used through the ``--debug`` flag in WA |  | ||||||
| to ease debugging multi-threaded parts of the code. |  | ||||||
|  |  | ||||||
|  |  | ||||||
| terminalsize.py (third party) |  | ||||||
| =============== |  | ||||||
|  |  | ||||||
| Implements a platform-agnostic way of determining terminal window size. Taken |  | ||||||
| from a public GitHub gist. |  | ||||||
|  |  | ||||||
|  |  | ||||||
| uiauto |  | ||||||
| ====== |  | ||||||
|  |  | ||||||
| Contains the utilities library for UI automation. |  | ||||||
|  |  | ||||||
							
								
								
									
wlauto/external/bbench_server/build.sh (31 lines, vendored)
| @@ -1,31 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
| #    Copyright 2013-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
|  |  | ||||||
| BUILD_COMMAND=ndk-build |  | ||||||
|  |  | ||||||
| if [[ $(which $BUILD_COMMAND) ]] ; then  |  | ||||||
| 	$BUILD_COMMAND |  | ||||||
| 	if [[ $? -eq 0 ]]; then |  | ||||||
| 		echo Copying to ../../workloads/bbench/ |  | ||||||
| 		cp libs/armeabi/bbench_server ../../workloads/bbench/bin/armeabi/bbench_server |  | ||||||
| 	fi |  | ||||||
| else  |  | ||||||
| 	echo Please make sure you have Android NDK in your PATH.  |  | ||||||
| 	exit 1 |  | ||||||
| fi |  | ||||||
|  |  | ||||||
							
								
								
									
wlauto/external/bbench_server/jni/Android.mk (9 lines, vendored)
| @@ -1,9 +0,0 @@ | |||||||
| LOCAL_PATH:= $(call my-dir) |  | ||||||
|  |  | ||||||
| include $(CLEAR_VARS) |  | ||||||
| LOCAL_SRC_FILES:= bbench_server.cpp |  | ||||||
| LOCAL_MODULE := bbench_server |  | ||||||
| LOCAL_MODULE_TAGS := optional |  | ||||||
| LOCAL_STATIC_LIBRARIES := libc |  | ||||||
| LOCAL_SHARED_LIBRARIES :=  |  | ||||||
| include $(BUILD_EXECUTABLE) |  | ||||||
							
								
								
									
wlauto/external/bbench_server/jni/bbench_server.cpp (151 lines, vendored)
| @@ -1,151 +0,0 @@ | |||||||
| /*    Copyright 2012-2015 ARM Limited |  | ||||||
|  * |  | ||||||
|  * Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
|  * you may not use this file except in compliance with the License. |  | ||||||
|  * You may obtain a copy of the License at |  | ||||||
|  * |  | ||||||
|  *     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
|  * |  | ||||||
|  * Unless required by applicable law or agreed to in writing, software |  | ||||||
|  * distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
|  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
|  * See the License for the specific language governing permissions and |  | ||||||
|  * limitations under the License. |  | ||||||
| */ |  | ||||||
|  |  | ||||||
| /**************************************************************************/ |  | ||||||
| /* Simple HTTP server program that will return on accepting connection    */ |  | ||||||
| /**************************************************************************/ |  | ||||||
|  |  | ||||||
| /* Tested on Android ICS browser and FireFox browser */ |  | ||||||
|  |  | ||||||
| #include <stdio.h> |  | ||||||
| #include <stdlib.h> |  | ||||||
| #include <unistd.h> |  | ||||||
| #include <errno.h> |  | ||||||
| #include <string.h> |  | ||||||
| #include <sys/types.h> |  | ||||||
| #include <sys/socket.h> |  | ||||||
| #include <netinet/in.h> |  | ||||||
| #include <netdb.h> |  | ||||||
| #include <arpa/inet.h> |  | ||||||
| #include <sys/wait.h> |  | ||||||
|  |  | ||||||
| #define SERVERPORT "3030" |  | ||||||
|  |  | ||||||
| void ExitOnError(int condition, const char *msg) |  | ||||||
| { |  | ||||||
|    if(condition) { printf("Server: %s\n", msg); exit(1);} |  | ||||||
| } |  | ||||||
|  |  | ||||||
| void *GetInetAddr(struct sockaddr *sa) |  | ||||||
| { |  | ||||||
|     if (sa->sa_family == AF_INET) |  | ||||||
|     { |  | ||||||
|         return &(((struct sockaddr_in*)sa)->sin_addr); |  | ||||||
|     } |  | ||||||
|     else |  | ||||||
|     { |  | ||||||
| 	    return &(((struct sockaddr_in6*)sa)->sin6_addr); |  | ||||||
| 	} |  | ||||||
| } |  | ||||||
|  |  | ||||||
| int main(int argc, char *argv[]) |  | ||||||
| { |  | ||||||
|  |  | ||||||
|     socklen_t addr_size; |  | ||||||
|     struct addrinfo hints, *res; |  | ||||||
|     int server_fd, client_fd; |  | ||||||
|     int retval; |  | ||||||
|     int timeout_in_seconds; |  | ||||||
|  |  | ||||||
|     // Get the timeout value in seconds |  | ||||||
|     if(argc < 2) |  | ||||||
|     { |  | ||||||
|         printf("Usage %s <timeout in seconds>\n", argv[0]); |  | ||||||
|         exit(1); |  | ||||||
|     } |  | ||||||
|     else |  | ||||||
|     { |  | ||||||
|         timeout_in_seconds = atoi(argv[1]); |  | ||||||
|         printf("Server: Waiting for connection on port %s with timeout of %d seconds\n", SERVERPORT, timeout_in_seconds); |  | ||||||
|  |  | ||||||
|     } |  | ||||||
|  |  | ||||||
| 	/**************************************************************************/ |  | ||||||
| 	/* Listen to a socket	                                                  */ |  | ||||||
| 	/**************************************************************************/ |  | ||||||
|     memset(&hints, 0, sizeof hints); |  | ||||||
|     hints.ai_family = AF_UNSPEC;  // use IPv4 or IPv6, whichever |  | ||||||
|     hints.ai_socktype = SOCK_STREAM; |  | ||||||
|     hints.ai_flags = AI_PASSIVE;     // fill in my IP for me |  | ||||||
|  |  | ||||||
|     getaddrinfo(NULL, SERVERPORT, &hints, &res); |  | ||||||
|  |  | ||||||
|  |  | ||||||
|     server_fd = socket(res->ai_family, res->ai_socktype, res->ai_protocol); |  | ||||||
|     ExitOnError(server_fd < 0, "Socket creation failed"); |  | ||||||
|  |  | ||||||
|     retval = bind(server_fd, res->ai_addr, res->ai_addrlen); |  | ||||||
|     ExitOnError(retval < 0, "Bind failed"); |  | ||||||
|  |  | ||||||
|     retval = listen(server_fd, 10); |  | ||||||
|     ExitOnError(retval < 0, "Listen failed"); |  | ||||||
|  |  | ||||||
| 	/**************************************************************************/ |  | ||||||
| 	/* Wait for connection to arrive or time out							  */ |  | ||||||
| 	/**************************************************************************/ |  | ||||||
|     fd_set readfds; |  | ||||||
|     FD_ZERO(&readfds); |  | ||||||
|     FD_SET(server_fd, &readfds); |  | ||||||
|  |  | ||||||
|     // Timeout parameter |  | ||||||
|     timeval tv; |  | ||||||
|     tv.tv_sec  = timeout_in_seconds; |  | ||||||
|     tv.tv_usec = 0; |  | ||||||
|  |  | ||||||
|     int ret = select(server_fd+1, &readfds, NULL, NULL, &tv); |  | ||||||
|     ExitOnError(ret <= 0, "No connection established, timed out"); |  | ||||||
| 	ExitOnError(FD_ISSET(server_fd, &readfds) == 0, "Error occurred in select"); |  | ||||||
|  |  | ||||||
| 	/**************************************************************************/ |  | ||||||
| 	/* Accept connection and print the information							  */ |  | ||||||
| 	/**************************************************************************/ |  | ||||||
|     { |  | ||||||
| 		struct sockaddr_storage client_addr; |  | ||||||
| 		char client_addr_string[INET6_ADDRSTRLEN]; |  | ||||||
|     	addr_size = sizeof client_addr; |  | ||||||
|     	client_fd = accept(server_fd, (struct sockaddr *)&client_addr, &addr_size); |  | ||||||
|     	ExitOnError(client_fd < 0, "Accept failed"); |  | ||||||
|  |  | ||||||
|     	inet_ntop(client_addr.ss_family, |  | ||||||
|     			  GetInetAddr((struct sockaddr *)&client_addr), |  | ||||||
|     			  client_addr_string, |  | ||||||
|     			  sizeof client_addr_string); |  | ||||||
|     	printf("Server: Received connection from %s\n", client_addr_string); |  | ||||||
| 	} |  | ||||||
|  |  | ||||||
|  |  | ||||||
|     /**************************************************************************/ |  | ||||||
|     /* Send an acceptable HTTP response                                      */ |  | ||||||
|     /**************************************************************************/ |  | ||||||
|     { |  | ||||||
|  |  | ||||||
| 		char response[] = "HTTP/1.1 200 OK\r\n" |  | ||||||
|                           "Content-Type: text/html\r\n" |  | ||||||
|                           "Connection: close\r\n" |  | ||||||
|                           "\r\n" |  | ||||||
|                           "<html>" |  | ||||||
|                           "<head>Local Server: Connection Accepted</head>" |  | ||||||
|                           "<body></body>" |  | ||||||
|                           "</html>"; |  | ||||||
| 		int  bytes_sent; |  | ||||||
|         bytes_sent = send(client_fd, response, strlen(response), 0); |  | ||||||
| 		ExitOnError(bytes_sent < 0, "Sending Response failed"); |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|  |  | ||||||
|     close(client_fd); |  | ||||||
|     close(server_fd); |  | ||||||
|     return 0; |  | ||||||
| } |  | ||||||
							
								
								
									
										
BIN wlauto/external/daq_server/daqpower-1.0.5.tar.gz (vendored): binary file not shown
wlauto/external/daq_server/src/README (0 lines, vendored)
wlauto/external/daq_server/src/build.sh (25 lines, vendored)
| @@ -1,25 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
| #    Copyright 2014-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
| python setup.py sdist |  | ||||||
| rm -rf build |  | ||||||
| rm -f MANIFEST |  | ||||||
| if [[ -d dist ]]; then |  | ||||||
| 	mv dist/*.tar.gz .. |  | ||||||
| 	rm -rf dist |  | ||||||
| fi |  | ||||||
| find . -iname \*.pyc -delete |  | ||||||
| @@ -1,17 +0,0 @@ | |||||||
| #    Copyright 2014-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
| __version__ = '1.0.5' |  | ||||||
							
								
								
									
wlauto/external/daq_server/src/daqpower/client.py (380 lines, vendored)
| @@ -1,380 +0,0 @@ | |||||||
| #    Copyright 2014-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # pylint: disable=E1101,E1103,wrong-import-position |  | ||||||
| import os |  | ||||||
| import sys |  | ||||||
|  |  | ||||||
| from twisted.internet import reactor |  | ||||||
| from twisted.internet.protocol import Protocol, ClientFactory, ReconnectingClientFactory |  | ||||||
| from twisted.internet.error import ConnectionLost, ConnectionDone |  | ||||||
| from twisted.protocols.basic import LineReceiver |  | ||||||
|  |  | ||||||
| if __name__ == '__main__':  # for debugging |  | ||||||
|     sys.path.append(os.path.join(os.path.dirname(__file__), '..')) |  | ||||||
| from daqpower import log |  | ||||||
| from daqpower.common import DaqServerRequest, DaqServerResponse, Status |  | ||||||
| from daqpower.config import get_config_parser |  | ||||||
|  |  | ||||||
|  |  | ||||||
| __all__ = ['execute_command', 'run_send_command', 'Status'] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class Command(object): |  | ||||||
|  |  | ||||||
|     def __init__(self, name, **params): |  | ||||||
|         self.name = name |  | ||||||
|         self.params = params |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class CommandResult(object): |  | ||||||
|  |  | ||||||
|     def __init__(self): |  | ||||||
|         self.status = None |  | ||||||
|         self.message = None |  | ||||||
|         self.data = None |  | ||||||
|  |  | ||||||
|     def __str__(self): |  | ||||||
|         return '{} {}'.format(self.status, self.message) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class CommandExecutorProtocol(Protocol): |  | ||||||
|  |  | ||||||
|     def __init__(self, command, timeout=10, retries=1): |  | ||||||
|         self.command = command |  | ||||||
|         self.sent_request = None |  | ||||||
|         self.waiting_for_response = False |  | ||||||
|         self.keep_going = None |  | ||||||
|         self.ports_to_pull = None |  | ||||||
|         self.factory = None |  | ||||||
|         self.timeoutCallback = None |  | ||||||
|         self.timeout = timeout |  | ||||||
|         self.retries = retries |  | ||||||
|         self.retry_count = 0 |  | ||||||
|  |  | ||||||
|     def connectionMade(self): |  | ||||||
|         if self.command.name == 'get_data': |  | ||||||
|             self.sendRequest('list_port_files') |  | ||||||
|         else: |  | ||||||
|             self.sendRequest(self.command.name, **self.command.params) |  | ||||||
|  |  | ||||||
|     def connectionLost(self, reason=ConnectionDone): |  | ||||||
|         if isinstance(reason, ConnectionLost): |  | ||||||
|             self.errorOut('connection lost: {}'.format(reason)) |  | ||||||
|         elif self.waiting_for_response: |  | ||||||
|             self.errorOut('Server closed connection without sending a response.') |  | ||||||
|         else: |  | ||||||
|             log.debug('connection terminated.') |  | ||||||
|  |  | ||||||
|     def sendRequest(self, command, **params): |  | ||||||
|         self.sent_request = DaqServerRequest(command, params) |  | ||||||
|         request_string = self.sent_request.serialize() |  | ||||||
|         log.debug('sending request: {}'.format(request_string)) |  | ||||||
|         self.transport.write(''.join([request_string, '\r\n'])) |  | ||||||
|         self.timeoutCallback = reactor.callLater(self.timeout, self.requestTimedOut) |  | ||||||
|         self.waiting_for_response = True |  | ||||||
|  |  | ||||||
|     def dataReceived(self, data): |  | ||||||
|         self.keep_going = False |  | ||||||
|         if self.waiting_for_response: |  | ||||||
|             self.waiting_for_response = False |  | ||||||
|             self.timeoutCallback.cancel() |  | ||||||
|             try: |  | ||||||
|                 response = DaqServerResponse.deserialize(data) |  | ||||||
|             except Exception, e:  # pylint: disable=W0703 |  | ||||||
|                 self.errorOut('Invalid response: {} ({})'.format(data, e)) |  | ||||||
|             else: |  | ||||||
|                 if response.status != Status.ERROR: |  | ||||||
|                     self.processResponse(response)  # may set self.keep_going |  | ||||||
|                     if not self.keep_going: |  | ||||||
|                         self.commandCompleted(response.status, response.message, response.data) |  | ||||||
|                 else: |  | ||||||
|                     self.errorOut(response.message) |  | ||||||
|         else: |  | ||||||
|             self.errorOut('unexpected data received: {}\n'.format(data)) |  | ||||||
|  |  | ||||||
|     def processResponse(self, response): |  | ||||||
|         if self.sent_request.command in ['list_ports', 'list_port_files']: |  | ||||||
|             self.processPortsResponse(response) |  | ||||||
|         elif self.sent_request.command == 'list_devices': |  | ||||||
|             self.processDevicesResponse(response) |  | ||||||
|         elif self.sent_request.command == 'pull': |  | ||||||
|             self.processPullResponse(response) |  | ||||||
|  |  | ||||||
|     def processPortsResponse(self, response): |  | ||||||
|         if 'ports' not in response.data: |  | ||||||
|             self.errorOut('Response did not contain ports data: {} ({}).'.format(response, response.data)) |  | ||||||
|         ports = response.data['ports'] |  | ||||||
|         response.data = ports |  | ||||||
|         if self.command.name == 'get_data': |  | ||||||
|             if ports: |  | ||||||
|                 self.ports_to_pull = ports |  | ||||||
|                 self.sendPullRequest(self.ports_to_pull.pop()) |  | ||||||
|             else: |  | ||||||
|                 response.status = Status.OKISH |  | ||||||
|                 response.message = 'No ports were returned.' |  | ||||||
|  |  | ||||||
|     def processDevicesResponse(self, response): |  | ||||||
|         if response.status == Status.OK: |  | ||||||
|             if 'devices' not in response.data: |  | ||||||
|                 self.errorOut('Response did not contain devices data: {} ({}).'.format(response, response.data)) |  | ||||||
|             devices = response.data['devices'] |  | ||||||
|             response.data = devices |  | ||||||
|  |  | ||||||
|     def sendPullRequest(self, port_id): |  | ||||||
|         self.sendRequest('pull', port_id=port_id) |  | ||||||
|         self.keep_going = True |  | ||||||
|  |  | ||||||
|     def processPullResponse(self, response): |  | ||||||
|         if 'port_number' not in response.data: |  | ||||||
|             self.errorOut('Response does not contain port number: {} ({}).'.format(response, response.data)) |  | ||||||
|         port_number = response.data.pop('port_number') |  | ||||||
|         filename = self.sent_request.params['port_id'] + '.csv' |  | ||||||
|         self.factory.initiateFileTransfer(filename, port_number) |  | ||||||
|         if self.ports_to_pull: |  | ||||||
|             self.sendPullRequest(self.ports_to_pull.pop()) |  | ||||||
|  |  | ||||||
|     def commandCompleted(self, status, message=None, data=None): |  | ||||||
|         self.factory.result.status = status |  | ||||||
|         self.factory.result.message = message |  | ||||||
|         self.factory.result.data = data |  | ||||||
|         self.transport.loseConnection() |  | ||||||
|  |  | ||||||
|     def requestTimedOut(self): |  | ||||||
|         self.retry_count += 1 |  | ||||||
|         if self.retry_count > self.retries: |  | ||||||
|             self.errorOut("Request timed out; server failed to respond.") |  | ||||||
|         else: |  | ||||||
|             log.debug('Retrying...') |  | ||||||
|             self.connectionMade() |  | ||||||
|  |  | ||||||
|     def errorOut(self, message): |  | ||||||
|         self.factory.errorOut(message) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class CommandExecutorFactory(ClientFactory): |  | ||||||
|  |  | ||||||
|     protocol = CommandExecutorProtocol |  | ||||||
|     wait_delay = 1 |  | ||||||
|  |  | ||||||
|     def __init__(self, config, command, timeout=10, retries=1): |  | ||||||
|         self.config = config |  | ||||||
|         self.command = command |  | ||||||
|         self.timeout = timeout |  | ||||||
|         self.retries = retries |  | ||||||
|         self.result = CommandResult() |  | ||||||
|         self.done = False |  | ||||||
|         self.transfers_in_progress = {} |  | ||||||
|         if command.name == 'get_data': |  | ||||||
|             if 'output_directory' not in command.params: |  | ||||||
|                 self.errorOut('output_directory not specified for the get_data command.') |  | ||||||
|             self.output_directory = command.params['output_directory'] |  | ||||||
|             if not os.path.isdir(self.output_directory): |  | ||||||
|                 log.debug('Creating output directory {}'.format(self.output_directory)) |  | ||||||
|                 os.makedirs(self.output_directory) |  | ||||||
|  |  | ||||||
|     def buildProtocol(self, addr): |  | ||||||
|         protocol = CommandExecutorProtocol(self.command, self.timeout, self.retries) |  | ||||||
|         protocol.factory = self |  | ||||||
|         return protocol |  | ||||||
|  |  | ||||||
|     def initiateFileTransfer(self, filename, port): |  | ||||||
|         log.debug('Downloading {} from port {}'.format(filename, port)) |  | ||||||
|         filepath = os.path.join(self.output_directory, filename) |  | ||||||
|         session = FileReceiverFactory(filepath, self) |  | ||||||
|         connector = reactor.connectTCP(self.config.host, port, session) |  | ||||||
|         self.transfers_in_progress[session] = connector |  | ||||||
|  |  | ||||||
|     def transferComplete(self, session): |  | ||||||
|         connector = self.transfers_in_progress[session] |  | ||||||
|         log.debug('Transfer on port {} complete.'.format(connector.port)) |  | ||||||
|         del self.transfers_in_progress[session] |  | ||||||
|  |  | ||||||
|     def clientConnectionLost(self, connector, reason): |  | ||||||
|         if self.transfers_in_progress: |  | ||||||
|             log.debug('Waiting for the transfer(s) to complete.') |  | ||||||
|         self.waitForTransfersToCompleteAndExit() |  | ||||||
|  |  | ||||||
|     def clientConnectionFailed(self, connector, reason): |  | ||||||
|         self.result.status = Status.ERROR |  | ||||||
|         self.result.message = 'Could not connect to server.' |  | ||||||
|         self.waitForTransfersToCompleteAndExit() |  | ||||||
|  |  | ||||||
|     def waitForTransfersToCompleteAndExit(self): |  | ||||||
|         if self.transfers_in_progress: |  | ||||||
|             reactor.callLater(self.wait_delay, self.waitForTransfersToCompleteAndExit) |  | ||||||
|         else: |  | ||||||
|             log.debug('Stopping the reactor.') |  | ||||||
|             reactor.stop() |  | ||||||
|  |  | ||||||
|     def errorOut(self, message): |  | ||||||
|         self.result.status = Status.ERROR |  | ||||||
|         self.result.message = message |  | ||||||
|         reactor.crash() |  | ||||||
|  |  | ||||||
|     def __str__(self): |  | ||||||
|         return '<CommandExecutorProtocol {}>'.format(self.command.name) |  | ||||||
|  |  | ||||||
|     __repr__ = __str__ |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class FileReceiver(LineReceiver):  # pylint: disable=W0223 |  | ||||||
|  |  | ||||||
|     def __init__(self, path): |  | ||||||
|         self.path = path |  | ||||||
|         self.fh = None |  | ||||||
|         self.factory = None |  | ||||||
|  |  | ||||||
|     def connectionMade(self): |  | ||||||
|         if os.path.isfile(self.path): |  | ||||||
|             log.warning('overwriting existing file.') |  | ||||||
|             os.remove(self.path) |  | ||||||
|         self.fh = open(self.path, 'w') |  | ||||||
|  |  | ||||||
|     def connectionLost(self, reason=ConnectionDone): |  | ||||||
|         if self.fh: |  | ||||||
|             self.fh.close() |  | ||||||
|  |  | ||||||
|     def lineReceived(self, line): |  | ||||||
|         line = line.rstrip('\r\n') + '\n' |  | ||||||
|         self.fh.write(line) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class FileReceiverFactory(ReconnectingClientFactory): |  | ||||||
|  |  | ||||||
|     def __init__(self, path, owner): |  | ||||||
|         self.path = path |  | ||||||
|         self.owner = owner |  | ||||||
|  |  | ||||||
|     def buildProtocol(self, addr): |  | ||||||
|         protocol = FileReceiver(self.path) |  | ||||||
|         protocol.factory = self |  | ||||||
|         self.resetDelay() |  | ||||||
|         return protocol |  | ||||||
|  |  | ||||||
|     def clientConnectionLost(self, connector, reason): |  | ||||||
|         if isinstance(reason, ConnectionLost): |  | ||||||
|             log.error('Connection lost: {}'.format(reason)) |  | ||||||
|             ReconnectingClientFactory.clientConnectionLost(self, connector, reason) |  | ||||||
|         else: |  | ||||||
|             self.owner.transferComplete(self) |  | ||||||
|  |  | ||||||
|     def clientConnectionFailed(self, connector, reason): |  | ||||||
|         if isinstance(reason, ConnectionLost): |  | ||||||
|             log.error('Connection failed: {}'.format(reason)) |  | ||||||
|             ReconnectingClientFactory.clientConnectionFailed(self, connector, reason) |  | ||||||
|  |  | ||||||
|     def __str__(self): |  | ||||||
|         return '<FileReceiver {}>'.format(self.path) |  | ||||||
|  |  | ||||||
|     __repr__ = __str__ |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def execute_command(server_config, command, **kwargs): |  | ||||||
|     before_fds = _get_open_fds()  # see the comment in the finally clause below |  | ||||||
|     if isinstance(command, basestring): |  | ||||||
|         command = Command(command, **kwargs) |  | ||||||
|     timeout = 300 if command.name in ['stop', 'pull'] else 10 |  | ||||||
|     factory = CommandExecutorFactory(server_config, command, timeout) |  | ||||||
|  |  | ||||||
|     # reactors aren't designed to be re-startable. In order to be |  | ||||||
|     # able to call execute_command multiple times, we need to force |  | ||||||
|     # re-installation of the reactor; hence this hackery. |  | ||||||
|     # TODO: look into implementing restartable reactors. According to the |  | ||||||
|     #       Twisted FAQ, there is no good reason why there isn't one: |  | ||||||
|     #       http://twistedmatrix.com/trac/wiki/FrequentlyAskedQuestions#WhycanttheTwistedsreactorberestarted |  | ||||||
|     from twisted.internet import default |  | ||||||
|     del sys.modules['twisted.internet.reactor'] |  | ||||||
|     default.install() |  | ||||||
|     global reactor  # pylint: disable=W0603 |  | ||||||
|     reactor = sys.modules['twisted.internet.reactor'] |  | ||||||
|  |  | ||||||
|     try: |  | ||||||
|         reactor.connectTCP(server_config.host, server_config.port, factory) |  | ||||||
|         reactor.run() |  | ||||||
|         return factory.result |  | ||||||
|     finally: |  | ||||||
|         # re-startable reactor hack part 2. |  | ||||||
|         # twisted hijacks SIGINT and doesn't bother to un-hijack it when the reactor |  | ||||||
|         # stops. So we have to do it for it *rolls eye*. |  | ||||||
|         import signal |  | ||||||
|         signal.signal(signal.SIGINT, signal.default_int_handler) |  | ||||||
|         # OK, the reactor is also leaking file descriptors. Tracking down all |  | ||||||
|         # of them is non trivial, so instead we're just comparing the before |  | ||||||
|         # and after lists of open FDs for the current process, and closing all |  | ||||||
|         # new ones, as execute_command should never leave anything open after |  | ||||||
|         # it exits (even when downloading data files from the server). |  | ||||||
|         # TODO: This is way too hacky even compared to the rest of this function. |  | ||||||
|         #       Additionally, the current implementation ties this to UNIX, |  | ||||||
|         #       so in the long run, we need to do this properly and get the FDs |  | ||||||
|         #       from the reactor. |  | ||||||
|         after_fds = _get_open_fds() |  | ||||||
|         for fd in after_fds - before_fds: |  | ||||||
|             try: |  | ||||||
|                 os.close(int(fd[1:])) |  | ||||||
|             except OSError: |  | ||||||
|                 pass |  | ||||||
|         # Below is the alternative code that gets FDs from the reactor, however |  | ||||||
|         # at the moment it doesn't seem to get everything, which is why code |  | ||||||
|         # above is used instead. |  | ||||||
|         #for fd in reactor._selectables: |  | ||||||
|         #    os.close(fd) |  | ||||||
|         #reactor._poller.close() |  | ||||||
|  |  | ||||||
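For illustration, a sketch of driving the client programmatically; the stand-in configuration class and port number below are made up (normally the config object comes from get_config_parser() in daqpower.config):

    class FakeServerConfig(object):   # stand-in for the parsed configuration
        host = '127.0.0.1'
        port = 45677                  # hypothetical daq server port

    result = execute_command(FakeServerConfig(), 'list_ports')
    if result.status == Status.OK:
        print result.data             # e.g. port labels configured on the server
    else:
        print result.message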
|  |  | ||||||
| def _get_open_fds(): |  | ||||||
|     if os.name == 'posix': |  | ||||||
|         import subprocess |  | ||||||
|         pid = os.getpid() |  | ||||||
|         procs = subprocess.check_output(["lsof", '-w', '-Ff', "-p", str(pid)]) |  | ||||||
|         return set(procs.split()) |  | ||||||
|     else: |  | ||||||
|         # TODO: Implement the Windows equivalent. |  | ||||||
|         return [] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def run_send_command(): |  | ||||||
|     """Main entry point when running as a script -- should not be invoked form another module.""" |  | ||||||
|     parser = get_config_parser() |  | ||||||
|     parser.add_argument('command') |  | ||||||
|     parser.add_argument('-o', '--output-directory', metavar='DIR', default='.', |  | ||||||
|                         help='Directory used to output data files (defaults to the current directory).') |  | ||||||
|     parser.add_argument('--verbose', help='Produce verbose output.', action='store_true', default=False) |  | ||||||
|     args = parser.parse_args() |  | ||||||
|     if not args.device_config.labels: |  | ||||||
|         args.device_config.labels = ['PORT_{}'.format(i) for i in xrange(len(args.device_config.resistor_values))] |  | ||||||
|  |  | ||||||
|     if args.verbose: |  | ||||||
|         log.start_logging('DEBUG') |  | ||||||
|     else: |  | ||||||
|         log.start_logging('INFO', fmt='%(levelname)-8s %(message)s') |  | ||||||
|  |  | ||||||
|     if args.command == 'configure': |  | ||||||
|         args.device_config.validate() |  | ||||||
|         command = Command(args.command, config=args.device_config) |  | ||||||
|     elif args.command == 'get_data': |  | ||||||
|         command = Command(args.command, output_directory=args.output_directory) |  | ||||||
|     else: |  | ||||||
|         command = Command(args.command) |  | ||||||
|  |  | ||||||
|     result = execute_command(args.server_config, command) |  | ||||||
|     print result |  | ||||||
|     if result.data: |  | ||||||
|         print result.data |  | ||||||
|  |  | ||||||
|  |  | ||||||
| if __name__ == '__main__': |  | ||||||
|     run_send_command() |  | ||||||
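For reference, a typical round trip with this client (a sketch: the module name is illustrative, the flags come from get_config_parser() above, and host/port default to 127.0.0.1:45677 unless --host/--port are given):

    python -m daqpower.client configure --resistor-values 0.005 0.005 --labels PORT_0 PORT_1
    python -m daqpower.client start
    python -m daqpower.client stop
    python -m daqpower.client get_data -o ./results
    python -m daqpower.client close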
							
								
								
									
wlauto/external/daq_server/src/daqpower/common.py (103 lines, vendored)
| @@ -1,103 +0,0 @@ | |||||||
| #    Copyright 2014-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # pylint: disable=E1101 |  | ||||||
| import json |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class Serializer(json.JSONEncoder): |  | ||||||
|  |  | ||||||
|     def default(self, o):  # pylint: disable=E0202 |  | ||||||
|         if isinstance(o, Serializable): |  | ||||||
|             return o.serialize() |  | ||||||
|         if isinstance(o, EnumEntry): |  | ||||||
|             return o.name |  | ||||||
|         return json.JSONEncoder.default(self, o) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class Serializable(object): |  | ||||||
|  |  | ||||||
|     @classmethod |  | ||||||
|     def deserialize(cls, text): |  | ||||||
|         return cls(**json.loads(text)) |  | ||||||
|  |  | ||||||
|     def serialize(self, d=None): |  | ||||||
|         if d is None: |  | ||||||
|             d = self.__dict__ |  | ||||||
|         return json.dumps(d, cls=Serializer) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class DaqServerRequest(Serializable): |  | ||||||
|  |  | ||||||
|     def __init__(self, command, params=None):  # pylint: disable=W0231 |  | ||||||
|         self.command = command |  | ||||||
|         self.params = params or {} |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class DaqServerResponse(Serializable): |  | ||||||
|  |  | ||||||
|     def __init__(self, status, message=None, data=None):  # pylint: disable=W0231 |  | ||||||
|         self.status = status |  | ||||||
|         self.message = message.strip().replace('\r\n', ' ') if message else '' |  | ||||||
|         self.data = data or {} |  | ||||||
|  |  | ||||||
|     def __str__(self): |  | ||||||
|         return '{} {}'.format(self.status, self.message or '') |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class EnumEntry(object): |  | ||||||
|  |  | ||||||
|     def __init__(self, name): |  | ||||||
|         self.name = name |  | ||||||
|  |  | ||||||
|     def __str__(self): |  | ||||||
|         return self.name |  | ||||||
|  |  | ||||||
|     def __cmp__(self, other): |  | ||||||
|         return cmp(self.name, str(other)) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class Enum(object): |  | ||||||
|     """ |  | ||||||
|     Assuming MyEnum = Enum('A', 'B'), |  | ||||||
|  |  | ||||||
|     MyEnum.A and MyEnum.B are valid values. |  | ||||||
|  |  | ||||||
|     a = MyEnum.A |  | ||||||
|     (a == MyEnum.A) == True |  | ||||||
|     (a in MyEnum) == True |  | ||||||
|  |  | ||||||
|     MyEnum('A') == MyEnum.A |  | ||||||
|  |  | ||||||
|     str(MyEnum.A) == 'A' |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     def __init__(self, *args): |  | ||||||
|         for a in args: |  | ||||||
|             setattr(self, a, EnumEntry(a)) |  | ||||||
|  |  | ||||||
|     def __call__(self, value): |  | ||||||
|         if value not in self.__dict__: |  | ||||||
|             raise ValueError('Not enum value: {}'.format(value)) |  | ||||||
|         return self.__dict__[value] |  | ||||||
|  |  | ||||||
|     def __iter__(self): |  | ||||||
|         for e in self.__dict__: |  | ||||||
|             yield self.__dict__[e] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| Status = Enum('OK', 'OKISH', 'ERROR') |  | ||||||
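A minimal sketch (not part of the original module) of how these pieces compose: Serializer reduces an EnumEntry to its name, so a DaqServerResponse round-trips through JSON cleanly.

    # Sketch only: exercising the classes above.
    response = DaqServerResponse(Status.OK, message='configured', data={'ports': 2})
    text = response.serialize()
    # -> '{"status": "OK", "message": "configured", "data": {"ports": 2}}'
    #    (key order is not guaranteed by json.dumps)
    request = DaqServerRequest.deserialize('{"command": "start"}')
    assert request.command == 'start' and request.params == {}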
							
								
								
									
wlauto/external/daq_server/src/daqpower/config.py (153 lines, vendored)
| @@ -1,153 +0,0 @@ | |||||||
| #    Copyright 2014-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
| import argparse |  | ||||||
|  |  | ||||||
| from daqpower.common import Serializable |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ConfigurationError(Exception): |  | ||||||
|     """Raised when configuration passed into DaqServer is invaid.""" |  | ||||||
|     pass |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class DeviceConfiguration(Serializable): |  | ||||||
|     """Encapulates configuration for the DAQ, typically, passed from |  | ||||||
|     the client.""" |  | ||||||
|  |  | ||||||
|     valid_settings = ['device_id', 'v_range', 'dv_range', 'sampling_rate', 'resistor_values', 'labels'] |  | ||||||
|  |  | ||||||
|     default_device_id = 'Dev1' |  | ||||||
|     default_v_range = 2.5 |  | ||||||
|     default_dv_range = 0.2 |  | ||||||
|     default_sampling_rate = 10000 |  | ||||||
|     # Channel map used in DAQ 6363 and similar. |  | ||||||
|     default_channel_map = (0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23) |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def number_of_ports(self): |  | ||||||
|         return len(self.resistor_values) |  | ||||||
|  |  | ||||||
|     def __init__(self, **kwargs):  # pylint: disable=W0231 |  | ||||||
|         try: |  | ||||||
|             self.device_id = kwargs.pop('device_id') or self.default_device_id |  | ||||||
|             self.v_range = float(kwargs.pop('v_range') or self.default_v_range) |  | ||||||
|             self.dv_range = float(kwargs.pop('dv_range') or self.default_dv_range) |  | ||||||
|             self.sampling_rate = int(kwargs.pop('sampling_rate') or self.default_sampling_rate) |  | ||||||
|             self.resistor_values = kwargs.pop('resistor_values') or [] |  | ||||||
|             self.channel_map = kwargs.pop('channel_map') or self.default_channel_map |  | ||||||
|             self.labels = (kwargs.pop('labels') or |  | ||||||
|                            ['PORT_{}'.format(i) for i in xrange(len(self.resistor_values))])  # '.csv' is appended later |  | ||||||
|         except KeyError, e: |  | ||||||
|             raise ConfigurationError('Missing config: {}'.format(e.message)) |  | ||||||
|         if kwargs: |  | ||||||
|             raise ConfigurationError('Unexpected config: {}'.format(kwargs)) |  | ||||||
|  |  | ||||||
|     def validate(self): |  | ||||||
|         if not self.number_of_ports: |  | ||||||
|             raise ConfigurationError('No resistor values were specified.') |  | ||||||
|         if len(self.resistor_values) != len(self.labels): |  | ||||||
|             message = 'The number of resistors ({}) does not match the number of labels ({})' |  | ||||||
|             raise ConfigurationError(message.format(len(self.resistor_values), len(self.labels))) |  | ||||||
|  |  | ||||||
|     def __str__(self): |  | ||||||
|         return self.serialize() |  | ||||||
|  |  | ||||||
|     __repr__ = __str__ |  | ||||||
|  |  | ||||||
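For illustration (a sketch, not original code): every valid setting must be passed as a keyword, because __init__ pops each key without a default; passing None selects the class default.

    # Sketch only: two ports with defaults everywhere else.
    config = DeviceConfiguration(device_id=None, v_range=None, dv_range=None,
                                 sampling_rate=None, resistor_values=[0.005, 0.005],
                                 channel_map=None, labels=None)
    config.validate()         # 2 resistor values, 2 generated labels -> passes
    print config.serialize()  # JSON string suitable for a DaqServerRequest 'config' param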
|  |  | ||||||
| class ServerConfiguration(object): |  | ||||||
|     """Client-side server configuration.""" |  | ||||||
|  |  | ||||||
|     valid_settings = ['host', 'port'] |  | ||||||
|  |  | ||||||
|     default_host = '127.0.0.1' |  | ||||||
|     default_port = 45677 |  | ||||||
|  |  | ||||||
|     def __init__(self, **kwargs): |  | ||||||
|         self.host = kwargs.pop('host', None) or self.default_host |  | ||||||
|         self.port = kwargs.pop('port', None) or self.default_port |  | ||||||
|         if kwargs: |  | ||||||
|             raise ConfigurationError('Unexpected config: {}'.format(kwargs)) |  | ||||||
|  |  | ||||||
|     def validate(self): |  | ||||||
|         if not self.host: |  | ||||||
|             raise ConfigurationError('Server host not specified.') |  | ||||||
|         if not self.port: |  | ||||||
|             raise ConfigurationError('Server port not specified.') |  | ||||||
|         elif not isinstance(self.port, int): |  | ||||||
|             raise ConfigurationError('Server port must be an integer.') |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class UpdateDeviceConfig(argparse.Action): |  | ||||||
|  |  | ||||||
|     def __call__(self, parser, namespace, values, option_string=None): |  | ||||||
|         setting = option_string.strip('-').replace('-', '_') |  | ||||||
|         if setting not in DeviceConfiguration.valid_settings: |  | ||||||
|             raise ConfigurationError('Unknown option: {}'.format(option_string)) |  | ||||||
|         setattr(namespace._device_config, setting, values)  # pylint: disable=protected-access |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class UpdateServerConfig(argparse.Action): |  | ||||||
|  |  | ||||||
|     def __call__(self, parser, namespace, values, option_string=None): |  | ||||||
|         setting = option_string.strip('-').replace('-', '_') |  | ||||||
|         if setting not in namespace.server_config.valid_settings: |  | ||||||
|             raise ConfigurationError('Unknown option: {}'.format(option_string)) |  | ||||||
|         setattr(namespace.server_config, setting, values) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ConfigNamespace(object): |  | ||||||
|  |  | ||||||
|     class _N(object): |  | ||||||
|         def __init__(self): |  | ||||||
|             self.device_id = None |  | ||||||
|             self.v_range = None |  | ||||||
|             self.dv_range = None |  | ||||||
|             self.sampling_rate = None |  | ||||||
|             self.resistor_values = None |  | ||||||
|             self.labels = None |  | ||||||
|             self.channel_map = None |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def device_config(self): |  | ||||||
|         return DeviceConfiguration(**self._device_config.__dict__) |  | ||||||
|  |  | ||||||
|     def __init__(self): |  | ||||||
|         self._device_config = self._N() |  | ||||||
|         self.server_config = ServerConfiguration() |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ConfigArgumentParser(argparse.ArgumentParser): |  | ||||||
|  |  | ||||||
|     def parse_args(self, *args, **kwargs): |  | ||||||
|         kwargs['namespace'] = ConfigNamespace() |  | ||||||
|         return super(ConfigArgumentParser, self).parse_args(*args, **kwargs) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def get_config_parser(server=True, device=True): |  | ||||||
|     parser = ConfigArgumentParser() |  | ||||||
|     if device: |  | ||||||
|         parser.add_argument('--device-id', action=UpdateDeviceConfig) |  | ||||||
|         parser.add_argument('--v-range', action=UpdateDeviceConfig, type=float) |  | ||||||
|         parser.add_argument('--dv-range', action=UpdateDeviceConfig, type=float) |  | ||||||
|         parser.add_argument('--sampling-rate', action=UpdateDeviceConfig, type=int) |  | ||||||
|         parser.add_argument('--resistor-values', action=UpdateDeviceConfig, type=float, nargs='*') |  | ||||||
|         parser.add_argument('--labels', action=UpdateDeviceConfig, nargs='*') |  | ||||||
|     if server: |  | ||||||
|         parser.add_argument('--host', action=UpdateServerConfig) |  | ||||||
|         parser.add_argument('--port', action=UpdateServerConfig, type=int) |  | ||||||
|     return parser |  | ||||||
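A sketch of the intended usage (argument values are illustrative): the custom actions accumulate settings on the namespace, and the device_config property materializes a DeviceConfiguration on access.

    # Sketch only: building configs from command-line style arguments.
    parser = get_config_parser()
    args = parser.parse_args(['--resistor-values', '0.005', '0.005',
                              '--host', '192.168.0.2', '--port', '45677'])
    device_config = args.device_config   # DeviceConfiguration with defaults filled in
    server_config = args.server_config   # host '192.168.0.2', port 45677
    device_config.validate()
    server_config.validate()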
							
								
								
									
wlauto/external/daq_server/src/daqpower/daq.py (347 lines, vendored)
| @@ -1,347 +0,0 @@ | |||||||
| #    Copyright 2013-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
| """ |  | ||||||
| Creates a new DAQ device class. This class assumes that there is a |  | ||||||
| DAQ connected and mapped as Dev1. It assumes a specific wiring scheme on the DAQ (it is not |  | ||||||
| meant to be a generic DAQ interface). The following diagram shows the wiring for one DaqDevice |  | ||||||
| port:: |  | ||||||
|  |  | ||||||
| Port 0 |  | ||||||
| ======== |  | ||||||
| |   A0+ <--- Vr -------------------------| |  | ||||||
| |                                        | |  | ||||||
| |   A0- <--- GND -------------------//   | |  | ||||||
| |                                        | |  | ||||||
| |   A1+ <--- V+ ------------|-------V+   | |  | ||||||
| |                   r       |            | |  | ||||||
| |   A1- <--- Vr --/\/\/\----|            | |  | ||||||
| |             |                          | |  | ||||||
| |             |                          | |  | ||||||
| |             |--------------------------| |  | ||||||
| ======== |  | ||||||
|  |  | ||||||
| :number_of_ports: The number of ports connected on the DAQ. Each port requires 2 DAQ channels, |  | ||||||
|                     one for the source voltage and one for the voltage drop across the |  | ||||||
|                     resistor r (V+ - Vr), which allows us to derive the current. |  | ||||||
| :resistor_value: The resistance of r. Typically a few milliohms. |  | ||||||
| :downsample: The number of samples combined to create one power point. If set to one, |  | ||||||
|                 each sample corresponds to one reported power point. |  | ||||||
| :sampling_rate: The rate at which the DAQ takes a sample from each channel. |  | ||||||
|  |  | ||||||
| """ |  | ||||||
| # pylint: disable=F0401,E1101,W0621,no-name-in-module,wrong-import-position,wrong-import-order |  | ||||||
| import os |  | ||||||
| import sys |  | ||||||
| import csv |  | ||||||
| import time |  | ||||||
| import threading |  | ||||||
| from Queue import Queue, Empty |  | ||||||
|  |  | ||||||
| import numpy |  | ||||||
|  |  | ||||||
| from PyDAQmx import Task, DAQError |  | ||||||
| try: |  | ||||||
|     from PyDAQmx.DAQmxFunctions import DAQmxGetSysDevNames |  | ||||||
|     CAN_ENUMERATE_DEVICES = True |  | ||||||
| except ImportError:  # earlier driver version |  | ||||||
|     DAQmxGetSysDevNames = None |  | ||||||
|     CAN_ENUMERATE_DEVICES = False |  | ||||||
|  |  | ||||||
| from PyDAQmx.DAQmxTypes import int32, byref, create_string_buffer |  | ||||||
| from PyDAQmx.DAQmxConstants import (DAQmx_Val_Diff, DAQmx_Val_Volts, DAQmx_Val_GroupByScanNumber, DAQmx_Val_Auto, |  | ||||||
|                                     DAQmx_Val_Rising, DAQmx_Val_ContSamps) |  | ||||||
|  |  | ||||||
| try: |  | ||||||
|     from PyDAQmx.DAQmxConstants import DAQmx_Val_Acquired_Into_Buffer |  | ||||||
|     callbacks_supported = True |  | ||||||
| except ImportError:  # earlier driver version |  | ||||||
|     DAQmx_Val_Acquired_Into_Buffer = None |  | ||||||
|     callbacks_supported = False |  | ||||||
|  |  | ||||||
|  |  | ||||||
| from daqpower import log |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def list_available_devices(): |  | ||||||
|     """Returns the list of DAQ devices visible to the driver.""" |  | ||||||
|     if DAQmxGetSysDevNames: |  | ||||||
|         bufsize = 2048  # Should be plenty for all but the most pathological of situations. |  | ||||||
|         buf = create_string_buffer('\000' * bufsize) |  | ||||||
|         DAQmxGetSysDevNames(buf, bufsize) |  | ||||||
|         return buf.value.split(',') |  | ||||||
|     else: |  | ||||||
|         return [] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ReadSamplesBaseTask(Task): |  | ||||||
|  |  | ||||||
|     def __init__(self, config, consumer): |  | ||||||
|         Task.__init__(self) |  | ||||||
|         self.config = config |  | ||||||
|         self.consumer = consumer |  | ||||||
|         self.sample_buffer_size = (self.config.sampling_rate + 1) * self.config.number_of_ports * 2 |  | ||||||
|         self.samples_read = int32() |  | ||||||
|         self.remainder = [] |  | ||||||
|         # create voltage channels |  | ||||||
|         for i in xrange(0, 2 * self.config.number_of_ports, 2): |  | ||||||
|             self.CreateAIVoltageChan('{}/ai{}'.format(config.device_id, config.channel_map[i]), |  | ||||||
|                                      '', DAQmx_Val_Diff, |  | ||||||
|                                      -config.v_range, config.v_range, |  | ||||||
|                                      DAQmx_Val_Volts, None) |  | ||||||
|             self.CreateAIVoltageChan('{}/ai{}'.format(config.device_id, config.channel_map[i + 1]), |  | ||||||
|                                      '', DAQmx_Val_Diff, |  | ||||||
|                                      -config.dv_range, config.dv_range, |  | ||||||
|                                      DAQmx_Val_Volts, None) |  | ||||||
|         # configure sampling rate |  | ||||||
|         self.CfgSampClkTiming('', |  | ||||||
|                               self.config.sampling_rate, |  | ||||||
|                               DAQmx_Val_Rising, |  | ||||||
|                               DAQmx_Val_ContSamps, |  | ||||||
|                               self.config.sampling_rate) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ReadSamplesCallbackTask(ReadSamplesBaseTask): |  | ||||||
|     """ |  | ||||||
|     More recent versions of the driver (on Windows) support callbacks. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     def __init__(self, config, consumer): |  | ||||||
|         ReadSamplesBaseTask.__init__(self, config, consumer) |  | ||||||
|         # register callbacks |  | ||||||
|         self.AutoRegisterEveryNSamplesEvent(DAQmx_Val_Acquired_Into_Buffer, self.config.sampling_rate // 2, 0) |  | ||||||
|         self.AutoRegisterDoneEvent(0) |  | ||||||
|  |  | ||||||
|     def EveryNCallback(self): |  | ||||||
|         # Note to future self: do NOT try to "optimize" this by re-using the same array and just |  | ||||||
|         # zeroing it out each time. The writes happen asynchronously and if you zero it out too soon, |  | ||||||
|         # you'll see a whole bunch of 0.0's in the output. If you wanna go down that route, you'll need |  | ||||||
|         # to cycle through several arrays and have the code that's actually doing the writing zero them |  | ||||||
|         # out and mark them as available to be used by this call. But, honestly, numpy array allocation |  | ||||||
|         # does not appear to be a bottleneck at the moment, so the current solution is "good enough". |  | ||||||
|         samples_buffer = numpy.zeros((self.sample_buffer_size,), dtype=numpy.float64) |  | ||||||
|         self.ReadAnalogF64(DAQmx_Val_Auto, 0.0, DAQmx_Val_GroupByScanNumber, samples_buffer, |  | ||||||
|                            self.sample_buffer_size, byref(self.samples_read), None) |  | ||||||
|         self.consumer.write((samples_buffer, self.samples_read.value)) |  | ||||||
|  |  | ||||||
|     def DoneCallback(self, status):  # pylint: disable=W0613,R0201 |  | ||||||
|         return 0  # The function should return an integer |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ReadSamplesThreadedTask(ReadSamplesBaseTask): |  | ||||||
|     """ |  | ||||||
|     Earlier versions of the driver (on CentOS) do not support callbacks, so we need |  | ||||||
|     to create a thread to periodically poll the buffer. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     def __init__(self, config, consumer): |  | ||||||
|         ReadSamplesBaseTask.__init__(self, config, consumer) |  | ||||||
|         self.poller = DaqPoller(self) |  | ||||||
|  |  | ||||||
|     def StartTask(self): |  | ||||||
|         ReadSamplesBaseTask.StartTask(self) |  | ||||||
|         self.poller.start() |  | ||||||
|  |  | ||||||
|     def StopTask(self): |  | ||||||
|         self.poller.stop() |  | ||||||
|         ReadSamplesBaseTask.StopTask(self) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class DaqPoller(threading.Thread): |  | ||||||
|  |  | ||||||
|     def __init__(self, task, wait_period=1): |  | ||||||
|         super(DaqPoller, self).__init__() |  | ||||||
|         self.task = task |  | ||||||
|         self.wait_period = wait_period |  | ||||||
|         self._stop_signal = threading.Event() |  | ||||||
|         self.samples_buffer = numpy.zeros((self.task.sample_buffer_size,), dtype=numpy.float64) |  | ||||||
|  |  | ||||||
|     def run(self): |  | ||||||
|         while not self._stop_signal.is_set(): |  | ||||||
|             # Note to future self: see the comment inside EveryNCallback() above |  | ||||||
|             samples_buffer = numpy.zeros((self.task.sample_buffer_size,), dtype=numpy.float64) |  | ||||||
|             try: |  | ||||||
|                 self.task.ReadAnalogF64(DAQmx_Val_Auto, self.wait_period, DAQmx_Val_GroupByScanNumber, samples_buffer, |  | ||||||
|                                         self.task.sample_buffer_size, byref(self.task.samples_read), None) |  | ||||||
|             except DAQError: |  | ||||||
|                 pass |  | ||||||
|             self.task.consumer.write((samples_buffer, self.task.samples_read.value)) |  | ||||||
|  |  | ||||||
|     def stop(self): |  | ||||||
|         self._stop_signal.set() |  | ||||||
|         self.join() |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class AsyncWriter(threading.Thread): |  | ||||||
|  |  | ||||||
|     def __init__(self, wait_period=1): |  | ||||||
|         super(AsyncWriter, self).__init__() |  | ||||||
|         self.daemon = True |  | ||||||
|         self.wait_period = wait_period |  | ||||||
|         self.running = threading.Event() |  | ||||||
|         self._stop_signal = threading.Event() |  | ||||||
|         self._queue = Queue() |  | ||||||
|  |  | ||||||
|     def write(self, stuff): |  | ||||||
|         if self._stop_signal.is_set(): |  | ||||||
|             raise IOError('Attempting to write to {} after it has been closed.'.format(self.__class__.__name__)) |  | ||||||
|         self._queue.put(stuff) |  | ||||||
|  |  | ||||||
|     def do_write(self, stuff): |  | ||||||
|         raise NotImplementedError() |  | ||||||
|  |  | ||||||
|     def run(self): |  | ||||||
|         self.running.set() |  | ||||||
|         while True: |  | ||||||
|             if self._stop_signal.is_set() and self._queue.empty(): |  | ||||||
|                 break |  | ||||||
|             try: |  | ||||||
|                 self.do_write(self._queue.get(block=True, timeout=self.wait_period)) |  | ||||||
|             except Empty: |  | ||||||
|                 pass  # carry on |  | ||||||
|         self.running.clear() |  | ||||||
|  |  | ||||||
|     def stop(self): |  | ||||||
|         self._stop_signal.set() |  | ||||||
|  |  | ||||||
|     def wait(self): |  | ||||||
|         while self.running.is_set(): |  | ||||||
|             time.sleep(self.wait_period) |  | ||||||
|  |  | ||||||
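A minimal sketch (not original code) of the intended AsyncWriter contract: subclass it, override do_write(), and push items in with write(); stop() lets the worker drain the queue, and wait() blocks until it has.

    class LineWriter(AsyncWriter):
        # Hypothetical subclass: appends each queued string to a file.
        def __init__(self, path):
            super(LineWriter, self).__init__()
            self.fh = open(path, 'a')

        def do_write(self, line):
            self.fh.write(line + '\n')

    writer = LineWriter('samples.log')  # illustrative path
    writer.start()
    writer.write('0.42,4.2')
    writer.stop()   # no new writes accepted; queue is drained
    writer.wait()   # block until the worker thread has finished
    writer.fh.close()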
|  |  | ||||||
| class PortWriter(object): |  | ||||||
|  |  | ||||||
|     def __init__(self, path): |  | ||||||
|         self.path = path |  | ||||||
|         self.fh = open(path, 'w', 0) |  | ||||||
|         self.writer = csv.writer(self.fh) |  | ||||||
|         self.writer.writerow(['power', 'voltage']) |  | ||||||
|  |  | ||||||
|     def write(self, row): |  | ||||||
|         self.writer.writerow(row) |  | ||||||
|  |  | ||||||
|     def close(self): |  | ||||||
|         self.fh.close() |  | ||||||
|  |  | ||||||
|     def __del__(self): |  | ||||||
|         self.close() |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class SampleProcessorError(Exception): |  | ||||||
|     pass |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class SampleProcessor(AsyncWriter): |  | ||||||
|  |  | ||||||
|     def __init__(self, resistor_values, output_directory, labels): |  | ||||||
|         super(SampleProcessor, self).__init__() |  | ||||||
|         self.resistor_values = resistor_values |  | ||||||
|         self.output_directory = output_directory |  | ||||||
|         self.labels = labels |  | ||||||
|         self.number_of_ports = len(resistor_values) |  | ||||||
|         if len(self.labels) != self.number_of_ports: |  | ||||||
|             message = 'Number of labels ({}) does not match number of ports ({}).' |  | ||||||
|             raise SampleProcessorError(message.format(len(self.labels), self.number_of_ports)) |  | ||||||
|         self.port_writers = [] |  | ||||||
|  |  | ||||||
|     def do_write(self, sample_tuple): |  | ||||||
|         samples, number_of_samples = sample_tuple |  | ||||||
|         for i in xrange(0, number_of_samples * self.number_of_ports * 2, self.number_of_ports * 2): |  | ||||||
|             for j in xrange(self.number_of_ports): |  | ||||||
|                 V = float(samples[i + 2 * j]) |  | ||||||
|                 DV = float(samples[i + 2 * j + 1]) |  | ||||||
|                 P = V * (DV / self.resistor_values[j]) |  | ||||||
|                 self.port_writers[j].write([P, V]) |  | ||||||
|  |  | ||||||
|     def start(self): |  | ||||||
|         for label in self.labels: |  | ||||||
|             port_file = self.get_port_file_path(label) |  | ||||||
|             writer = PortWriter(port_file) |  | ||||||
|             self.port_writers.append(writer) |  | ||||||
|         super(SampleProcessor, self).start() |  | ||||||
|  |  | ||||||
|     def stop(self): |  | ||||||
|         super(SampleProcessor, self).stop() |  | ||||||
|         self.wait() |  | ||||||
|         for writer in self.port_writers: |  | ||||||
|             writer.close() |  | ||||||
|  |  | ||||||
|     def get_port_file_path(self, port_id): |  | ||||||
|         if port_id in self.labels: |  | ||||||
|             return os.path.join(self.output_directory, port_id + '.csv') |  | ||||||
|         else: |  | ||||||
|             raise SampleProcessorError('Invalid port ID: {}'.format(port_id)) |  | ||||||
|  |  | ||||||
|     def __del__(self): |  | ||||||
|         self.stop() |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class DaqRunner(object): |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def number_of_ports(self): |  | ||||||
|         return self.config.number_of_ports |  | ||||||
|  |  | ||||||
|     def __init__(self, config, output_directory): |  | ||||||
|         self.config = config |  | ||||||
|         self.processor = SampleProcessor(config.resistor_values, output_directory, config.labels) |  | ||||||
|         if callbacks_supported: |  | ||||||
|             self.task = ReadSamplesCallbackTask(config, self.processor) |  | ||||||
|         else: |  | ||||||
|             self.task = ReadSamplesThreadedTask(config, self.processor)  # pylint: disable=redefined-variable-type |  | ||||||
|         self.is_running = False |  | ||||||
|  |  | ||||||
|     def start(self): |  | ||||||
|         log.debug('Starting sample processor.') |  | ||||||
|         self.processor.start() |  | ||||||
|         log.debug('Starting DAQ Task.') |  | ||||||
|         self.task.StartTask() |  | ||||||
|         self.is_running = True |  | ||||||
|         log.debug('Runner started.') |  | ||||||
|  |  | ||||||
|     def stop(self): |  | ||||||
|         self.is_running = False |  | ||||||
|         log.debug('Stopping DAQ Task.') |  | ||||||
|         self.task.StopTask() |  | ||||||
|         log.debug('Stopping sample processor.') |  | ||||||
|         self.processor.stop() |  | ||||||
|         log.debug('Runner stopped.') |  | ||||||
|  |  | ||||||
|     def get_port_file_path(self, port_id): |  | ||||||
|         return self.processor.get_port_file_path(port_id) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| if __name__ == '__main__': |  | ||||||
|     from collections import namedtuple |  | ||||||
|     DeviceConfig = namedtuple('DeviceConfig', ['device_id', 'channel_map', 'resistor_values', |  | ||||||
|                                                'v_range', 'dv_range', 'sampling_rate', |  | ||||||
|                                                'number_of_ports', 'labels']) |  | ||||||
|     channel_map = (0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23) |  | ||||||
|     resistor_values = [0.005] |  | ||||||
|     labels = ['PORT_0'] |  | ||||||
|     dev_config = DeviceConfig('Dev1', channel_map, resistor_values, 2.5, 0.2, 10000, len(resistor_values), labels) |  | ||||||
|     if len(sys.argv) != 3: |  | ||||||
|         print 'Usage: {} OUTDIR DURATION'.format(os.path.basename(__file__)) |  | ||||||
|         sys.exit(1) |  | ||||||
|     output_directory = sys.argv[1] |  | ||||||
|     duration = float(sys.argv[2]) |  | ||||||
|  |  | ||||||
|     print "Avialable devices:", list_available_devices() |  | ||||||
|     runner = DaqRunner(dev_config, output_directory) |  | ||||||
|     runner.start() |  | ||||||
|     time.sleep(duration) |  | ||||||
|     runner.stop() |  | ||||||
							
								
								
									
wlauto/external/daq_server/src/daqpower/log.py (58 lines, vendored)
| @@ -1,58 +0,0 @@ | |||||||
| #    Copyright 2014-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
| import logging |  | ||||||
|  |  | ||||||
| from twisted.python import log |  | ||||||
|  |  | ||||||
| __all__ = ['debug', 'info', 'warning', 'error', 'critical', 'start_logging'] |  | ||||||
|  |  | ||||||
| debug = lambda x: log.msg(x, logLevel=logging.DEBUG) |  | ||||||
| info = lambda x: log.msg(x, logLevel=logging.INFO) |  | ||||||
| warning = lambda x: log.msg(x, logLevel=logging.WARNING) |  | ||||||
| error = lambda x: log.msg(x, logLevel=logging.ERROR) |  | ||||||
| critical = lambda x: log.msg(x, logLevel=logging.CRITICAL) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class CustomLoggingObserver(log.PythonLoggingObserver): |  | ||||||
|  |  | ||||||
|     def __init__(self, loggerName="twisted"): |  | ||||||
|         super(CustomLoggingObserver, self).__init__(loggerName) |  | ||||||
|         if hasattr(self, '_newObserver'):  # new versions of Twisted |  | ||||||
|             self.logger = self._newObserver.logger  # pylint: disable=no-member |  | ||||||
|  |  | ||||||
|     def emit(self, eventDict): |  | ||||||
|         if 'logLevel' in eventDict: |  | ||||||
|             level = eventDict['logLevel'] |  | ||||||
|         elif eventDict['isError']: |  | ||||||
|             level = logging.ERROR |  | ||||||
|         else: |  | ||||||
|             # All of that just to override this one line from the |  | ||||||
|             # default INFO level... |  | ||||||
|             level = logging.DEBUG |  | ||||||
|         text = log.textFromEventDict(eventDict) |  | ||||||
|         if text is None: |  | ||||||
|             return |  | ||||||
|         self.logger.log(level, text) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| logObserver = CustomLoggingObserver() |  | ||||||
| logObserver.start() |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def start_logging(level, fmt='%(asctime)s %(levelname)-8s: %(message)s'): |  | ||||||
|     logging.basicConfig(level=getattr(logging, level), format=fmt) |  | ||||||
|  |  | ||||||
							
								
								
									
wlauto/external/daq_server/src/daqpower/server.py (526 lines, vendored)
| @@ -1,526 +0,0 @@ | |||||||
| #    Copyright 2014-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # pylint: disable=E1101,W0613,wrong-import-position |  | ||||||
| from __future__ import division |  | ||||||
| import os |  | ||||||
| import sys |  | ||||||
| import argparse |  | ||||||
| import shutil |  | ||||||
| import socket |  | ||||||
| import time |  | ||||||
| from datetime import datetime, timedelta |  | ||||||
|  |  | ||||||
| from zope.interface import implements |  | ||||||
| from twisted.protocols.basic import LineReceiver |  | ||||||
| from twisted.internet.protocol import Factory, Protocol |  | ||||||
| from twisted.internet import reactor, interfaces |  | ||||||
| from twisted.internet.error import ConnectionLost, ConnectionDone |  | ||||||
|  |  | ||||||
| if __name__ == "__main__":  # for debugging |  | ||||||
|     sys.path.append(os.path.join(os.path.dirname(__file__), '..')) |  | ||||||
| from daqpower import log |  | ||||||
| from daqpower.config import DeviceConfiguration |  | ||||||
| from daqpower.common import DaqServerRequest, DaqServerResponse, Status |  | ||||||
|  |  | ||||||
| try: |  | ||||||
|     from daqpower.daq import DaqRunner, list_available_devices, CAN_ENUMERATE_DEVICES |  | ||||||
|     __import_error = None |  | ||||||
| except ImportError as e: |  | ||||||
|     # May be using debug mode. |  | ||||||
|     __import_error = e |  | ||||||
|     DaqRunner = None |  | ||||||
|     list_available_devices = lambda: ['Dev1'] |  | ||||||
|     CAN_ENUMERATE_DEVICES = True  # the debug stub above always "enumerates" Dev1 |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ProtocolError(Exception): |  | ||||||
|     pass |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class DummyDaqRunner(object): |  | ||||||
|     """Dummy stub used when running in debug mode.""" |  | ||||||
|  |  | ||||||
|     num_rows = 200 |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def number_of_ports(self): |  | ||||||
|         return self.config.number_of_ports |  | ||||||
|  |  | ||||||
|     def __init__(self, config, output_directory): |  | ||||||
|         log.info('Creating runner with {} {}'.format(config, output_directory)) |  | ||||||
|         self.config = config |  | ||||||
|         self.output_directory = output_directory |  | ||||||
|         self.is_running = False |  | ||||||
|  |  | ||||||
|     def start(self): |  | ||||||
|         import csv, random  # pylint: disable=multiple-imports |  | ||||||
|         log.info('runner started') |  | ||||||
|         for i in xrange(self.config.number_of_ports): |  | ||||||
|             rows = [['power', 'voltage']] + [[random.gauss(1.0, 1.0), random.gauss(1.0, 0.1)] |  | ||||||
|                                              for _ in xrange(self.num_rows)] |  | ||||||
|             with open(self.get_port_file_path(self.config.labels[i]), 'wb') as wfh: |  | ||||||
|                 writer = csv.writer(wfh) |  | ||||||
|                 writer.writerows(rows) |  | ||||||
|  |  | ||||||
|         self.is_running = True |  | ||||||
|  |  | ||||||
|     def stop(self): |  | ||||||
|         self.is_running = False |  | ||||||
|         log.info('runner stopped') |  | ||||||
|  |  | ||||||
|     def get_port_file_path(self, port_id): |  | ||||||
|         if port_id in self.config.labels: |  | ||||||
|             return os.path.join(self.output_directory, '{}.csv'.format(port_id)) |  | ||||||
|         else: |  | ||||||
|             raise Exception('Invalid port id: {}'.format(port_id)) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class DaqServer(object): |  | ||||||
|  |  | ||||||
|     def __init__(self, base_output_directory): |  | ||||||
|         self.base_output_directory = os.path.abspath(base_output_directory) |  | ||||||
|         if os.path.isdir(self.base_output_directory): |  | ||||||
|             log.info('Using output directory: {}'.format(self.base_output_directory)) |  | ||||||
|         else: |  | ||||||
|             log.info('Creating new output directory: {}'.format(self.base_output_directory)) |  | ||||||
|             os.makedirs(self.base_output_directory) |  | ||||||
|         self.runner = None |  | ||||||
|         self.output_directory = None |  | ||||||
|         self.labels = None |  | ||||||
|  |  | ||||||
|     def configure(self, config_string): |  | ||||||
|         message = None |  | ||||||
|         if self.runner: |  | ||||||
|             message = 'Configuring a new session before previous session has been terminated.' |  | ||||||
|             log.warning(message) |  | ||||||
|             if self.runner.is_running: |  | ||||||
|                 self.runner.stop() |  | ||||||
|         config = DeviceConfiguration.deserialize(config_string) |  | ||||||
|         config.validate() |  | ||||||
|         self.output_directory = self._create_output_directory() |  | ||||||
|         self.labels = config.labels |  | ||||||
|         log.info('Writing port files to {}'.format(self.output_directory)) |  | ||||||
|         self.runner = DaqRunner(config, self.output_directory) |  | ||||||
|         return message |  | ||||||
|  |  | ||||||
|     def start(self): |  | ||||||
|         if self.runner: |  | ||||||
|             if not self.runner.is_running: |  | ||||||
|                 self.runner.start() |  | ||||||
|             else: |  | ||||||
|                 message = 'Calling start() before stop() has been called. Data up to this point will be lost.' |  | ||||||
|                 log.warning(message) |  | ||||||
|                 self.runner.stop() |  | ||||||
|                 self.runner.start() |  | ||||||
|                 return message |  | ||||||
|         else: |  | ||||||
|             raise ProtocolError('Start called before a session has been configured.') |  | ||||||
|  |  | ||||||
|     def stop(self): |  | ||||||
|         if self.runner: |  | ||||||
|             if self.runner.is_running: |  | ||||||
|                 self.runner.stop() |  | ||||||
|             else: |  | ||||||
|                 message = 'Attempting to stop() before start() was invoked.' |  | ||||||
|                 log.warning(message) |  | ||||||
|                 self.runner.stop() |  | ||||||
|                 return message |  | ||||||
|         else: |  | ||||||
|             raise ProtocolError('Stop called before a session has been configured.') |  | ||||||
|  |  | ||||||
|     def list_devices(self):  # pylint: disable=no-self-use |  | ||||||
|         return list_available_devices() |  | ||||||
|  |  | ||||||
|     def list_ports(self): |  | ||||||
|         return self.labels |  | ||||||
|  |  | ||||||
|     def list_port_files(self): |  | ||||||
|         if not self.runner: |  | ||||||
|             raise ProtocolError('Attempting to list port files before session has been configured.') |  | ||||||
|         ports_with_files = [] |  | ||||||
|         for port_id in self.labels: |  | ||||||
|             path = self.get_port_file_path(port_id) |  | ||||||
|             if os.path.isfile(path): |  | ||||||
|                 ports_with_files.append(port_id) |  | ||||||
|         return ports_with_files |  | ||||||
|  |  | ||||||
|     def get_port_file_path(self, port_id): |  | ||||||
|         if not self.runner: |  | ||||||
|             raise ProtocolError('Attempting to get port file path before session has been configured.') |  | ||||||
|         return self.runner.get_port_file_path(port_id) |  | ||||||
|  |  | ||||||
|     def terminate(self): |  | ||||||
|         message = None |  | ||||||
|         if self.runner: |  | ||||||
|             if self.runner.is_running: |  | ||||||
|                 message = 'Terminating session before runner has been stopped.' |  | ||||||
|                 log.warning(message) |  | ||||||
|                 self.runner.stop() |  | ||||||
|             self.runner = None |  | ||||||
|             if self.output_directory and os.path.isdir(self.output_directory): |  | ||||||
|                 shutil.rmtree(self.output_directory) |  | ||||||
|             self.output_directory = None |  | ||||||
|             log.info('Session terminated.') |  | ||||||
|         else:  # Runner has not been created. |  | ||||||
|             message = 'Attempting to close session before it has been configured.' |  | ||||||
|             log.warning(message) |  | ||||||
|         return message |  | ||||||
|  |  | ||||||
|     def _create_output_directory(self): |  | ||||||
|         basename = datetime.now().strftime('%Y-%m-%d_%H%M%S%f') |  | ||||||
|         dirname = os.path.join(self.base_output_directory, basename) |  | ||||||
|         os.makedirs(dirname) |  | ||||||
|         return dirname |  | ||||||
|  |  | ||||||
|     def __del__(self): |  | ||||||
|         if self.runner: |  | ||||||
|             self.runner.stop() |  | ||||||
|  |  | ||||||
|     def __str__(self): |  | ||||||
|         return '({})'.format(self.base_output_directory) |  | ||||||
|  |  | ||||||
|     __repr__ = __str__ |  | ||||||
|  |  | ||||||
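The intended lifecycle of one measurement session (a sketch calling DaqServer directly; in production the same sequence arrives as serialized DaqServerRequests over the control connection):

    # Sketch only: requires a real DAQ (DaqRunner) unless running the debug stub.
    server = DaqServer('./daq-output')
    server.configure(DeviceConfiguration(
        device_id=None, v_range=None, dv_range=None, sampling_rate=None,
        resistor_values=[0.005], channel_map=None, labels=['PORT_0']).serialize())
    server.start()                       # begin sampling
    # ... workload executes on the target ...
    server.stop()                        # port files are now complete
    print server.list_port_files()       # e.g. ['PORT_0']
    path = server.get_port_file_path('PORT_0')
    server.terminate()                   # tears down the session and deletes its output dir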
|  |  | ||||||
| class DaqControlProtocol(LineReceiver):  # pylint: disable=W0223 |  | ||||||
|  |  | ||||||
|     def __init__(self, daq_server): |  | ||||||
|         self.daq_server = daq_server |  | ||||||
|         self.factory = None |  | ||||||
|  |  | ||||||
|     def lineReceived(self, line): |  | ||||||
|         line = line.strip() |  | ||||||
|         log.info('Received: {}'.format(line)) |  | ||||||
|         try: |  | ||||||
|             request = DaqServerRequest.deserialize(line) |  | ||||||
|         except Exception, e:  # pylint: disable=W0703 |  | ||||||
|             # PyDAQmx exceptions use "mess" rather than the standard "message" |  | ||||||
|             # to pass errors... |  | ||||||
|             message = getattr(e, 'mess', e.message) |  | ||||||
|             self.sendError('Received bad request ({}: {})'.format(e.__class__.__name__, message)) |  | ||||||
|         else: |  | ||||||
|             self.processRequest(request) |  | ||||||
|  |  | ||||||
|     def processRequest(self, request): |  | ||||||
|         try: |  | ||||||
|             if request.command == 'configure': |  | ||||||
|                 self.configure(request) |  | ||||||
|             elif request.command == 'start': |  | ||||||
|                 self.start(request) |  | ||||||
|             elif request.command == 'stop': |  | ||||||
|                 self.stop(request) |  | ||||||
|             elif request.command == 'list_devices': |  | ||||||
|                 self.list_devices(request) |  | ||||||
|             elif request.command == 'list_ports': |  | ||||||
|                 self.list_ports(request) |  | ||||||
|             elif request.command == 'list_port_files': |  | ||||||
|                 self.list_port_files(request) |  | ||||||
|             elif request.command == 'pull': |  | ||||||
|                 self.pull_port_data(request) |  | ||||||
|             elif request.command == 'close': |  | ||||||
|                 self.terminate(request) |  | ||||||
|             else: |  | ||||||
|                 self.sendError('Received unknown command: {}'.format(request.command)) |  | ||||||
|         except Exception, e:  # pylint: disable=W0703 |  | ||||||
|             message = getattr(e, 'mess', e.message) |  | ||||||
|             self.sendError('{}: {}'.format(e.__class__.__name__, message)) |  | ||||||
|  |  | ||||||
|     def configure(self, request): |  | ||||||
|         if 'config' in request.params: |  | ||||||
|             result = self.daq_server.configure(request.params['config']) |  | ||||||
|             if not result: |  | ||||||
|                 self.sendResponse(Status.OK) |  | ||||||
|             else: |  | ||||||
|                 self.sendResponse(Status.OKISH, message=result) |  | ||||||
|         else: |  | ||||||
|             self.sendError('Invalid config; config string not provided.') |  | ||||||
|  |  | ||||||
|     def start(self, request): |  | ||||||
|         result = self.daq_server.start() |  | ||||||
|         if not result: |  | ||||||
|             self.sendResponse(Status.OK) |  | ||||||
|         else: |  | ||||||
|             self.sendResponse(Status.OKISH, message=result) |  | ||||||
|  |  | ||||||
|     def stop(self, request): |  | ||||||
|         result = self.daq_server.stop() |  | ||||||
|         if not result: |  | ||||||
|             self.sendResponse(Status.OK) |  | ||||||
|         else: |  | ||||||
|             self.sendResponse(Status.OKISH, message=result) |  | ||||||
|  |  | ||||||
|     def pull_port_data(self, request): |  | ||||||
|         if 'port_id' in request.params: |  | ||||||
|             port_id = request.params['port_id'] |  | ||||||
|             port_file = self.daq_server.get_port_file_path(port_id) |  | ||||||
|             if os.path.isfile(port_file): |  | ||||||
|                 port = self._initiate_file_transfer(port_file) |  | ||||||
|                 self.sendResponse(Status.OK, data={'port_number': port}) |  | ||||||
|             else: |  | ||||||
|                 self.sendError('File for port {} does not exist.'.format(port_id)) |  | ||||||
|         else: |  | ||||||
|             self.sendError('Invalid pull request; port id not provided.') |  | ||||||
|  |  | ||||||
|     def list_devices(self, request): |  | ||||||
|         if CAN_ENUMERATE_DEVICES: |  | ||||||
|             devices = self.daq_server.list_devices() |  | ||||||
|             self.sendResponse(Status.OK, data={'devices': devices}) |  | ||||||
|         else: |  | ||||||
|             message = "Server does not support DAQ device enumration" |  | ||||||
|             self.sendResponse(Status.OKISH, message=message) |  | ||||||
|  |  | ||||||
|     def list_ports(self, request): |  | ||||||
|         port_labels = self.daq_server.list_ports() |  | ||||||
|         self.sendResponse(Status.OK, data={'ports': port_labels}) |  | ||||||
|  |  | ||||||
|     def list_port_files(self, request): |  | ||||||
|         port_labels = self.daq_server.list_port_files() |  | ||||||
|         self.sendResponse(Status.OK, data={'ports': port_labels}) |  | ||||||
|  |  | ||||||
|     def terminate(self, request): |  | ||||||
|         status = Status.OK |  | ||||||
|         message = '' |  | ||||||
|         if self.factory.transfer_sessions: |  | ||||||
|             message = 'Terminating with file transfer sessions in progress. ' |  | ||||||
|             log.warning(message) |  | ||||||
|             for session in list(self.factory.transfer_sessions):  # copy: transferComplete() deletes entries |  | ||||||
|                 self.factory.transferComplete(session) |  | ||||||
|         message += self.daq_server.terminate() or '' |  | ||||||
|         if message: |  | ||||||
|             status = Status.OKISH |  | ||||||
|         self.sendResponse(status, message) |  | ||||||
|  |  | ||||||
|     def sendError(self, message): |  | ||||||
|         log.error(message) |  | ||||||
|         self.sendResponse(Status.ERROR, message) |  | ||||||
|  |  | ||||||
|     def sendResponse(self, status, message=None, data=None): |  | ||||||
|         response = DaqServerResponse(status, message=message, data=data) |  | ||||||
|         self.sendLine(response.serialize()) |  | ||||||
|  |  | ||||||
|     def sendLine(self, line): |  | ||||||
|         log.info('Responding: {}'.format(line)) |  | ||||||
|         LineReceiver.sendLine(self, line.replace('\r\n', '')) |  | ||||||
|  |  | ||||||
|     def _initiate_file_transfer(self, filepath): |  | ||||||
|         sender_factory = FileSenderFactory(filepath, self.factory) |  | ||||||
|         connector = reactor.listenTCP(0, sender_factory) |  | ||||||
|         self.factory.transferInitiated(sender_factory, connector) |  | ||||||
|         return connector.getHost().port |  | ||||||
|  |  | ||||||
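The 'pull' flow in a nutshell: the control channel replies with an ephemeral port number, the client opens a second TCP connection to it, and FileSenderFactory streams the CSV until the file is exhausted, then closes the connection. A hypothetical client-side receiver (illustrative, not original code):

    import socket

    def pull_port_file(host, data_port, dest_path):
        # data_port comes from the 'port_number' field of the pull response.
        sock = socket.create_connection((host, data_port))
        with open(dest_path, 'wb') as wfh:
            while True:
                chunk = sock.recv(4096)
                if not chunk:        # sender closes the connection at end of file
                    break
                wfh.write(chunk)
        sock.close()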
|  |  | ||||||
| class DaqFactory(Factory): |  | ||||||
|  |  | ||||||
|     protocol = DaqControlProtocol |  | ||||||
|     check_alive_period = 5 * 60 |  | ||||||
|     max_transfer_lifetime = 30 * 60 |  | ||||||
|  |  | ||||||
|     def __init__(self, server, cleanup_period=24 * 60 * 60, cleanup_after_days=5): |  | ||||||
|         self.server = server |  | ||||||
|         self.cleanup_period = cleanup_period |  | ||||||
|         self.cleanup_threshold = timedelta(cleanup_after_days) |  | ||||||
|         self.transfer_sessions = {} |  | ||||||
|  |  | ||||||
|     def buildProtocol(self, addr): |  | ||||||
|         proto = DaqControlProtocol(self.server) |  | ||||||
|         proto.factory = self |  | ||||||
|         reactor.callLater(self.check_alive_period, self.pulse) |  | ||||||
|         reactor.callLater(self.cleanup_period, self.perform_cleanup) |  | ||||||
|         return proto |  | ||||||
|  |  | ||||||
|     def clientConnectionLost(self, connector, reason): |  | ||||||
|         log.info('client connection lost: {}.'.format(reason)) |  | ||||||
|         if not isinstance(reason, ConnectionLost): |  | ||||||
|             log.error('Client terminated connection mid-transfer.') |  | ||||||
|             for session in list(self.transfer_sessions):  # copy: transferComplete() deletes entries |  | ||||||
|                 self.transferComplete(session) |  | ||||||
|  |  | ||||||
|     def transferInitiated(self, session, connector): |  | ||||||
|         self.transfer_sessions[session] = (time.time(), connector) |  | ||||||
|  |  | ||||||
|     def transferComplete(self, session, reason='OK'): |  | ||||||
|         if reason != 'OK': |  | ||||||
|             log.error(reason) |  | ||||||
|         self.transfer_sessions[session][1].stopListening() |  | ||||||
|         del self.transfer_sessions[session] |  | ||||||
|  |  | ||||||
|     def pulse(self): |  | ||||||
|         """Close down any file tranfer sessions that have been open for too long.""" |  | ||||||
|         current_time = time.time() |  | ||||||
|         for session in list(self.transfer_sessions):  # copy: transferComplete() may delete entries |  | ||||||
|             start_time, conn = self.transfer_sessions[session] |  | ||||||
|             if (current_time - start_time) > self.max_transfer_lifetime: |  | ||||||
|                 message = '{} session on port {} timed out' |  | ||||||
|                 self.transferComplete(session, message.format(session, conn.getHost().port)) |  | ||||||
|         if self.transfer_sessions: |  | ||||||
|             reactor.callLater(self.check_alive_period, self.pulse) |  | ||||||
|  |  | ||||||
|     def perform_cleanup(self): |  | ||||||
|         """ |  | ||||||
|         Clean up old uncollected data files to recover disk space. |  | ||||||
|  |  | ||||||
|         """ |  | ||||||
|         log.info('Performing cleanup of the output directory...') |  | ||||||
|         base_directory = self.server.base_output_directory |  | ||||||
|         current_time = datetime.now() |  | ||||||
|         for entry in os.listdir(base_directory): |  | ||||||
|             entry_path = os.path.join(base_directory, entry) |  | ||||||
|             entry_ctime = datetime.fromtimestamp(os.path.getctime(entry_path)) |  | ||||||
|             existence_time = current_time - entry_ctime |  | ||||||
|             if existence_time > self.cleanup_threshold: |  | ||||||
|                 log.debug('Removing {} (existed for {})'.format(entry, existence_time)) |  | ||||||
|                 shutil.rmtree(entry_path) |  | ||||||
|             else: |  | ||||||
|                 log.debug('Keeping {} (existed for {})'.format(entry, existence_time)) |  | ||||||
|         log.info('Cleanup complete.') |  | ||||||
|  |  | ||||||
|     def __str__(self): |  | ||||||
|         return '<DAQ {}>'.format(self.server) |  | ||||||
|  |  | ||||||
|     __repr__ = __str__ |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class FileReader(object): |  | ||||||
|  |  | ||||||
|     implements(interfaces.IPushProducer) |  | ||||||
|  |  | ||||||
|     def __init__(self, filepath): |  | ||||||
|         self.fh = open(filepath) |  | ||||||
|         self.proto = None |  | ||||||
|         self.done = False |  | ||||||
|         self._paused = True |  | ||||||
|  |  | ||||||
|     def setProtocol(self, proto): |  | ||||||
|         self.proto = proto |  | ||||||
|  |  | ||||||
|     def resumeProducing(self): |  | ||||||
|         if not self.proto: |  | ||||||
|             raise ProtocolError('resumeProducing called with no protocol set.') |  | ||||||
|         self._paused = False |  | ||||||
|         try: |  | ||||||
|             while not self._paused: |  | ||||||
|                 line = self.fh.next().rstrip('\n') + '\r\n' |  | ||||||
|                 self.proto.transport.write(line) |  | ||||||
|         except StopIteration: |  | ||||||
|             log.debug('Sent everything.') |  | ||||||
|             self.stopProducing() |  | ||||||
|  |  | ||||||
|     def pauseProducing(self): |  | ||||||
|         self._paused = True |  | ||||||
|  |  | ||||||
|     def stopProducing(self): |  | ||||||
|         self.done = True |  | ||||||
|         self.fh.close() |  | ||||||
|         self.proto.transport.unregisterProducer() |  | ||||||
|         self.proto.transport.loseConnection() |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class FileSenderProtocol(Protocol): |  | ||||||
|  |  | ||||||
|     def __init__(self, reader): |  | ||||||
|         self.reader = reader |  | ||||||
|         self.factory = None |  | ||||||
|  |  | ||||||
|     def connectionMade(self): |  | ||||||
|         self.transport.registerProducer(self.reader, True) |  | ||||||
|         self.reader.resumeProducing() |  | ||||||
|  |  | ||||||
|     def connectionLost(self, reason=ConnectionDone): |  | ||||||
|         if self.reader.done: |  | ||||||
|             self.factory.transferComplete() |  | ||||||
|         else: |  | ||||||
|             self.reader.pauseProducing() |  | ||||||
|             self.transport.unregisterProducer() |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class FileSenderFactory(Factory): |  | ||||||
|  |  | ||||||
|     @property |  | ||||||
|     def done(self): |  | ||||||
|         if self.reader: |  | ||||||
|             return self.reader.done |  | ||||||
|         else: |  | ||||||
|             return None |  | ||||||
|  |  | ||||||
|     def __init__(self, path, owner): |  | ||||||
|         self.path = os.path.abspath(path) |  | ||||||
|         self.reader = None |  | ||||||
|         self.owner = owner |  | ||||||
|  |  | ||||||
|     def buildProtocol(self, addr): |  | ||||||
|         if not self.reader: |  | ||||||
|             self.reader = FileReader(self.path) |  | ||||||
|         proto = FileSenderProtocol(self.reader) |  | ||||||
|         proto.factory = self |  | ||||||
|         self.reader.setProtocol(proto) |  | ||||||
|         return proto |  | ||||||
|  |  | ||||||
|     def transferComplete(self): |  | ||||||
|         self.owner.transferComplete(self) |  | ||||||
|  |  | ||||||
|     def __hash__(self): |  | ||||||
|         return hash(self.path) |  | ||||||
|  |  | ||||||
|     def __str__(self): |  | ||||||
|         return '<FileSender {}>'.format(self.path) |  | ||||||
|  |  | ||||||
|     __repr__ = __str__ |  | ||||||
|  |  | ||||||
|  |  | ||||||
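The three classes above implement Twisted's push-producer pattern: FileReader streams lines from disk, FileSenderProtocol couples the reader to a transport, and FileSenderFactory ties one reader to the connections made to it. A minimal wiring sketch (hypothetical usage, not part of the original module; `owner` is assumed to expose transferInitiated()/transferComplete() the same way DaqFactory does above):

    from twisted.internet import reactor

    def serve_file(path, owner, port=0):
        # port=0 asks the OS for an ephemeral port; the listening connector
        # is handed to the owner so it can be tracked and stopped later.
        factory = FileSenderFactory(path, owner)
        connector = reactor.listenTCP(port, factory)
        owner.transferInitiated(factory, connector)
        return connector.getHost().port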
| def run_server(): |  | ||||||
|     parser = argparse.ArgumentParser() |  | ||||||
|     parser.add_argument('-d', '--directory', help='Working directory', metavar='DIR', default='.') |  | ||||||
|     parser.add_argument('-p', '--port', help='port the server will listen on.', |  | ||||||
|                         metavar='PORT', default=45677, type=int) |  | ||||||
|     parser.add_argument('-c', '--cleanup-after', type=int, default=5, metavar='DAYS', |  | ||||||
|                         help=""" |  | ||||||
|                         Server will periodically clean up data files that are older than the number of |  | ||||||
|                         days specified by this parameter. |  | ||||||
|                         """) |  | ||||||
|     parser.add_argument('--cleanup-period', type=int, default=1, metavar='DAYS', |  | ||||||
|                         help='Specifies how often the server will attempt to clean up old files.') |  | ||||||
|     parser.add_argument('--debug', help='Run in debug mode (no DAQ connected).', |  | ||||||
|                         action='store_true', default=False) |  | ||||||
|     parser.add_argument('--verbose', help='Produce verbose output.', action='store_true', default=False) |  | ||||||
|     args = parser.parse_args() |  | ||||||
|  |  | ||||||
|     if args.debug: |  | ||||||
|         global DaqRunner  # pylint: disable=W0603 |  | ||||||
|         DaqRunner = DummyDaqRunner |  | ||||||
|     else: |  | ||||||
|         if not DaqRunner: |  | ||||||
|             raise __import_error  # pylint: disable=raising-bad-type |  | ||||||
|     if args.verbose or args.debug: |  | ||||||
|         log.start_logging('DEBUG') |  | ||||||
|     else: |  | ||||||
|         log.start_logging('INFO') |  | ||||||
|  |  | ||||||
|     # days to seconds |  | ||||||
|     cleanup_period = args.cleanup_period * 24 * 60 * 60 |  | ||||||
|  |  | ||||||
|     server = DaqServer(args.directory) |  | ||||||
|     factory = DaqFactory(server, cleanup_period, args.cleanup_after) |  | ||||||
|     reactor.listenTCP(args.port, factory).getHost() |  | ||||||
|     try: |  | ||||||
|         hostname = socket.gethostbyname(socket.gethostname()) |  | ||||||
|     except socket.gaierror: |  | ||||||
|         hostname = 'localhost' |  | ||||||
|     log.info('Listening on {}:{}'.format(hostname, args.port)) |  | ||||||
|     reactor.run() |  | ||||||
|  |  | ||||||
|  |  | ||||||
| if __name__ == "__main__": |  | ||||||
|     run_server() |  | ||||||
| @@ -1,3 +0,0 @@ | |||||||
| #!/usr/bin/env python |  | ||||||
| from daqpower.server import run_server |  | ||||||
| run_server() |  | ||||||
| @@ -1,3 +0,0 @@ | |||||||
| #!/usr/bin/env python |  | ||||||
| from daqpower.client import run_send_command |  | ||||||
| run_send_command() |  | ||||||
							
								
								
									
wlauto/external/daq_server/src/setup.py (vendored, 52 lines removed)
| @@ -1,52 +0,0 @@ | |||||||
| #    Copyright 2013-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
| import warnings |  | ||||||
| from distutils.core import setup |  | ||||||
|  |  | ||||||
| import daqpower |  | ||||||
|  |  | ||||||
|  |  | ||||||
| warnings.filterwarnings('ignore', "Unknown distribution option: 'install_requires'") |  | ||||||
|  |  | ||||||
| params = dict( |  | ||||||
|     name='daqpower', |  | ||||||
|     version=daqpower.__version__, |  | ||||||
|     packages=[ |  | ||||||
|         'daqpower', |  | ||||||
|     ], |  | ||||||
|     scripts=[ |  | ||||||
|         'scripts/run-daq-server', |  | ||||||
|         'scripts/send-daq-command', |  | ||||||
|     ], |  | ||||||
|     url='N/A', |  | ||||||
|     maintainer='workload-automation', |  | ||||||
|     maintainer_email='workload-automation@arm.com', |  | ||||||
|     install_requires=[ |  | ||||||
|         'twisted', |  | ||||||
|         'PyDAQmx', |  | ||||||
|     ], |  | ||||||
|     # https://pypi.python.org/pypi?%3Aaction=list_classifiers |  | ||||||
|     classifiers=[ |  | ||||||
|         'Development Status :: 3 - Alpha', |  | ||||||
|         'Environment :: Console', |  | ||||||
|         'License :: Other/Proprietary License', |  | ||||||
|         'Operating System :: Unix', |  | ||||||
|         'Programming Language :: Python :: 2.7', |  | ||||||
|     ], |  | ||||||
| ) |  | ||||||
|  |  | ||||||
| setup(**params) |  | ||||||
							
								
								
									
wlauto/external/pmu_logger/Makefile (vendored, 7 lines removed)
| @@ -1,7 +0,0 @@ | |||||||
| # To build the pmu_logger module use the following command line |  | ||||||
| # make ARCH=arm CROSS_COMPILE=arm-linux-gnueabi- -C ../kernel/out SUBDIRS=$PWD modules |  | ||||||
| # where |  | ||||||
| # CROSS_COMPILE - prefix of the arm linux compiler |  | ||||||
| # -C - location of the configured kernel source tree |  | ||||||
|  |  | ||||||
| obj-m := pmu_logger.o |  | ||||||
							
								
								
									
wlauto/external/pmu_logger/README (vendored, 35 lines removed)
| @@ -1,35 +0,0 @@ | |||||||
| The pmu_logger module provides the ability to periodically trace CCI PMU counters. The trace destinations can be the ftrace buffer and/or the kernel log. This file gives a quick overview of the functionality provided by the module and how to use it. |  | ||||||
|  |  | ||||||
| The pmu_logger module creates a directory in the debugfs filesystem called cci_pmu_logger which can be used to enable/disable the counters and control the events that are counted. |  | ||||||
|  |  | ||||||
| To configure the events being counted, write the corresponding event ID to the counter* files. The list of CCI PMU events can be found at http://arminfo.emea.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0470d/CJHICFBF.html. |  | ||||||
|  |  | ||||||
| The "period_jiffies" can be used to control the periodicity of tracing. It accepts values in kernel jiffies. |  | ||||||
|  |  | ||||||
| To enable tracing, write a 1 to "control"; to disable tracing, write another 1 to "control". The files "enable_console" and "enable_ftrace" control where the trace is written. To check whether the counters are currently running, read the "control" file. |  | ||||||
|  |  | ||||||
| The current values of the counters can be read from the "values" file. |  | ||||||
|  |  | ||||||
| E.g. to trace the A15 and A7 snoop hit rates every 10 jiffies, the following commands are required: |  | ||||||
|  |  | ||||||
|  |  | ||||||
| trace-cmd reset |  | ||||||
|  |  | ||||||
| echo 0x63 > counter0 |  | ||||||
| echo 0x6A > counter1 |  | ||||||
| echo 0x83 > counter2 |  | ||||||
| echo 0x8A > counter3 |  | ||||||
|  |  | ||||||
| echo 10 > period_jiffies |  | ||||||
|  |  | ||||||
| trace-cmd start -b 20000 -e "sched:sched_wakeup" |  | ||||||
|  |  | ||||||
| echo 1 > control |  | ||||||
|  |  | ||||||
| # perform the activity for which you would like to collect the CCI PMU trace. |  | ||||||
|  |  | ||||||
| trace-cmd stop && trace-cmd extract |  | ||||||
|  |  | ||||||
| echo 1 > control |  | ||||||
|  |  | ||||||
| trace-cmd report trace.dat | grep print # shows the trace of the CCI PMU counters along with the cycle counter values. |  | ||||||
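Since "period_jiffies" is interpreted against the kernel's HZ tick rate, the sampling period in wall-clock time is board-specific. A small conversion helper (a sketch; HZ=100 is an assumption here, chosen because the module's default of 10 jiffies is documented in the source below as one sample every 100 ms):

    # Convert a sampling period in kernel jiffies to milliseconds.
    # HZ is kernel-configuration specific; 100 is an assumption.
    def jiffies_to_ms(jiffies, hz=100):
        return jiffies * 1000.0 / hz

    print(jiffies_to_ms(10))  # -> 100.0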
							
								
								
									
wlauto/external/pmu_logger/pmu_logger.c (vendored, 294 lines removed)
| @@ -1,294 +0,0 @@ | |||||||
| /*    Copyright 2013-2015 ARM Limited |  | ||||||
|  * |  | ||||||
|  * Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
|  * you may not use this file except in compliance with the License. |  | ||||||
|  * You may obtain a copy of the License at |  | ||||||
|  * |  | ||||||
|  *     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
|  * |  | ||||||
|  * Unless required by applicable law or agreed to in writing, software |  | ||||||
|  * distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
|  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
|  * See the License for the specific language governing permissions and |  | ||||||
|  * limitations under the License. |  | ||||||
| */ |  | ||||||
|  |  | ||||||
|  |  | ||||||
| /* |  | ||||||
|  * pmu_logger.c - Kernel module to log the CCI PMU counters |  | ||||||
|  */ |  | ||||||
|  |  | ||||||
| #include <linux/init.h> |  | ||||||
| #include <linux/kernel.h> |  | ||||||
| #include <linux/module.h> |  | ||||||
| #include <linux/debugfs.h> |  | ||||||
| #include <linux/timer.h> |  | ||||||
| #include <asm/io.h> |  | ||||||
|  |  | ||||||
| #define MODULE_NAME "cci_pmu_logger" |  | ||||||
|  |  | ||||||
| // CCI_BASE needs to be modified to point to the mapped location of CCI in |  | ||||||
| // memory on your device. |  | ||||||
| #define CCI_BASE 0x2C090000  // TC2 |  | ||||||
| //#define CCI_BASE 0x10D20000  |  | ||||||
| #define CCI_SIZE 0x00010000 |  | ||||||
|  |  | ||||||
| #define PMCR 0x100 |  | ||||||
|  |  | ||||||
| #define PMCR_CEN (1 << 0) |  | ||||||
| #define PMCR_RST (1 << 1) |  | ||||||
| #define PMCR_CCR (1 << 2) |  | ||||||
| #define PMCR_CCD (1 << 3) |  | ||||||
| #define PMCR_EX  (1 << 4) |  | ||||||
| #define PMCR_DP  (1 << 5) |  | ||||||
|  |  | ||||||
| #define CC_BASE  0x9000 |  | ||||||
| #define PC0_BASE 0xA000 |  | ||||||
| #define PC1_BASE 0xB000 |  | ||||||
| #define PC2_BASE 0xC000 |  | ||||||
| #define PC3_BASE 0xD000 |  | ||||||
|  |  | ||||||
| #define PC_ESR      0x0 |  | ||||||
| #define CNT_VALUE   0x4 |  | ||||||
| #define CNT_CONTROL 0x8 |  | ||||||
|  |  | ||||||
| #define CNT_ENABLE (1 << 0) |  | ||||||
|  |  | ||||||
| u32 counter0_event = 0x6A; |  | ||||||
| u32 counter1_event = 0x63; |  | ||||||
| u32 counter2_event = 0x8A; |  | ||||||
| u32 counter3_event = 0x83; |  | ||||||
|  |  | ||||||
| u32 enable_console = 0; |  | ||||||
| u32 enable_ftrace = 1; |  | ||||||
|  |  | ||||||
| void *cci_base = 0; |  | ||||||
|  |  | ||||||
| static struct dentry *module_debugfs_root; |  | ||||||
| static int enabled = false; |  | ||||||
|  |  | ||||||
| u32 delay = 10; //jiffies. This translates to 1 sample every 100 ms |  | ||||||
| struct timer_list timer; |  | ||||||
|  |  | ||||||
| static void call_after_delay(void) |  | ||||||
| { |  | ||||||
| 	timer.expires = jiffies + delay; |  | ||||||
| 	add_timer(&timer); |  | ||||||
| } |  | ||||||
|  |  | ||||||
| 	 |  | ||||||
| static void setup_and_call_after_delay(void (*fn)(unsigned long)) |  | ||||||
| { |  | ||||||
| 	init_timer(&timer); |  | ||||||
| 	timer.data = (unsigned long)&timer; |  | ||||||
| 	timer.function = fn; |  | ||||||
|  |  | ||||||
| 	call_after_delay(); |  | ||||||
| } |  | ||||||
|  |  | ||||||
| static void print_counter_configuration(void) |  | ||||||
| { |  | ||||||
| 	if (enable_ftrace) |  | ||||||
| 		trace_printk("Counter_0: %02x Counter_1: %02x Counter_2: %02x Counter_3: %02x\n", \ |  | ||||||
| 			     counter0_event, counter1_event, counter2_event, counter3_event); |  | ||||||
|  |  | ||||||
| 	if (enable_console) |  | ||||||
| 		printk("Counter_0: %02x Counter_1: %02x Counter_2: %02x Counter_3: %02x\n", \ |  | ||||||
| 		       counter0_event, counter1_event, counter2_event, counter3_event); |  | ||||||
| } |  | ||||||
|  |  | ||||||
| static void initialize_cci_pmu(void) |  | ||||||
| { |  | ||||||
| 	u32 val; |  | ||||||
|  |  | ||||||
| 	// Select the events counted |  | ||||||
| 	iowrite32(counter0_event, cci_base + PC0_BASE + PC_ESR); |  | ||||||
| 	iowrite32(counter1_event, cci_base + PC1_BASE + PC_ESR); |  | ||||||
| 	iowrite32(counter2_event, cci_base + PC2_BASE + PC_ESR); |  | ||||||
| 	iowrite32(counter3_event, cci_base + PC3_BASE + PC_ESR); |  | ||||||
|  |  | ||||||
| 	// Enable the individual PMU counters |  | ||||||
| 	iowrite32(CNT_ENABLE, cci_base + PC0_BASE + CNT_CONTROL); |  | ||||||
| 	iowrite32(CNT_ENABLE, cci_base + PC1_BASE + CNT_CONTROL); |  | ||||||
| 	iowrite32(CNT_ENABLE, cci_base + PC2_BASE + CNT_CONTROL); |  | ||||||
| 	iowrite32(CNT_ENABLE, cci_base + PC3_BASE + CNT_CONTROL); |  | ||||||
| 	iowrite32(CNT_ENABLE, cci_base + CC_BASE + CNT_CONTROL); |  | ||||||
|  |  | ||||||
| 	// Reset the counters and configure the Cycle Count Divider |  | ||||||
| 	val = ioread32(cci_base + PMCR); |  | ||||||
| 	iowrite32(val | PMCR_RST | PMCR_CCR | PMCR_CCD, cci_base + PMCR); |  | ||||||
| } |  | ||||||
|  |  | ||||||
| static void enable_cci_pmu_counters(void) |  | ||||||
| { |  | ||||||
| 	u32 val = ioread32(cci_base + PMCR); |  | ||||||
| 	iowrite32(val | PMCR_CEN, cci_base + PMCR); |  | ||||||
| } |  | ||||||
|  |  | ||||||
| static void disable_cci_pmu_counters(void) |  | ||||||
| { |  | ||||||
| 	u32 val = ioread32(cci_base + PMCR); |  | ||||||
| 	iowrite32(val & ~PMCR_CEN, cci_base + PMCR); |  | ||||||
| } |  | ||||||
|  |  | ||||||
| static void trace_values(unsigned long arg) |  | ||||||
| { |  | ||||||
| 	u32 cycles; |  | ||||||
| 	u32 counter[4]; |  | ||||||
|  |  | ||||||
| 	cycles = ioread32(cci_base + CC_BASE + CNT_VALUE); |  | ||||||
| 	counter[0] = ioread32(cci_base + PC0_BASE + CNT_VALUE); |  | ||||||
| 	counter[1] = ioread32(cci_base + PC1_BASE + CNT_VALUE); |  | ||||||
| 	counter[2] = ioread32(cci_base + PC2_BASE + CNT_VALUE); |  | ||||||
| 	counter[3] = ioread32(cci_base + PC3_BASE + CNT_VALUE); |  | ||||||
|  |  | ||||||
| 	if (enable_ftrace) |  | ||||||
| 		trace_printk("Cycles: %08x Counter_0: %08x" |  | ||||||
| 			     " Counter_1: %08x Counter_2: %08x Counter_3: %08x\n", \ |  | ||||||
| 			     cycles, counter[0], counter[1], counter[2], counter[3]); |  | ||||||
|  |  | ||||||
| 	if (enable_console) |  | ||||||
| 		printk("Cycles: %08x Counter_0: %08x" |  | ||||||
| 		       " Counter_1: %08x Counter_2: %08x Counter_3: %08x\n", \ |  | ||||||
| 		       cycles, counter[0], counter[1], counter[2], counter[3]); |  | ||||||
|  |  | ||||||
| 	if (enabled) { |  | ||||||
| 		u32 val; |  | ||||||
| 		// Reset the counters |  | ||||||
| 		val = ioread32(cci_base + PMCR); |  | ||||||
| 		iowrite32(val | PMCR_RST | PMCR_CCR, cci_base + PMCR); |  | ||||||
|  |  | ||||||
| 		call_after_delay(); |  | ||||||
| 	} |  | ||||||
| } |  | ||||||
|  |  | ||||||
| static ssize_t read_control(struct file *file, char __user *buf, size_t count, loff_t *ppos) |  | ||||||
| { |  | ||||||
| 	char status[16]; |  | ||||||
| 	/* printk(KERN_DEBUG "%s\n", __func__); */ |  | ||||||
|  |  | ||||||
| 	if (enabled) |  | ||||||
| 		snprintf(status, 16, "enabled\n"); |  | ||||||
| 	else |  | ||||||
| 		snprintf(status, 16, "disabled\n"); |  | ||||||
|  |  | ||||||
| 	return simple_read_from_buffer(buf, count, ppos, status, strlen(status)); |  | ||||||
| } |  | ||||||
|  |  | ||||||
| static ssize_t write_control(struct file *file, const char __user *buf, size_t count, loff_t *ppos) |  | ||||||
| { |  | ||||||
| 	if (enabled) { |  | ||||||
| 		disable_cci_pmu_counters(); |  | ||||||
| 		enabled = false; |  | ||||||
| 	} else { |  | ||||||
| 		initialize_cci_pmu(); |  | ||||||
| 		enable_cci_pmu_counters(); |  | ||||||
| 		enabled = true; |  | ||||||
|  |  | ||||||
| 		print_counter_configuration(); |  | ||||||
| 		setup_and_call_after_delay(trace_values); |  | ||||||
| 	} |  | ||||||
|  |  | ||||||
| 	return count; |  | ||||||
| } |  | ||||||
|  |  | ||||||
| static ssize_t read_values(struct file *file, char __user *buf, size_t count, loff_t *ppos) |  | ||||||
| { |  | ||||||
| 	char values[256]; |  | ||||||
| 	/* u32 val; */ |  | ||||||
|  |  | ||||||
| 	snprintf(values, 256, "Cycles: %08x Counter_0: %08x" |  | ||||||
| 		 " Counter_1: %08x Counter_2: %08x Counter_3: %08x\n", \ |  | ||||||
| 		 ioread32(cci_base + CC_BASE + CNT_VALUE),  \ |  | ||||||
| 		 ioread32(cci_base + PC0_BASE + CNT_VALUE), \ |  | ||||||
| 		 ioread32(cci_base + PC1_BASE + CNT_VALUE), \ |  | ||||||
| 		 ioread32(cci_base + PC2_BASE + CNT_VALUE), \ |  | ||||||
| 		 ioread32(cci_base + PC3_BASE + CNT_VALUE)); |  | ||||||
|  |  | ||||||
| 	return simple_read_from_buffer(buf, count, ppos, values, strlen(values)); |  | ||||||
| } |  | ||||||
|  |  | ||||||
| static const struct file_operations control_fops = { |  | ||||||
| 	.owner = THIS_MODULE, |  | ||||||
| 	.read = read_control, |  | ||||||
| 	.write = write_control, |  | ||||||
| }; |  | ||||||
|  |  | ||||||
| static const struct file_operations value_fops = { |  | ||||||
| 	.owner = THIS_MODULE, |  | ||||||
| 	.read = read_values, |  | ||||||
| }; |  | ||||||
|  |  | ||||||
| static int __init pmu_logger_init(void) |  | ||||||
| { |  | ||||||
| 	struct dentry *retval; |  | ||||||
| 	 |  | ||||||
| 	module_debugfs_root = debugfs_create_dir(MODULE_NAME, NULL); |  | ||||||
| 	if (!module_debugfs_root || IS_ERR(module_debugfs_root)) { |  | ||||||
| 		printk(KERN_ERR "error creating debugfs dir.\n"); |  | ||||||
| 		goto out; |  | ||||||
| 	} |  | ||||||
| 	 |  | ||||||
| 	retval = debugfs_create_file("control", S_IRUGO | S_IWUGO, module_debugfs_root, NULL, &control_fops); |  | ||||||
| 	if (!retval) |  | ||||||
| 		goto out; |  | ||||||
|  |  | ||||||
| 	retval = debugfs_create_file("values", S_IRUGO, module_debugfs_root, NULL, &value_fops); |  | ||||||
| 	if (!retval) |  | ||||||
| 		goto out; |  | ||||||
|  |  | ||||||
| 	retval = debugfs_create_bool("enable_console", S_IRUGO | S_IWUGO, module_debugfs_root, &enable_console); |  | ||||||
| 	if (!retval) |  | ||||||
| 		goto out; |  | ||||||
|  |  | ||||||
| 	retval = debugfs_create_bool("enable_ftrace", S_IRUGO | S_IWUGO, module_debugfs_root, &enable_ftrace); |  | ||||||
| 	if (!retval) |  | ||||||
| 		goto out; |  | ||||||
|  |  | ||||||
| 	retval = debugfs_create_u32("period_jiffies", S_IRUGO | S_IWUGO, module_debugfs_root, &delay); |  | ||||||
| 	if (!retval) |  | ||||||
| 		goto out; |  | ||||||
|  |  | ||||||
| 	retval = debugfs_create_x32("counter0", S_IRUGO | S_IWUGO, module_debugfs_root, &counter0_event); |  | ||||||
| 	if (!retval) |  | ||||||
| 		goto out; |  | ||||||
| 	retval = debugfs_create_x32("counter1", S_IRUGO | S_IWUGO, module_debugfs_root, &counter1_event); |  | ||||||
| 	if (!retval) |  | ||||||
| 		goto out; |  | ||||||
| 	retval = debugfs_create_x32("counter2", S_IRUGO | S_IWUGO, module_debugfs_root, &counter2_event); |  | ||||||
| 	if (!retval) |  | ||||||
| 		goto out; |  | ||||||
| 	retval = debugfs_create_x32("counter3", S_IRUGO | S_IWUGO, module_debugfs_root, &counter3_event); |  | ||||||
| 	if (!retval) |  | ||||||
| 		goto out; |  | ||||||
|  |  | ||||||
| 	cci_base = ioremap(CCI_BASE, CCI_SIZE); |  | ||||||
| 	if (!cci_base) |  | ||||||
| 		goto out; |  | ||||||
|  |  | ||||||
| 	printk(KERN_INFO "CCI PMU Logger loaded.\n"); |  | ||||||
| 	return 0; |  | ||||||
| 	 |  | ||||||
| out: |  | ||||||
| 	debugfs_remove_recursive(module_debugfs_root); |  | ||||||
| 	return 1; |  | ||||||
| } |  | ||||||
|  |  | ||||||
| static void __exit pmu_logger_exit(void) |  | ||||||
| { |  | ||||||
| 	if (module_debugfs_root) { |  | ||||||
| 		debugfs_remove_recursive(module_debugfs_root); |  | ||||||
| 		module_debugfs_root = NULL; |  | ||||||
| 	} |  | ||||||
| 	if (cci_base) |  | ||||||
| 		iounmap(cci_base); |  | ||||||
|  |  | ||||||
| 	printk(KERN_INFO "CCI PMU Logger removed.\n"); |  | ||||||
| } |  | ||||||
|  |  | ||||||
| module_init(pmu_logger_init); |  | ||||||
| module_exit(pmu_logger_exit); |  | ||||||
|  |  | ||||||
| MODULE_LICENSE("GPL"); |  | ||||||
| MODULE_AUTHOR("Punit Agrawal"); |  | ||||||
| MODULE_DESCRIPTION("logger for CCI PMU counters"); |  | ||||||
							
								
								
									
										
wlauto/external/pmu_logger/pmu_logger.ko (vendored binary, not shown)
										
									
								
							
							
								
								
									
wlauto/external/readenergy/Makefile (vendored, 11 lines removed)
| @@ -1,11 +0,0 @@ | |||||||
| # To build: |  | ||||||
| # |  | ||||||
| # CROSS_COMPILE=aarch64-linux-gnu- make |  | ||||||
| # |  | ||||||
| CROSS_COMPILE?=aarch64-linux-gnu- |  | ||||||
| CC=$(CROSS_COMPILE)gcc |  | ||||||
| CFLAGS='-Wl,-static -Wl,-lc' |  | ||||||
|  |  | ||||||
| readenergy: readenergy.c |  | ||||||
| 	$(CC) $(CFLAGS) readenergy.c -o readenergy |  | ||||||
| 	cp readenergy ../../instrumentation/juno_energy/readenergy |  | ||||||
							
								
								
									
										
wlauto/external/readenergy/readenergy (vendored binary, not shown)
										
									
								
							
							
								
								
									
wlauto/external/readenergy/readenergy.c (vendored, 345 lines removed)
| @@ -1,345 +0,0 @@ | |||||||
| /*    Copyright 2014-2015 ARM Limited |  | ||||||
|  * |  | ||||||
|  * Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
|  * you may not use this file except in compliance with the License. |  | ||||||
|  * You may obtain a copy of the License at |  | ||||||
|  * |  | ||||||
|  *     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
|  * |  | ||||||
|  * Unless required by applicable law or agreed to in writing, software |  | ||||||
|  * distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
|  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
|  * See the License for the specific language governing permissions and |  | ||||||
|  * limitations under the License. |  | ||||||
| */ |  | ||||||
|  |  | ||||||
|  |  | ||||||
| /* |  | ||||||
|  * readenergy.c  |  | ||||||
|  * |  | ||||||
|  * Reads APB energy registers in Juno and outputs the measurements (converted to appropriate units). |  | ||||||
|  * |  | ||||||
| */ |  | ||||||
| #include <errno.h> |  | ||||||
| #include <fcntl.h> |  | ||||||
| #include <stdint.h> |  | ||||||
| #include <stdio.h> |  | ||||||
| #include <stdlib.h> |  | ||||||
| #include <string.h> |  | ||||||
| #include <signal.h> |  | ||||||
| #include <sys/mman.h> |  | ||||||
| #include <sys/stat.h> |  | ||||||
| #include <sys/types.h> |  | ||||||
| #include <time.h> |  | ||||||
| #include <unistd.h> |  | ||||||
|  |  | ||||||
| // The following values obtained from Juno TRM 2014/03/04 section 4.5 |  | ||||||
|  |  | ||||||
| // Location of APB registers in memory |  | ||||||
| #define APB_BASE_MEMORY 0x1C010000 |  | ||||||
| // APB energy counters start at offset 0xD0 from the base APB address. |  | ||||||
| #define BASE_INDEX (0xD0 / 4) |  | ||||||
| // the one-past last APB counter |  | ||||||
| #define APB_SIZE 0x120 |  | ||||||
|  |  | ||||||
| // Masks specifying the bits that contain the actual counter values |  | ||||||
| #define CMASK 0xFFF |  | ||||||
| #define VMASK 0xFFF |  | ||||||
| #define PMASK 0xFFFFFF |  | ||||||
|  |  | ||||||
| // Scaling factor (divisor) for getting measured values from counters |  | ||||||
| #define SYS_ADC_CH0_PM1_SYS_SCALE 761 |  | ||||||
| #define SYS_ADC_CH1_PM2_A57_SCALE 381 |  | ||||||
| #define SYS_ADC_CH2_PM3_A53_SCALE 761 |  | ||||||
| #define SYS_ADC_CH3_PM4_GPU_SCALE 381 |  | ||||||
| #define SYS_ADC_CH4_VSYS_SCALE 1622 |  | ||||||
| #define SYS_ADC_CH5_VA57_SCALE 1622 |  | ||||||
| #define SYS_ADC_CH6_VA53_SCALE 1622 |  | ||||||
| #define SYS_ADC_CH7_VGPU_SCALE 1622 |  | ||||||
| #define SYS_POW_CH04_SYS_SCALE (SYS_ADC_CH0_PM1_SYS_SCALE * SYS_ADC_CH4_VSYS_SCALE) |  | ||||||
| #define SYS_POW_CH15_A57_SCALE (SYS_ADC_CH1_PM2_A57_SCALE * SYS_ADC_CH5_VA57_SCALE) |  | ||||||
| #define SYS_POW_CH26_A53_SCALE (SYS_ADC_CH2_PM3_A53_SCALE * SYS_ADC_CH6_VA53_SCALE) |  | ||||||
| #define SYS_POW_CH37_GPU_SCALE (SYS_ADC_CH3_PM4_GPU_SCALE * SYS_ADC_CH7_VGPU_SCALE) |  | ||||||
| #define SYS_ENM_CH0_SYS_SCALE 12348030000 |  | ||||||
| #define SYS_ENM_CH1_A57_SCALE 6174020000 |  | ||||||
| #define SYS_ENM_CH0_A53_SCALE 12348030000 |  | ||||||
| #define SYS_ENM_CH0_GPU_SCALE 6174020000 |  | ||||||
|  |  | ||||||
| // Original values prior to re-calibration. |  | ||||||
| /*#define SYS_ADC_CH0_PM1_SYS_SCALE 819.2*/ |  | ||||||
| /*#define SYS_ADC_CH1_PM2_A57_SCALE 409.6*/ |  | ||||||
| /*#define SYS_ADC_CH2_PM3_A53_SCALE 819.2*/ |  | ||||||
| /*#define SYS_ADC_CH3_PM4_GPU_SCALE 409.6*/ |  | ||||||
| /*#define SYS_ADC_CH4_VSYS_SCALE 1638.4*/ |  | ||||||
| /*#define SYS_ADC_CH5_VA57_SCALE 1638.4*/ |  | ||||||
| /*#define SYS_ADC_CH6_VA53_SCALE 1638.4*/ |  | ||||||
| /*#define SYS_ADC_CH7_VGPU_SCALE 1638.4*/ |  | ||||||
| /*#define SYS_POW_CH04_SYS_SCALE (SYS_ADC_CH0_PM1_SYS_SCALE * SYS_ADC_CH4_VSYS_SCALE)*/ |  | ||||||
| /*#define SYS_POW_CH15_A57_SCALE (SYS_ADC_CH1_PM2_A57_SCALE * SYS_ADC_CH5_VA57_SCALE)*/ |  | ||||||
| /*#define SYS_POW_CH26_A53_SCALE (SYS_ADC_CH2_PM3_A53_SCALE * SYS_ADC_CH6_VA53_SCALE)*/ |  | ||||||
| /*#define SYS_POW_CH37_GPU_SCALE (SYS_ADC_CH3_PM4_GPU_SCALE * SYS_ADC_CH7_VGPU_SCALE)*/ |  | ||||||
| /*#define SYS_ENM_CH0_SYS_SCALE 13421772800.0*/ |  | ||||||
| /*#define SYS_ENM_CH1_A57_SCALE 6710886400.0*/ |  | ||||||
| /*#define SYS_ENM_CH0_A53_SCALE 13421772800.0*/ |  | ||||||
| /*#define SYS_ENM_CH0_GPU_SCALE 6710886400.0*/ |  | ||||||
|  |  | ||||||
| // Ignore individual errors but if see too many, abort. |  | ||||||
| #define ERROR_THRESHOLD 10 |  | ||||||
|  |  | ||||||
| // Default counter poll period (in milliseconds). |  | ||||||
| #define DEFAULT_PERIOD 100 |  | ||||||
|  |  | ||||||
| // A single reading from the energy meter. The values are the proper readings converted |  | ||||||
| // to appropriate units (e.g. Watts for power); they are *not* raw counter values. |  | ||||||
| struct reading |  | ||||||
| { |  | ||||||
| 	double sys_adc_ch0_pm1_sys; |  | ||||||
| 	double sys_adc_ch1_pm2_a57; |  | ||||||
| 	double sys_adc_ch2_pm3_a53; |  | ||||||
| 	double sys_adc_ch3_pm4_gpu; |  | ||||||
| 	double sys_adc_ch4_vsys; |  | ||||||
| 	double sys_adc_ch5_va57; |  | ||||||
| 	double sys_adc_ch6_va53; |  | ||||||
| 	double sys_adc_ch7_vgpu; |  | ||||||
| 	double sys_pow_ch04_sys; |  | ||||||
| 	double sys_pow_ch15_a57; |  | ||||||
| 	double sys_pow_ch26_a53; |  | ||||||
| 	double sys_pow_ch37_gpu; |  | ||||||
| 	double sys_enm_ch0_sys; |  | ||||||
| 	double sys_enm_ch1_a57; |  | ||||||
| 	double sys_enm_ch0_a53; |  | ||||||
| 	double sys_enm_ch0_gpu; |  | ||||||
| }; |  | ||||||
|  |  | ||||||
| inline uint64_t join_64bit_register(uint32_t *buffer, int index) |  | ||||||
| { |  | ||||||
| 	uint64_t result = 0; |  | ||||||
| 	result |= buffer[index]; |  | ||||||
| 	result |= (uint64_t)(buffer[index+1]) << 32; |  | ||||||
| 	return result; |  | ||||||
| } |  | ||||||
|  |  | ||||||
| int nsleep(const struct timespec *req, struct timespec *rem) |  | ||||||
| { |  | ||||||
| 	struct timespec temp_rem; |  | ||||||
| 	if (nanosleep(req, rem) == -1) |  | ||||||
| 	{ |  | ||||||
| 		if (errno == EINTR) |  | ||||||
| 		{ |  | ||||||
| 			return nsleep(rem, &temp_rem); |  | ||||||
| 		} |  | ||||||
| 		else |  | ||||||
| 		{ |  | ||||||
| 			return errno; |  | ||||||
| 		} |  | ||||||
| 	} |  | ||||||
| 	else |  | ||||||
| 	{ |  | ||||||
| 		return 0; |  | ||||||
| 	} |  | ||||||
| } |  | ||||||
|   |  | ||||||
| void print_help() |  | ||||||
| { |  | ||||||
| 	fprintf(stderr, "Usage: readenergy [-t PERIOD] -o OUTFILE\n\n" |  | ||||||
| 			"Read Juno energy counters every PERIOD milliseconds, writing them\n" |  | ||||||
| 			"to OUTFILE in CSV format until SIGTERM is received.\n\n" |  | ||||||
| 			"Parameters:\n" |  | ||||||
| 			"	PERIOD is the counter poll period in milliseconds.\n" |  | ||||||
| 			"	       (Defaults to 100 milliseconds.)\n" |  | ||||||
| 			"	OUTFILE is the output file path\n"); |  | ||||||
| } |  | ||||||
|  |  | ||||||
| // debugging only... |  | ||||||
| inline void dprint(char *msg) |  | ||||||
| { |  | ||||||
| 	fprintf(stderr, "%s\n", msg); |  | ||||||
| 	sync(); |  | ||||||
| } |  | ||||||
|  |  | ||||||
| // -------------------------------------- config ---------------------------------------------------- |  | ||||||
|  |  | ||||||
| struct config |  | ||||||
| { |  | ||||||
| 	struct timespec period; |  | ||||||
| 	char *output_file; |  | ||||||
| }; |  | ||||||
|  |  | ||||||
| void config_init_period_from_millis(struct config *this, long millis) |  | ||||||
| { |  | ||||||
| 	this->period.tv_sec = (time_t)(millis / 1000); |  | ||||||
| 	this->period.tv_nsec = (millis % 1000) * 1000000; |  | ||||||
| } |  | ||||||
|  |  | ||||||
| void config_init(struct config *this, int argc, char *argv[]) |  | ||||||
| { |  | ||||||
| 	this->output_file = NULL; |  | ||||||
| 	config_init_period_from_millis(this, DEFAULT_PERIOD); |  | ||||||
|  |  | ||||||
| 	int opt; |  | ||||||
| 	while ((opt = getopt(argc, argv, "ht:o:")) != -1) |  | ||||||
| 	{ |  | ||||||
| 		switch(opt) |  | ||||||
| 		{ |  | ||||||
| 			case 't': |  | ||||||
| 				config_init_period_from_millis(this, atol(optarg)); |  | ||||||
| 				break; |  | ||||||
| 			case 'o': |  | ||||||
| 				this->output_file = optarg; |  | ||||||
| 				break; |  | ||||||
| 			case 'h': |  | ||||||
| 				print_help(); |  | ||||||
| 				exit(EXIT_SUCCESS); |  | ||||||
| 				break; |  | ||||||
| 			default: |  | ||||||
| 				fprintf(stderr, "ERROR: Unexpected option %s\n\n", opt); |  | ||||||
| 				print_help(); |  | ||||||
| 				exit(EXIT_FAILURE); |  | ||||||
| 		} |  | ||||||
| 	} |  | ||||||
|  |  | ||||||
| 	if (this->output_file == NULL) |  | ||||||
| 	{ |  | ||||||
| 		fprintf(stderr, "ERROR: Mandatory -o option not specified.\n\n"); |  | ||||||
| 		print_help(); |  | ||||||
| 		exit(EXIT_FAILURE); |  | ||||||
| 	} |  | ||||||
| } |  | ||||||
|  |  | ||||||
| // -------------------------------------- /config --------------------------------------------------- |  | ||||||
|  |  | ||||||
| // -------------------------------------- emeter ---------------------------------------------------- |  | ||||||
|  |  | ||||||
| struct emeter |  | ||||||
| { |  | ||||||
| 	int fd; |  | ||||||
| 	FILE *out; |  | ||||||
| 	void *mmap_base; |  | ||||||
| }; |  | ||||||
|  |  | ||||||
| void emeter_init(struct emeter *this, char *outfile) |  | ||||||
| { |  | ||||||
| 	this->out = fopen(outfile, "w"); |  | ||||||
| 	if (this->out == NULL) |  | ||||||
| 	{ |  | ||||||
| 		fprintf(stderr, "ERROR: Could not open output file %s; got %s\n", outfile, strerror(errno)); |  | ||||||
| 		exit(EXIT_FAILURE); |  | ||||||
| 	} |  | ||||||
|  |  | ||||||
|         this->fd = open("/dev/mem", O_RDONLY); |  | ||||||
|         if(this->fd < 0) |  | ||||||
|         { |  | ||||||
|                 fprintf(stderr, "ERROR: Can't open /dev/mem; got %s\n", strerror(errno)); |  | ||||||
| 		fclose(this->out); |  | ||||||
| 		exit(EXIT_FAILURE); |  | ||||||
|         } |  | ||||||
|  |  | ||||||
| 	this->mmap_base = mmap(NULL, APB_SIZE, PROT_READ, MAP_SHARED, this->fd, APB_BASE_MEMORY); |  | ||||||
| 	if (this->mmap_base == MAP_FAILED) |  | ||||||
| 	{ |  | ||||||
| 		fprintf(stderr, "ERROR: mmap failed; got %s\n", strerror(errno)); |  | ||||||
| 		close(this->fd); |  | ||||||
| 		fclose(this->out); |  | ||||||
| 		exit(EXIT_FAILURE); |  | ||||||
| 	} |  | ||||||
|  |  | ||||||
| 	fprintf(this->out, "sys_curr,a57_curr,a53_curr,gpu_curr," |  | ||||||
|  			   "sys_volt,a57_volt,a53_volt,gpu_volt," |  | ||||||
| 			   "sys_pow,a57_pow,a53_pow,gpu_pow," |  | ||||||
| 			   "sys_cenr,a57_cenr,a53_cenr,gpu_cenr\n"); |  | ||||||
| } |  | ||||||
|  |  | ||||||
| void emeter_read_measurements(struct emeter *this, struct reading *reading) |  | ||||||
| { |  | ||||||
| 	uint32_t *buffer = (uint32_t *)this->mmap_base; |  | ||||||
| 	reading->sys_adc_ch0_pm1_sys = (double)(CMASK & buffer[BASE_INDEX+0]) / SYS_ADC_CH0_PM1_SYS_SCALE; |  | ||||||
| 	reading->sys_adc_ch1_pm2_a57 = (double)(CMASK & buffer[BASE_INDEX+1]) / SYS_ADC_CH1_PM2_A57_SCALE; |  | ||||||
| 	reading->sys_adc_ch2_pm3_a53 = (double)(CMASK & buffer[BASE_INDEX+2]) / SYS_ADC_CH2_PM3_A53_SCALE; |  | ||||||
| 	reading->sys_adc_ch3_pm4_gpu = (double)(CMASK & buffer[BASE_INDEX+3]) / SYS_ADC_CH3_PM4_GPU_SCALE; |  | ||||||
| 	reading->sys_adc_ch4_vsys = (double)(VMASK & buffer[BASE_INDEX+4]) / SYS_ADC_CH4_VSYS_SCALE; |  | ||||||
| 	reading->sys_adc_ch5_va57 = (double)(VMASK & buffer[BASE_INDEX+5]) / SYS_ADC_CH5_VA57_SCALE; |  | ||||||
| 	reading->sys_adc_ch6_va53 = (double)(VMASK & buffer[BASE_INDEX+6]) / SYS_ADC_CH6_VA53_SCALE; |  | ||||||
| 	reading->sys_adc_ch7_vgpu = (double)(VMASK & buffer[BASE_INDEX+7]) / SYS_ADC_CH7_VGPU_SCALE; |  | ||||||
| 	reading->sys_pow_ch04_sys = (double)(PMASK & buffer[BASE_INDEX+8]) / SYS_POW_CH04_SYS_SCALE; |  | ||||||
| 	reading->sys_pow_ch15_a57 = (double)(PMASK & buffer[BASE_INDEX+9]) / SYS_POW_CH15_A57_SCALE; |  | ||||||
| 	reading->sys_pow_ch26_a53 = (double)(PMASK & buffer[BASE_INDEX+10]) / SYS_POW_CH26_A53_SCALE; |  | ||||||
| 	reading->sys_pow_ch37_gpu = (double)(PMASK & buffer[BASE_INDEX+11]) / SYS_POW_CH37_GPU_SCALE; |  | ||||||
| 	reading->sys_enm_ch0_sys = (double)join_64bit_register(buffer, BASE_INDEX+12) / SYS_ENM_CH0_SYS_SCALE; |  | ||||||
| 	reading->sys_enm_ch1_a57 = (double)join_64bit_register(buffer, BASE_INDEX+14) / SYS_ENM_CH1_A57_SCALE; |  | ||||||
| 	reading->sys_enm_ch0_a53 = (double)join_64bit_register(buffer, BASE_INDEX+16) / SYS_ENM_CH0_A53_SCALE; |  | ||||||
| 	reading->sys_enm_ch0_gpu = (double)join_64bit_register(buffer, BASE_INDEX+18) / SYS_ENM_CH0_GPU_SCALE; |  | ||||||
| } |  | ||||||
|  |  | ||||||
| void emeter_take_reading(struct emeter *this) |  | ||||||
| { |  | ||||||
| 	static struct reading reading; |  | ||||||
| 	static int error_count = 0;  /* persists across readings so repeated failures abort */ |  | ||||||
| 	emeter_read_measurements(this, &reading); |  | ||||||
| 	int ret = fprintf(this->out, "%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n", |  | ||||||
| 			reading.sys_adc_ch0_pm1_sys, |  | ||||||
| 			reading.sys_adc_ch1_pm2_a57, |  | ||||||
| 			reading.sys_adc_ch2_pm3_a53, |  | ||||||
| 			reading.sys_adc_ch3_pm4_gpu, |  | ||||||
| 			reading.sys_adc_ch4_vsys, |  | ||||||
| 			reading.sys_adc_ch5_va57, |  | ||||||
| 			reading.sys_adc_ch6_va53, |  | ||||||
| 			reading.sys_adc_ch7_vgpu, |  | ||||||
| 			reading.sys_pow_ch04_sys, |  | ||||||
| 			reading.sys_pow_ch15_a57, |  | ||||||
| 			reading.sys_pow_ch26_a53, |  | ||||||
| 			reading.sys_pow_ch37_gpu, |  | ||||||
| 			reading.sys_enm_ch0_sys, |  | ||||||
| 			reading.sys_enm_ch1_a57, |  | ||||||
| 			reading.sys_enm_ch0_a53, |  | ||||||
| 			reading.sys_enm_ch0_gpu); |  | ||||||
| 	if (ret < 0) |  | ||||||
| 	{ |  | ||||||
| 		fprintf(stderr, "ERROR: while writing a meter reading: %s\n", strerror(errno)); |  | ||||||
| 		if (++error_count > ERROR_THRESHOLD) |  | ||||||
| 			exit(EXIT_FAILURE); |  | ||||||
| 	} |  | ||||||
| } |  | ||||||
|  |  | ||||||
| void emeter_finalize(struct emeter *this) |  | ||||||
| { |  | ||||||
| 	if (munmap(this->mmap_base, APB_SIZE) == -1)  |  | ||||||
| 	{ |  | ||||||
| 		// Report the error but don't bother doing anything else, as we're not gonna do  |  | ||||||
| 		// anything with emeter after this point anyway. |  | ||||||
| 		fprintf(stderr, "ERROR: munmap failed; got %s\n", strerror(errno)); |  | ||||||
| 	} |  | ||||||
| 	close(this->fd); |  | ||||||
| 	fclose(this->out); |  | ||||||
| } |  | ||||||
|  |  | ||||||
| // -------------------------------------- /emeter ---------------------------------------------------- |  | ||||||
|  |  | ||||||
| int done = 0; |  | ||||||
|  |  | ||||||
| void term_handler(int signum) |  | ||||||
| { |  | ||||||
| 	done = 1; |  | ||||||
| } |  | ||||||
|  |  | ||||||
| int main(int argc, char *argv[]) |  | ||||||
| { |  | ||||||
| 	struct sigaction action; |  | ||||||
| 	memset(&action, 0, sizeof(struct sigaction)); |  | ||||||
| 	action.sa_handler = term_handler; |  | ||||||
| 	sigaction(SIGTERM, &action, NULL); |  | ||||||
|  |  | ||||||
| 	struct config config; |  | ||||||
| 	struct emeter emeter; |  | ||||||
| 	config_init(&config, argc, argv); |  | ||||||
| 	emeter_init(&emeter, config.output_file); |  | ||||||
|  |  | ||||||
| 	struct timespec remaining; |  | ||||||
| 	while (!done)  |  | ||||||
| 	{ |  | ||||||
| 		emeter_take_reading(&emeter); |  | ||||||
| 		nsleep(&config.period, &remaining); |  | ||||||
| 	} |  | ||||||
|  |  | ||||||
| 	emeter_finalize(&emeter); |  | ||||||
| 	return EXIT_SUCCESS; |  | ||||||
| } |  | ||||||
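Downstream tooling only needs to understand the CSV that readenergy writes; the header emitted by emeter_init() above fixes the column names. A consumption sketch (assumed post-processing, not part of the tool):

    import csv

    def average_sys_power(path):
        # 'sys_pow' is one of the sixteen columns written by emeter_init().
        with open(path) as fh:
            readings = [float(row['sys_pow']) for row in csv.DictReader(fh)]
        return sum(readings) / len(readings) if readings else 0.0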
							
								
								
									
wlauto/external/revent/Makefile (vendored, 12 lines removed)
| @@ -1,12 +0,0 @@ | |||||||
| # CROSS_COMPILE=aarch64-linux-gnu- make |  | ||||||
| # |  | ||||||
| CC=gcc |  | ||||||
| CFLAGS=-static -lc |  | ||||||
|  |  | ||||||
| revent: revent.c |  | ||||||
| 	$(CROSS_COMPILE)$(CC) $(CFLAGS) revent.c -o revent |  | ||||||
|  |  | ||||||
| clean: |  | ||||||
| 	rm -rf revent |  | ||||||
|  |  | ||||||
| .PHONY: clean |  | ||||||
							
								
								
									
wlauto/external/revent/revent.c (vendored, 636 lines removed)
| @@ -1,636 +0,0 @@ | |||||||
| /*    Copyright 2012-2015 ARM Limited |  | ||||||
|  * |  | ||||||
|  * Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
|  * you may not use this file except in compliance with the License. |  | ||||||
|  * You may obtain a copy of the License at |  | ||||||
|  * |  | ||||||
|  *     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
|  * |  | ||||||
|  * Unless required by applicable law or agreed to in writing, software |  | ||||||
|  * distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
|  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
|  * See the License for the specific language governing permissions and |  | ||||||
|  * limitations under the License. |  | ||||||
| */ |  | ||||||
|  |  | ||||||
| #include <stdio.h> |  | ||||||
| #include <stdint.h> |  | ||||||
| #include <stdlib.h> |  | ||||||
| #include <string.h> |  | ||||||
| #include <unistd.h> |  | ||||||
| #include <fcntl.h> |  | ||||||
| #include <errno.h> |  | ||||||
| #include <limits.h> |  | ||||||
| #include <linux/input.h> |  | ||||||
| #include <sys/stat.h> |  | ||||||
| #include <signal.h> |  | ||||||
| #include <ctype.h> |  | ||||||
|  |  | ||||||
| #ifdef ANDROID |  | ||||||
| #include <android/log.h> |  | ||||||
| #endif |  | ||||||
|  |  | ||||||
|  |  | ||||||
| #define die(args...) do { \ |  | ||||||
|     fprintf(stderr, "ERROR: "); \ |  | ||||||
|     fprintf(stderr, args);   \ |  | ||||||
|     exit(EXIT_FAILURE); \ |  | ||||||
| } while(0) |  | ||||||
|  |  | ||||||
| #define dprintf(args...) if (verbose) printf(args) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| #define INPDEV_MAX_DEVICES  16 |  | ||||||
| #define INPDEV_MAX_PATH     30 |  | ||||||
|  |  | ||||||
|  |  | ||||||
| #ifndef ANDROID |  | ||||||
| int strlcpy(char *dest, char *source,  size_t size) |  | ||||||
| { |  | ||||||
|         strncpy(dest, source, size-1); |  | ||||||
|         dest[size-1] = '\0'; |  | ||||||
|         return size; |  | ||||||
| } |  | ||||||
| #endif |  | ||||||
|  |  | ||||||
| typedef enum { |  | ||||||
|     FALSE=0, |  | ||||||
|     TRUE |  | ||||||
| } bool_t; |  | ||||||
|  |  | ||||||
| typedef enum  { |  | ||||||
|     RECORD=0, |  | ||||||
|     REPLAY, |  | ||||||
|     DUMP, |  | ||||||
|     INFO, |  | ||||||
|     INVALID |  | ||||||
| } revent_mode_t; |  | ||||||
|  |  | ||||||
| typedef struct { |  | ||||||
|     revent_mode_t mode; |  | ||||||
|     int32_t record_time; |  | ||||||
|     int32_t device_number; |  | ||||||
|     char *file; |  | ||||||
| } revent_args_t; |  | ||||||
|  |  | ||||||
| typedef struct { |  | ||||||
|     int32_t id_pathc;                                        /* Count of total paths so far. */ |  | ||||||
|     char   id_pathv[INPDEV_MAX_DEVICES][INPDEV_MAX_PATH];   /* List of paths matching pattern. */ |  | ||||||
| } inpdev_t; |  | ||||||
|  |  | ||||||
| typedef struct { |  | ||||||
|     int32_t dev_idx; |  | ||||||
|     int32_t _padding; |  | ||||||
|     struct input_event event; |  | ||||||
| } replay_event_t; |  | ||||||
|  |  | ||||||
| typedef struct { |  | ||||||
|     int32_t num_fds; |  | ||||||
|     int32_t num_events; |  | ||||||
|     int *fds; |  | ||||||
|     replay_event_t *events; |  | ||||||
| } replay_buffer_t; |  | ||||||
|  |  | ||||||
|  |  | ||||||
| bool_t verbose = FALSE; |  | ||||||
| bool_t wait_for_stdin = TRUE; |  | ||||||
|  |  | ||||||
| bool_t is_numeric(char *string) |  | ||||||
| { |  | ||||||
|     int len = strlen(string); |  | ||||||
|  |  | ||||||
|     int i = 0; |  | ||||||
|     while(i < len) |  | ||||||
|     { |  | ||||||
|        if(!isdigit(string[i])) |  | ||||||
|            return FALSE; |  | ||||||
|        i++; |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     return TRUE; |  | ||||||
| } |  | ||||||
|  |  | ||||||
| off_t get_file_size(const char *filename) { |  | ||||||
|     struct stat st; |  | ||||||
|  |  | ||||||
|     if (stat(filename, &st) == 0) |  | ||||||
|         return st.st_size; |  | ||||||
|  |  | ||||||
|     die("Cannot determine size of %s: %s\n", filename, strerror(errno)); |  | ||||||
| } |  | ||||||
|  |  | ||||||
| int inpdev_init(inpdev_t **inpdev, int devid) |  | ||||||
| { |  | ||||||
|     int32_t i; |  | ||||||
|     int fd; |  | ||||||
|     int32_t num_devices; |  | ||||||
|  |  | ||||||
|     *inpdev = malloc(sizeof(inpdev_t)); |  | ||||||
|     (*inpdev)->id_pathc = 0; |  | ||||||
|  |  | ||||||
|     if (devid == -1) { |  | ||||||
|         // device id was not specified so we want to record from all available input devices. |  | ||||||
|         for(i = 0; i < INPDEV_MAX_DEVICES; ++i) |  | ||||||
|         { |  | ||||||
|             sprintf((*inpdev)->id_pathv[(*inpdev)->id_pathc], "/dev/input/event%d", i); |  | ||||||
|             fd = open((*inpdev)->id_pathv[(*inpdev)->id_pathc], O_RDONLY); |  | ||||||
|             if(fd > 0) |  | ||||||
|             { |  | ||||||
|                 close(fd); |  | ||||||
|                 dprintf("opened %s\n", (*inpdev)->id_pathv[(*inpdev)->id_pathc]); |  | ||||||
|                 (*inpdev)->id_pathc++; |  | ||||||
|             } |  | ||||||
|             else |  | ||||||
|             { |  | ||||||
|                 dprintf("could not open %s\n", (*inpdev)->id_pathv[(*inpdev)->id_pathc]); |  | ||||||
|             } |  | ||||||
|         } |  | ||||||
|     } |  | ||||||
|     else { |  | ||||||
|         // device id was specified so record just that device. |  | ||||||
|         sprintf((*inpdev)->id_pathv[0], "/dev/input/event%d", devid); |  | ||||||
|         fd = open((*inpdev)->id_pathv[0], O_RDONLY); |  | ||||||
|         if(fd > 0) |  | ||||||
|         { |  | ||||||
|             close(fd); |  | ||||||
|             dprintf("opened %s\n", (*inpdev)->id_pathv[0]); |  | ||||||
|             (*inpdev)->id_pathc++; |  | ||||||
|         } |  | ||||||
|         else |  | ||||||
|         { |  | ||||||
|             die("could not open %s\n", (*inpdev)->id_pathv[0]); |  | ||||||
|         } |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     return 0; |  | ||||||
| } |  | ||||||
|  |  | ||||||
| int inpdev_close(inpdev_t *inpdev) |  | ||||||
| { |  | ||||||
|     free(inpdev); |  | ||||||
|     return 0; |  | ||||||
| } |  | ||||||
|  |  | ||||||
| void printDevProperties(const char* aDev) |  | ||||||
| { |  | ||||||
|     int fd = -1; |  | ||||||
|     char name[256]= "Unknown"; |  | ||||||
|     if ((fd = open(aDev, O_RDONLY)) < 0) |  | ||||||
|         die("could not open %s\n", aDev); |  | ||||||
|  |  | ||||||
|     if(ioctl(fd, EVIOCGNAME(sizeof(name)), name) < 0) |  | ||||||
|         die("evdev ioctl failed on %s\n", aDev); |  | ||||||
|  |  | ||||||
|     printf("The device on %s says its name is %s\n", |  | ||||||
|             aDev, name); |  | ||||||
|     close(fd); |  | ||||||
| } |  | ||||||
|  |  | ||||||
| void dump(const char *logfile) |  | ||||||
| { |  | ||||||
|     int fdin = open(logfile, O_RDONLY); |  | ||||||
|     if (fdin < 0) die("Could not open eventlog %s\n", logfile); |  | ||||||
|  |  | ||||||
|     int nfds; |  | ||||||
|     size_t rb = read(fdin, &nfds, sizeof(nfds)); |  | ||||||
|     if (rb != sizeof(nfds)) die("problems reading eventlog\n"); |  | ||||||
|     int *fds = malloc(sizeof(int)*nfds); |  | ||||||
|     if (!fds) die("out of memory\n"); |  | ||||||
|  |  | ||||||
|     int32_t len; |  | ||||||
|     int32_t i; |  | ||||||
|     char buf[INPDEV_MAX_PATH]; |  | ||||||
|  |  | ||||||
|     inpdev_t *inpdev = malloc(sizeof(inpdev_t)); |  | ||||||
|     inpdev->id_pathc = 0; |  | ||||||
|     for (i=0; i<nfds; i++) { |  | ||||||
|         memset(buf, 0, sizeof(buf)); |  | ||||||
|         rb = read(fdin, &len, sizeof(len)); |  | ||||||
|         if (rb != sizeof(len)) die("problems reading eventlog\n"); |  | ||||||
|         rb = read(fdin, &buf[0], len); |  | ||||||
|         if (rb != len) die("problems reading eventlog\n"); |  | ||||||
|         strlcpy(inpdev->id_pathv[inpdev->id_pathc], buf, INPDEV_MAX_PATH); |  | ||||||
|         inpdev->id_pathv[inpdev->id_pathc][INPDEV_MAX_PATH-1] = '\0'; |  | ||||||
|         inpdev->id_pathc++; |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     struct input_event ev; |  | ||||||
|     int count = 0; |  | ||||||
|     while(1) { |  | ||||||
|         int32_t idx; |  | ||||||
|         rb = read(fdin, &idx, sizeof(idx)); |  | ||||||
|         if (rb != sizeof(idx)) break; |  | ||||||
|         rb = read(fdin, &ev, sizeof(ev)); |  | ||||||
|         if (rb < (int)sizeof(ev)) break; |  | ||||||
|  |  | ||||||
|         printf("%10u.%-6u %30s type %2d code %3d value %4d\n", |  | ||||||
|                 (unsigned int)ev.time.tv_sec, (unsigned int)ev.time.tv_usec, |  | ||||||
|                 inpdev->id_pathv[idx], ev.type, ev.code, ev.value); |  | ||||||
|         count++; |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     printf("\nTotal: %d events\n", count); |  | ||||||
|     close(fdin); |  | ||||||
|     free(inpdev); |  | ||||||
| } |  | ||||||
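dump() above also documents the on-disk layout of a recording: an int32 device count, then an int32 length plus path bytes for each device, then repeated (int32 device index, struct input_event) records. A reading sketch under stated assumptions (the 16-byte event layout assumed here matches a 32-bit ARM build: two 32-bit timeval fields, u16 type, u16 code, s32 value; 64-bit builds use wider timestamps):

    import struct

    def read_revent_log(path):
        with open(path, 'rb') as fh:
            (num_devices,) = struct.unpack('<i', fh.read(4))
            devices = []
            for _ in range(num_devices):
                (length,) = struct.unpack('<i', fh.read(4))
                devices.append(fh.read(length).rstrip(b'\0').decode())
            events = []
            while True:
                header = fh.read(4)
                if len(header) < 4:
                    break  # end of file: no more (index, event) records
                (idx,) = struct.unpack('<i', header)
                sec, usec, etype, code, value = struct.unpack(
                    '<iiHHi', fh.read(16))
                events.append((devices[idx], sec + usec / 1e6,
                               etype, code, value))
        return devices, events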
|  |  | ||||||
| int replay_buffer_init(replay_buffer_t **buffer, const char *logfile) |  | ||||||
| { |  | ||||||
|     *buffer = malloc(sizeof(replay_buffer_t)); |  | ||||||
|     replay_buffer_t *buff = *buffer; |  | ||||||
|     off_t fsize  = get_file_size(logfile); |  | ||||||
|     buff->events =  (replay_event_t *)malloc((size_t)fsize); |  | ||||||
|     if (!buff->events) |  | ||||||
|         die("out of memory\n"); |  | ||||||
|  |  | ||||||
|     int fdin = open(logfile, O_RDONLY); |  | ||||||
|     if (fdin < 0) |  | ||||||
|         die("Could not open eventlog %s\n", logfile); |  | ||||||
|  |  | ||||||
|     size_t rb = read(fdin, &(buff->num_fds), sizeof(buff->num_fds)); |  | ||||||
|     if (rb!=sizeof(buff->num_fds)) |  | ||||||
|         die("problems reading eventlog\n"); |  | ||||||
|  |  | ||||||
|     buff->fds = malloc(sizeof(int) * buff->num_fds); |  | ||||||
|     if (!buff->fds) |  | ||||||
|         die("out of memory\n"); |  | ||||||
|  |  | ||||||
|     int32_t len, i; |  | ||||||
|     char path_buff[256]; // should be more than enough |  | ||||||
|     for (i = 0; i < buff->num_fds; i++) { |  | ||||||
|         memset(path_buff, 0, sizeof(path_buff)); |  | ||||||
|         rb = read(fdin, &len, sizeof(len)); |  | ||||||
|         if (rb!=sizeof(len)) |  | ||||||
|             die("problems reading eventlog\n"); |  | ||||||
|         rb = read(fdin, &path_buff[0], len); |  | ||||||
|         if (rb != len) |  | ||||||
|             die("problems reading eventlog\n"); |  | ||||||
|  |  | ||||||
|         buff->fds[i] = open(path_buff, O_WRONLY | O_NDELAY); |  | ||||||
|         if (buff->fds[i] < 0) |  | ||||||
|             die("could not open device file %s\n", path_buff); |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     struct timeval start_time; |  | ||||||
|     replay_event_t rep_ev; |  | ||||||
|     i = 0; |  | ||||||
|     while(1) { |  | ||||||
|         rb = read(fdin, &rep_ev, sizeof(rep_ev)); |  | ||||||
|         if (rb < (int)sizeof(rep_ev)) |  | ||||||
|             break; |  | ||||||
|  |  | ||||||
|         if (i == 0) { |  | ||||||
|             start_time = rep_ev.event.time; |  | ||||||
|         } |  | ||||||
|         timersub(&(rep_ev.event.time), &start_time, &(rep_ev.event.time)); |  | ||||||
|         memcpy(&(buff->events[i]), &rep_ev, sizeof(rep_ev)); |  | ||||||
|         i++; |  | ||||||
|     } |  | ||||||
|     buff->num_events = i - 1; |  | ||||||
|     close(fdin); |  | ||||||
|     return 0; |  | ||||||
| } |  | ||||||
|  |  | ||||||
| int replay_buffer_close(replay_buffer_t *buff) |  | ||||||
| { |  | ||||||
|     free(buff->fds); |  | ||||||
|     free(buff->events); |  | ||||||
|     free(buff); |  | ||||||
|     return 0; |  | ||||||
| } |  | ||||||
|  |  | ||||||
| int replay_buffer_play(replay_buffer_t *buff) |  | ||||||
| { |  | ||||||
|     int32_t i = 0, rb; |  | ||||||
|     struct timeval start_time, now, desired_time, last_event_delta, delta; |  | ||||||
|     memset(&last_event_delta, 0, sizeof(struct timeval)); |  | ||||||
|     gettimeofday(&start_time, NULL); |  | ||||||
|  |  | ||||||
|     while (i < buff->num_events) { |  | ||||||
|         gettimeofday(&now, NULL); |  | ||||||
|         timeradd(&start_time, &last_event_delta, &desired_time); |  | ||||||
|  |  | ||||||
|         if (timercmp(&desired_time, &now, >)) { |  | ||||||
|             timersub(&desired_time, &now, &delta); |  | ||||||
|             useconds_t d = (useconds_t)delta.tv_sec * 1000000 + delta.tv_usec; |  | ||||||
|             dprintf("now %u.%u desiredtime %u.%u sleeping %u uS\n", |  | ||||||
|                     (unsigned int)now.tv_sec, (unsigned int)now.tv_usec, |  | ||||||
|                     (unsigned int)desired_time.tv_sec, (unsigned int)desired_time.tv_usec, d); |  | ||||||
|             usleep(d); |  | ||||||
|         } |  | ||||||
|  |  | ||||||
|         int32_t idx = (buff->events[i]).dev_idx; |  | ||||||
|         struct input_event ev = (buff->events[i]).event; |  | ||||||
|         while((i < buff->num_events) && !timercmp(&ev.time, &last_event_delta, !=)) { |  | ||||||
|             rb = write(buff->fds[idx], &ev, sizeof(ev)); |  | ||||||
|             if (rb!=sizeof(ev)) |  | ||||||
|                 die("problems writing\n"); |  | ||||||
|             dprintf("replayed event: type %d code %d value %d\n", ev.type, ev.code, ev.value); |  | ||||||
|  |  | ||||||
|             i++; |  | ||||||
|             idx = (buff->events[i]).dev_idx; |  | ||||||
|             ev = (buff->events[i]).event; |  | ||||||
|         } |  | ||||||
|         last_event_delta = ev.time; |  | ||||||
|     } |  | ||||||
|     return 0; |  | ||||||
| } |  | ||||||
|  |  | ||||||
| void replay(const char *logfile) |  | ||||||
| { |  | ||||||
|     replay_buffer_t *replay_buffer; |  | ||||||
|     replay_buffer_init(&replay_buffer, logfile); |  | ||||||
| #ifdef ANDROID |  | ||||||
|     __android_log_write(ANDROID_LOG_INFO, "REVENT", "Replay starting"); |  | ||||||
| #endif |  | ||||||
|     replay_buffer_play(replay_buffer); |  | ||||||
| #ifdef ANDROID |  | ||||||
|     __android_log_write(ANDROID_LOG_INFO, "REVENT", "Replay complete"); |  | ||||||
| #endif |  | ||||||
|     replay_buffer_close(replay_buffer); |  | ||||||
| } |  | ||||||
|  |  | ||||||
| void usage() |  | ||||||
| { |  | ||||||
|     printf("usage:\n    revent [-h] [-v] COMMAND [OPTIONS] \n" |  | ||||||
|            "\n" |  | ||||||
|            "    Options:\n" |  | ||||||
|            "        -h  print this help message and quit.\n" |  | ||||||
|            "        -v  enable verbose output.\n" |  | ||||||
|            "\n" |  | ||||||
|            "    Commands:\n" |  | ||||||
|            "        record [-t SECONDS] [-d DEVICE] FILE\n" |  | ||||||
|            "            Record input event. stops after return on STDIN (or, optionally, \n" |  | ||||||
|            "            a fixed delay)\n" |  | ||||||
|            "\n" |  | ||||||
|            "                FILE       file into which events will be recorded.\n" |  | ||||||
|            "                -t SECONDS time, in seconds, for which to record events.\n" |  | ||||||
|            "                           if not specifed, recording will continue until\n" |  | ||||||
|            "                           return key is pressed.\n" |  | ||||||
|            "                -d DEVICE  the number of the input device form which\n" |  | ||||||
|            "                           events will be recoreded. If not specified, \n" |  | ||||||
|            "                           all available inputs will be used.\n" |  | ||||||
|            "\n" |  | ||||||
|            "        replay FILE\n" |  | ||||||
|            "            replays previously recorded events from the specified file.\n" |  | ||||||
|            "\n" |  | ||||||
|            "                FILE       file into which events will be recorded.\n" |  | ||||||
|            "\n" |  | ||||||
|            "        dump FILE\n" |  | ||||||
|            "            dumps the contents of the specified event log to STDOUT in\n" |  | ||||||
|            "            human-readable form.\n" |  | ||||||
|            "\n" |  | ||||||
|            "                FILE       event log which will be dumped.\n" |  | ||||||
|            "\n" |  | ||||||
|            "        info\n" |  | ||||||
|            "             shows info about each event char device\n" |  | ||||||
|            "\n" |  | ||||||
|        ); |  | ||||||
| } |  | ||||||
|  |  | ||||||
| void revent_args_init(revent_args_t **rargs, int argc, char** argv) |  | ||||||
| { |  | ||||||
|     *rargs = malloc(sizeof(revent_args_t)); |  | ||||||
|     revent_args_t *revent_args = *rargs; |  | ||||||
|     revent_args->mode = INVALID; |  | ||||||
|     revent_args->record_time = INT_MAX; |  | ||||||
|     revent_args->device_number = -1; |  | ||||||
|     revent_args->file = NULL; |  | ||||||
|  |  | ||||||
|     int opt; |  | ||||||
|     while ((opt = getopt(argc, argv, "ht:d:vs")) != -1) |  | ||||||
|     { |  | ||||||
|         switch (opt) { |  | ||||||
|             case 'h': |  | ||||||
|                 usage(); |  | ||||||
|                 exit(0); |  | ||||||
|                 break; |  | ||||||
|             case 't': |  | ||||||
|                 if (is_numeric(optarg)) { |  | ||||||
|                     revent_args->record_time = atoi(optarg); |  | ||||||
|                     dprintf("timeout: %d\n", revent_args->record_time); |  | ||||||
|                 } else { |  | ||||||
|                     die("-t parameter must be numeric; got %s.\n", optarg); |  | ||||||
|                 } |  | ||||||
|                 break; |  | ||||||
|             case 'd': |  | ||||||
|                 if (is_numeric(optarg)) { |  | ||||||
|                     revent_args->device_number = atoi(optarg); |  | ||||||
|                     dprintf("device: %d\n", revent_args->device_number); |  | ||||||
|                 } else { |  | ||||||
|                     die("-d parameter must be numeric; got %s.\n", optarg); |  | ||||||
|                 } |  | ||||||
|                 break; |  | ||||||
|             case 'v': |  | ||||||
|                 verbose = TRUE; |  | ||||||
|                 break; |  | ||||||
|             case 's': |  | ||||||
|                 wait_for_stdin = FALSE; |  | ||||||
|                 break; |  | ||||||
|  |  | ||||||
|             default: |  | ||||||
|                 die("Unexpected option: %c", opt); |  | ||||||
|         } |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     int next_arg = optind; |  | ||||||
|     if (next_arg == argc) { |  | ||||||
|         usage(); |  | ||||||
|         die("Must specify a command.\n"); |  | ||||||
|     } |  | ||||||
|     if (!strcmp(argv[next_arg], "record")) |  | ||||||
|         revent_args->mode = RECORD; |  | ||||||
|     else if (!strcmp(argv[next_arg], "replay")) |  | ||||||
|         revent_args->mode = REPLAY; |  | ||||||
|     else if (!strcmp(argv[next_arg], "dump")) |  | ||||||
|         revent_args->mode = DUMP; |  | ||||||
|     else if (!strcmp(argv[next_arg], "info")) |  | ||||||
|         revent_args->mode = INFO; |  | ||||||
|     else { |  | ||||||
|         usage(); |  | ||||||
|         die("Unknown command -- %s\n", argv[next_arg]); |  | ||||||
|     } |  | ||||||
|     next_arg++; |  | ||||||
|  |  | ||||||
|     if (next_arg != argc) { |  | ||||||
|         revent_args->file = argv[next_arg]; |  | ||||||
|         dprintf("file: %s\n", revent_args->file); |  | ||||||
|         next_arg++; |  | ||||||
|         if (next_arg != argc) { |  | ||||||
|             die("Trailling arguments (use -h for help).\n"); |  | ||||||
|         } |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     if ((revent_args->mode != RECORD) && (revent_args->record_time != INT_MAX)) { |  | ||||||
|         die("-t parameter is only valid for \"record\" command.\n"); |  | ||||||
|     } |  | ||||||
|     if ((revent_args->mode != RECORD) && (revent_args->device_number != -1)) { |  | ||||||
|         die("-d parameter is only valid for \"record\" command.\n"); |  | ||||||
|     } |  | ||||||
|     if ((revent_args->mode == INFO) && (revent_args->file != NULL)) { |  | ||||||
|         die("File path cannot be specified for \"info\" command.\n"); |  | ||||||
|     } |  | ||||||
|     if (((revent_args->mode == RECORD) || (revent_args->mode == REPLAY)) && (revent_args->file == NULL)) { |  | ||||||
|         die("Must specify a file for recording/replaying (use -h for help).\n"); |  | ||||||
|     } |  | ||||||
| } |  | ||||||
|  |  | ||||||
| int revent_args_close(revent_args_t *rargs) |  | ||||||
| { |  | ||||||
|         free(rargs); |  | ||||||
|         return 0; |  | ||||||
| } |  | ||||||
|  |  | ||||||
| int* fds = NULL; |  | ||||||
| FILE* fdout = NULL; |  | ||||||
| revent_args_t *rargs = NULL; |  | ||||||
| inpdev_t *inpdev = NULL; |  | ||||||
| int count; |  | ||||||
|  |  | ||||||
| void term_handler(int signum) |  | ||||||
| { |  | ||||||
|     int32_t i; |  | ||||||
|     for (i=0; i < inpdev->id_pathc; i++) |  | ||||||
|     { |  | ||||||
|         close(fds[i]); |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     fclose(fdout); |  | ||||||
|     free(fds); |  | ||||||
|     dprintf("Recorded %d events\n", count); |  | ||||||
|  |  | ||||||
|     inpdev_close(inpdev); |  | ||||||
|     revent_args_close(rargs); |  | ||||||
|     exit(0); |  | ||||||
| } |  | ||||||
|  |  | ||||||
| void record(inpdev_t *inpdev, int delay, const char *logfile) |  | ||||||
| { |  | ||||||
|     fd_set readfds; |  | ||||||
|     struct input_event ev; |  | ||||||
|     int32_t i; |  | ||||||
|     int32_t _padding = 0xdeadbeef; |  | ||||||
|     int32_t maxfd = 0; |  | ||||||
|     int32_t keydev=0; |  | ||||||
|  |  | ||||||
|     //signal handler |  | ||||||
|     struct sigaction action; |  | ||||||
|     memset(&action, 0, sizeof(struct sigaction)); |  | ||||||
|     action.sa_handler = term_handler; |  | ||||||
|     sigaction(SIGTERM, &action, NULL); |  | ||||||
|  |  | ||||||
|     fds = malloc(sizeof(int)*inpdev->id_pathc); |  | ||||||
|     if (!fds) die("out of memory\n"); |  | ||||||
|  |  | ||||||
|     fdout = fopen(logfile, "wb"); |  | ||||||
|     if (!fdout) die("Could not open eventlog %s\n", logfile); |  | ||||||
|  |  | ||||||
|     fwrite(&inpdev->id_pathc, sizeof(inpdev->id_pathc), 1, fdout); |  | ||||||
|     for (i=0; i<inpdev->id_pathc; i++) { |  | ||||||
|         int32_t len = strlen(inpdev->id_pathv[i]); |  | ||||||
|         fwrite(&len, sizeof(len), 1, fdout); |  | ||||||
|         fwrite(inpdev->id_pathv[i], len, 1, fdout); |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     for (i=0; i < inpdev->id_pathc; i++) |  | ||||||
|     { |  | ||||||
|         fds[i] = open(inpdev->id_pathv[i], O_RDONLY); |  |
|         if (fds[i] < 0) die("could not open %s\n", inpdev->id_pathv[i]); |  |
|         if (fds[i] > maxfd) maxfd = fds[i]; |  |
|         dprintf("opened %s with %d\n", inpdev->id_pathv[i], fds[i]); |  |
|     } |  | ||||||
|  |  | ||||||
|     count = 0; |  | ||||||
|     struct timeval tout; |  | ||||||
|     while(1) |  | ||||||
|     { |  | ||||||
|         FD_ZERO(&readfds); |  | ||||||
|         if (wait_for_stdin) |  | ||||||
|         { |  | ||||||
|             FD_SET(STDIN_FILENO, &readfds); |  | ||||||
|         } |  | ||||||
|         for (i=0; i < inpdev->id_pathc; i++) |  | ||||||
|             FD_SET(fds[i], &readfds); |  | ||||||
|         /* wait for input */ |  | ||||||
|         tout.tv_sec = delay; |  | ||||||
|         tout.tv_usec = 0; |  | ||||||
|         int32_t r = select(maxfd+1, &readfds, NULL, NULL, &tout); |  | ||||||
|         /* dprintf("got %d (err %d)\n", r, errno); */ |  | ||||||
|         if (!r) break; |  | ||||||
|         if (wait_for_stdin && FD_ISSET(STDIN_FILENO, &readfds)) { |  | ||||||
|             // the key-down event for the RETURN key will have been recorded, |  |
|             // so we also need to write the matching key-up (plus a SYN) |  |
|             memset(&ev, 0, sizeof(ev)); |  | ||||||
|             ev.type = EV_KEY; |  | ||||||
|             ev.code = KEY_ENTER; |  | ||||||
|             ev.value = 0; |  | ||||||
|             gettimeofday(&ev.time, NULL); |  | ||||||
|             fwrite(&keydev, sizeof(keydev), 1, fdout); |  | ||||||
|             fwrite(&_padding, sizeof(_padding), 1, fdout); |  | ||||||
|             fwrite(&ev, sizeof(ev), 1, fdout); |  | ||||||
|             memset(&ev, 0, sizeof(ev)); // SYN |  | ||||||
|             gettimeofday(&ev.time, NULL); |  | ||||||
|             fwrite(&keydev, sizeof(keydev), 1, fdout); |  | ||||||
|             fwrite(&_padding, sizeof(_padding), 1, fdout); |  | ||||||
|             fwrite(&ev, sizeof(ev), 1, fdout); |  | ||||||
|             dprintf("added fake return exiting...\n"); |  | ||||||
|             break; |  | ||||||
|         } |  | ||||||
|  |  | ||||||
|         for (i=0; i < inpdev->id_pathc; i++) |  | ||||||
|         { |  | ||||||
|             if (FD_ISSET(fds[i], &readfds)) |  | ||||||
|             { |  | ||||||
|                 dprintf("Got event from %s\n", inpdev->id_pathv[i]); |  | ||||||
|                 memset(&ev, 0, sizeof(ev)); |  | ||||||
|                 ssize_t rb = read(fds[i], (void*) &ev, sizeof(ev)); |  |
|                 dprintf("%d event: type %d code %d value %d\n", |  | ||||||
|                         (unsigned int)rb, ev.type, ev.code, ev.value); |  | ||||||
|                 if (ev.type == EV_KEY && ev.code == KEY_ENTER && ev.value == 1) |  | ||||||
|                     keydev = i; |  | ||||||
|                 fwrite(&i, sizeof(i), 1, fdout); |  | ||||||
|                 fwrite(&_padding, sizeof(_padding), 1, fdout); |  | ||||||
|                 fwrite(&ev, sizeof(ev), 1, fdout); |  | ||||||
|                 count++; |  | ||||||
|             } |  | ||||||
|         } |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     for (i=0; i < inpdev->id_pathc; i++) |  | ||||||
|     { |  | ||||||
|         close(fds[i]); |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     fclose(fdout); |  | ||||||
|     free(fds); |  | ||||||
|     dprintf("Recorded %d events\n", count); |  | ||||||
| } |  | ||||||
|  |  | ||||||
| int main(int argc, char** argv) |  | ||||||
| { |  | ||||||
|     int i; |  | ||||||
|  |  | ||||||
|     revent_args_init(&rargs, argc, argv); |  | ||||||
|  |  | ||||||
|     inpdev_init(&inpdev, rargs->device_number); |  | ||||||
|  |  | ||||||
|     switch(rargs->mode) { |  | ||||||
|         case RECORD: |  | ||||||
|             record(inpdev, rargs->record_time, rargs->file); |  | ||||||
|             break; |  | ||||||
|         case REPLAY: |  | ||||||
|             replay(rargs->file); |  | ||||||
|             break; |  | ||||||
|         case DUMP: |  | ||||||
|             dump(rargs->file); |  | ||||||
|             break; |  | ||||||
|         case INFO: |  | ||||||
|             for (i = 0; i < inpdev->id_pathc; i++) { |  | ||||||
|                 printDevProperties(inpdev->id_pathv[i]); |  | ||||||
|             } |  | ||||||
|     } |  |
|  |  | ||||||
|     inpdev_close(inpdev); |  | ||||||
|     revent_args_close(rargs); |  | ||||||
|     return 0; |  | ||||||
| } |  | ||||||
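
The event-log layout implied by record() above is: a device count, then, per
device, an int32 path length followed by the path bytes, then repeated records
of int32 dev_idx, int32 padding, and a raw struct input_event. The following is
a minimal off-target parser sketch, not part of the removed sources; it assumes
a little-endian 32-bit target (so id_pathc is a 32-bit int and struct
input_event is 16 bytes: two 32-bit timeval fields, then uint16 type, uint16
code, int32 value)::

    import struct

    def parse_revent_log(path):
        with open(path, 'rb') as fh:
            (ndevices,) = struct.unpack('<i', fh.read(4))
            devices = []
            for _ in range(ndevices):
                (length,) = struct.unpack('<i', fh.read(4))
                devices.append(fh.read(length))
            events = []
            while True:
                header = fh.read(8)  # int32 dev_idx + int32 padding
                if len(header) < 8:
                    break
                dev_idx, _padding = struct.unpack('<ii', header)
                sec, usec, etype, code, value = struct.unpack('<iiHHi', fh.read(16))
                events.append((devices[dev_idx], sec + usec / 1e6, etype, code, value))
        return devices, events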
							
								
								
									
wlauto/external/uiauto/build.sh (vendored): 21 lines removed
							| @@ -1,21 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
| #    Copyright 2013-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
|  |  | ||||||
| ant build |  | ||||||
|  |  | ||||||
| cp bin/classes/com/arm/wlauto/uiauto/BaseUiAutomation.class ../../common |  | ||||||
							
								
								
									
wlauto/external/uiauto/build.xml (vendored): 92 lines removed
							| @@ -1,92 +0,0 @@ | |||||||
| <?xml version="1.0" encoding="UTF-8"?> |  | ||||||
| <project name="com.arm.wlauto.uiauto" default="help"> |  | ||||||
|  |  | ||||||
|     <!-- The local.properties file is created and updated by the 'android' tool. |  | ||||||
|          It contains the path to the SDK. It should *NOT* be checked into |  | ||||||
|          Version Control Systems. --> |  | ||||||
|     <property file="local.properties" /> |  | ||||||
|  |  | ||||||
|     <!-- The ant.properties file can be created by you. It is only edited by the |  | ||||||
|          'android' tool to add properties to it. |  | ||||||
|          This is the place to change some Ant specific build properties. |  | ||||||
|          Here are some properties you may want to change/update: |  | ||||||
|  |  | ||||||
|          source.dir |  | ||||||
|              The name of the source directory. Default is 'src'. |  | ||||||
|          out.dir |  | ||||||
|              The name of the output directory. Default is 'bin'. |  | ||||||
|  |  | ||||||
|          For other overridable properties, look at the beginning of the rules |  | ||||||
|          files in the SDK, at tools/ant/build.xml |  | ||||||
|  |  | ||||||
|          Properties related to the SDK location or the project target should |  | ||||||
|          be updated using the 'android' tool with the 'update' action. |  | ||||||
|  |  | ||||||
|          This file is an integral part of the build system for your |  | ||||||
|          application and should be checked into Version Control Systems. |  | ||||||
|  |  | ||||||
|          --> |  | ||||||
|     <property file="ant.properties" /> |  | ||||||
|  |  | ||||||
|     <!-- if sdk.dir was not set from one of the property file, then |  | ||||||
|          get it from the ANDROID_HOME env var. |  | ||||||
|          This must be done before we load project.properties since |  | ||||||
|          the proguard config can use sdk.dir --> |  | ||||||
|     <property environment="env" /> |  | ||||||
|     <condition property="sdk.dir" value="${env.ANDROID_HOME}"> |  | ||||||
|         <isset property="env.ANDROID_HOME" /> |  | ||||||
|     </condition> |  | ||||||
|  |  | ||||||
|     <!-- The project.properties file is created and updated by the 'android' |  | ||||||
|          tool, as well as ADT. |  | ||||||
|  |  | ||||||
|          This contains project specific properties such as project target, and library |  | ||||||
|          dependencies. Lower level build properties are stored in ant.properties |  | ||||||
|          (or in .classpath for Eclipse projects). |  | ||||||
|  |  | ||||||
|          This file is an integral part of the build system for your |  | ||||||
|          application and should be checked into Version Control Systems. --> |  | ||||||
|     <loadproperties srcFile="project.properties" /> |  | ||||||
|  |  | ||||||
|     <!-- quick check on sdk.dir --> |  | ||||||
|     <fail |  | ||||||
|             message="sdk.dir is missing. Make sure to generate local.properties using 'android update project' or to inject it through the ANDROID_HOME environment variable." |  | ||||||
|             unless="sdk.dir" |  | ||||||
|     /> |  | ||||||
|  |  | ||||||
|     <!-- |  | ||||||
|         Import per project custom build rules if present at the root of the project. |  | ||||||
|         This is the place to put custom intermediary targets such as: |  | ||||||
|             -pre-build |  | ||||||
|             -pre-compile |  | ||||||
|             -post-compile (This is typically used for code obfuscation. |  | ||||||
|                            Compiled code location: ${out.classes.absolute.dir} |  | ||||||
|                            If this is not done in place, override ${out.dex.input.absolute.dir}) |  | ||||||
|             -post-package |  | ||||||
|             -post-build |  | ||||||
|             -pre-clean |  | ||||||
|     --> |  | ||||||
|     <import file="custom_rules.xml" optional="true" /> |  | ||||||
|  |  | ||||||
|     <!-- Import the actual build file. |  | ||||||
|  |  | ||||||
|          To customize existing targets, there are two options: |  | ||||||
|          - Customize only one target: |  | ||||||
|              - copy/paste the target into this file, *before* the |  | ||||||
|                <import> task. |  | ||||||
|              - customize it to your needs. |  | ||||||
|          - Customize the whole content of build.xml |  | ||||||
|              - copy/paste the content of the rules files (minus the top node) |  | ||||||
|                into this file, replacing the <import> task. |  | ||||||
|              - customize to your needs. |  | ||||||
|  |  | ||||||
|          *********************** |  | ||||||
|          ****** IMPORTANT ****** |  | ||||||
|          *********************** |  | ||||||
|          In all cases you must update the value of version-tag below to read 'custom' instead of an integer, |  | ||||||
|          in order to avoid having your file be overridden by tools such as "android update project" |  | ||||||
|     --> |  | ||||||
|     <!-- version-tag: VERSION_TAG --> |  | ||||||
|     <import file="${sdk.dir}/tools/ant/uibuild.xml" /> |  | ||||||
|  |  | ||||||
| </project> |  | ||||||
							
								
								
									
wlauto/external/uiauto/project.properties (vendored): 14 lines removed
							| @@ -1,14 +0,0 @@ | |||||||
| # This file is automatically generated by Android Tools. |  | ||||||
| # Do not modify this file -- YOUR CHANGES WILL BE ERASED! |  | ||||||
| # |  | ||||||
| # This file must be checked in Version Control Systems. |  | ||||||
| # |  | ||||||
| # To customize properties used by the Ant build system edit |  | ||||||
| # "ant.properties", and override values to adapt the script to your |  | ||||||
| # project structure. |  | ||||||
| # |  | ||||||
| # To enable ProGuard to shrink and obfuscate your code, uncomment this (available properties: sdk.dir, user.home): |  | ||||||
| #proguard.config=${sdk.dir}/tools/proguard/proguard-android.txt:proguard-project.txt |  | ||||||
|  |  | ||||||
| # Project target. |  | ||||||
| target=android-17 |  | ||||||
| @@ -1,113 +0,0 @@ | |||||||
| /*    Copyright 2013-2015 ARM Limited |  | ||||||
|  * |  | ||||||
|  * Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
|  * you may not use this file except in compliance with the License. |  | ||||||
|  * You may obtain a copy of the License at |  | ||||||
|  * |  | ||||||
|  *     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
|  * |  | ||||||
|  * Unless required by applicable law or agreed to in writing, software |  | ||||||
|  * distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
|  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
|  * See the License for the specific language governing permissions and |  | ||||||
|  * limitations under the License. |  | ||||||
| */ |  | ||||||
|  |  | ||||||
|  |  | ||||||
| package com.arm.wlauto.uiauto; |  | ||||||
|  |  | ||||||
| import java.io.File; |  | ||||||
| import java.io.BufferedReader; |  | ||||||
| import java.io.InputStreamReader; |  | ||||||
| import java.util.concurrent.TimeoutException; |  | ||||||
|  |  | ||||||
| import android.app.Activity; |  | ||||||
| import android.os.Bundle; |  | ||||||
|  |  | ||||||
| // Import the uiautomator libraries |  | ||||||
| import com.android.uiautomator.core.UiObject; |  | ||||||
| import com.android.uiautomator.core.UiObjectNotFoundException; |  | ||||||
| import com.android.uiautomator.core.UiScrollable; |  | ||||||
| import com.android.uiautomator.core.UiSelector; |  | ||||||
| import com.android.uiautomator.testrunner.UiAutomatorTestCase; |  | ||||||
|  |  | ||||||
| public class BaseUiAutomation extends UiAutomatorTestCase {    |  | ||||||
|  |  | ||||||
|  |  | ||||||
|     public void sleep(int second) { |  | ||||||
|         super.sleep(second * 1000); |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     public boolean takeScreenshot(String name) { |  | ||||||
|         Bundle params = getParams(); |  | ||||||
|         String png_dir = params.getString("workdir"); |  |
|  |  | ||||||
|         try { |  | ||||||
|             return getUiDevice().takeScreenshot(new File(png_dir, name + ".png")); |  | ||||||
|         } catch(NoSuchMethodError e) { |  | ||||||
|             return true; |  | ||||||
|         } |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     public void waitText(String text) throws UiObjectNotFoundException { |  | ||||||
|         waitText(text, 600); |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     public void waitText(String text, int second) throws UiObjectNotFoundException { |  | ||||||
|         UiSelector selector = new UiSelector(); |  | ||||||
|         UiObject text_obj = new UiObject(selector.text(text) |  | ||||||
|                                        .className("android.widget.TextView")); |  | ||||||
|         waitObject(text_obj, second); |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     public void waitObject(UiObject obj) throws UiObjectNotFoundException { |  | ||||||
|         waitObject(obj, 600); |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     public void waitObject(UiObject obj, int second) throws UiObjectNotFoundException { |  | ||||||
|         if (! obj.waitForExists(second * 1000)){ |  | ||||||
|             throw new UiObjectNotFoundException("UiObject is not found: " |  | ||||||
|                     + obj.getSelector().toString()); |  | ||||||
|         } |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     public boolean waitUntilNoObject(UiObject obj, int second) { |  | ||||||
|         return obj.waitUntilGone(second * 1000); |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     public void clearLogcat() throws Exception { |  | ||||||
|         Runtime.getRuntime().exec("logcat -c"); |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     public void waitForLogcatText(String searchText, long timeout) throws Exception { |  | ||||||
|         long startTime = System.currentTimeMillis(); |  | ||||||
|         Process process = Runtime.getRuntime().exec("logcat"); |  | ||||||
|         BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream())); |  | ||||||
|         String line; |  | ||||||
|  |  | ||||||
|         long currentTime = System.currentTimeMillis(); |  | ||||||
|         boolean found = false; |  | ||||||
|         while ((currentTime - startTime) < timeout){  |  | ||||||
|             sleep(2);  // poll every two seconds |  | ||||||
|  |  | ||||||
|             while((line=reader.readLine())!=null) { |  | ||||||
|                 if (line.contains(searchText)) { |  | ||||||
|                     found = true; |  | ||||||
|                     break; |  | ||||||
|                 } |  | ||||||
|             } |  | ||||||
|  |  | ||||||
|             if (found) { |  | ||||||
|                 break; |  | ||||||
|             } |  | ||||||
|             currentTime = System.currentTimeMillis(); |  | ||||||
|         } |  | ||||||
|  |  | ||||||
|         process.destroy(); |  | ||||||
|  |  | ||||||
|         if (!found) { |  |
|             throw new TimeoutException(String.format("Timed out waiting for Logcat text \"%s\"", searchText)); |  |
|         } |  |
|     } |  | ||||||
| } |  | ||||||
|  |  | ||||||
| @@ -1,35 +0,0 @@ | |||||||
| #    Copyright 2013-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
| from wlauto.core import instrumentation |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def instrument_is_installed(instrument): |  | ||||||
|     """Returns ``True`` if the specified instrument is installed, and ``False`` |  | ||||||
|     other wise. The insturment maybe specified either as a name or a subclass (or |  | ||||||
|     instance of subclass) of :class:`wlauto.core.Instrument`.""" |  | ||||||
|     return instrumentation.is_installed(instrument) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def instrument_is_enabled(instrument): |  | ||||||
|     """Returns ``True`` if the specified instrument is installed and is currently |  | ||||||
|     enabled, and ``False`` other wise. The insturment maybe specified either |  | ||||||
|     as a name or a subclass (or instance of subclass) of |  | ||||||
|     :class:`wlauto.core.Instrument`.""" |  | ||||||
|     return instrumentation.is_enabled(instrument) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def clear_instrumentation(): |  | ||||||
|     instrumentation.installed = [] |  | ||||||
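
A minimal usage sketch for the three helpers above (the import location is an
assumption, not taken from the removed sources)::

    # hypothetical import path for the helpers defined above
    from wlauto.instrumentation import (instrument_is_installed,
                                        instrument_is_enabled,
                                        clear_instrumentation)

    if instrument_is_installed('coreutil') and not instrument_is_enabled('coreutil'):
        print('coreutil is installed but currently disabled')

    clear_instrumentation()  # forget all installed instruments (e.g. between test runs)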
| @@ -1,278 +0,0 @@ | |||||||
| #    Copyright 2013-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
| import os |  | ||||||
| import sys |  | ||||||
| import re |  | ||||||
| import time |  | ||||||
| import shutil |  | ||||||
| import logging |  | ||||||
| import threading |  | ||||||
| import subprocess |  | ||||||
| import tempfile |  | ||||||
| import csv |  | ||||||
|  |  | ||||||
| from wlauto import Instrument, Parameter |  | ||||||
| from wlauto.core.execution import ExecutionContext |  | ||||||
| from wlauto.exceptions import InstrumentError, WorkerThreadError |  | ||||||
| from wlauto.core import signal |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class CoreUtilization(Instrument): |  | ||||||
|  |  | ||||||
|     name = 'coreutil' |  | ||||||
|     description = """ |  | ||||||
|     Measures CPU core activity during workload execution in terms of the percentage of time a number |  |
|     of cores were utilized above the specified threshold. |  |
|  |  | ||||||
|     This instrument generates a ``coreutil.csv`` report in the workload's output directory. The report is |  |
|     formatted as follows:: |  | ||||||
|  |  | ||||||
|         <threshold,1core,2core,3core,4core |  | ||||||
|         18.098132,38.650248000000005,10.736180000000001,3.6809760000000002,28.834312000000001 |  | ||||||
|  |  | ||||||
|     Interpretation of the result: |  |
|  |  |
|      - 38.65% of the total time only a single core was running at or above the threshold value |  |
|      - 10.736% of the total time two cores were running simultaneously at or above the threshold value |  |
|      - 3.6809% of the total time three cores were running simultaneously at or above the threshold value |  |
|      - 28.8314% of the total time four cores were running simultaneously at or above the threshold value |  |
|      - 18.098% of the total time all cores were running below the threshold value. |  |
|  |  |
|     .. note:: This instrument doesn't work on ARM big.LITTLE IKS implementations. |  |
|  |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     parameters = [ |  | ||||||
|         Parameter('threshold', kind=int, default=50, |  | ||||||
|                   constraint=lambda x: 0 < x <= 100, |  | ||||||
|                   description='Cores with percentage utilization above this value will be considered ' |  | ||||||
|                               'as "utilized". This value may need to be adjusted based on the background ' |  | ||||||
|                               'activity and the intensity of the workload being instrumented (e.g. it may ' |  | ||||||
|                               'need to be lowered for low-intensity workloads such as video playback).' |  | ||||||
|                   ) |  | ||||||
|     ] |  | ||||||
|  |  | ||||||
|     def __init__(self, device, **kwargs): |  | ||||||
|         super(CoreUtilization, self).__init__(device, **kwargs) |  | ||||||
|         self.collector = None |  | ||||||
|         self.output_dir = None |  | ||||||
|         self.cores = None |  | ||||||
|         self.output_artifact_registered = False |  | ||||||
|  |  | ||||||
|     def setup(self, context): |  | ||||||
|         ''' Creates the ProcCollect collector for this run ''' |  |
|         self.output_dir = context.output_directory |  | ||||||
|         self.collector = ProcCollect(self.device, self.logger, self.output_dir) |  | ||||||
|         self.cores = self.device.number_of_cores |  | ||||||
|  |  | ||||||
|     def start(self, context):  # pylint: disable=W0613 |  | ||||||
|         ''' Starts collecting data once the workload starts ''' |  | ||||||
|         self.logger.debug('Starting to collect /proc/stat data') |  | ||||||
|         self.collector.start() |  | ||||||
|  |  | ||||||
|     def stop(self, context):  # pylint: disable=W0613 |  | ||||||
|         ''' Stops collecting data once the workload stops ''' |  | ||||||
|         self.logger.debug('Stopping /proc/stat data collection') |  | ||||||
|         self.collector.stop() |  | ||||||
|  |  | ||||||
|     def update_result(self, context): |  | ||||||
|         ''' updates result into coreutil.csv ''' |  | ||||||
|         self.collector.join()    # wait for "proc.txt" to generate. |  | ||||||
|         context.add_artifact('proctxt', 'proc.txt', 'raw') |  | ||||||
|         calc = Calculator(self.cores, self.threshold, context)  # pylint: disable=E1101 |  | ||||||
|         calc.calculate() |  | ||||||
|         if not self.output_artifact_registered: |  | ||||||
|             context.add_run_artifact('cpuutil', 'coreutil.csv', 'data') |  | ||||||
|             self.output_artifact_registered = True |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ProcCollect(threading.Thread): |  | ||||||
|     ''' Dumps data into proc.txt ''' |  | ||||||
|  |  | ||||||
|     def __init__(self, device, logger, out_dir): |  | ||||||
|         super(ProcCollect, self).__init__() |  | ||||||
|         self.device = device |  | ||||||
|         self.logger = logger |  | ||||||
|         self.dire = out_dir |  | ||||||
|         self.stop_signal = threading.Event() |  | ||||||
|         self.command = 'cat /proc/stat' |  | ||||||
|         self.exc = None |  | ||||||
|  |  | ||||||
|     def run(self): |  | ||||||
|         try: |  | ||||||
|             self.stop_signal.clear() |  | ||||||
|             _, temp_file = tempfile.mkstemp() |  | ||||||
|             self.logger.debug('temp file : {}'.format(temp_file)) |  | ||||||
|             with open(temp_file, 'wb') as tempfp: |  | ||||||
|                 while not self.stop_signal.is_set(): |  | ||||||
|                     tempfp.write(self.device.execute(self.command)) |  | ||||||
|                     tempfp.write('\n') |  | ||||||
|                     time.sleep(0.5) |  | ||||||
|             raw_file = os.path.join(self.dire, 'proc.txt') |  | ||||||
|             shutil.copy(temp_file, raw_file) |  | ||||||
|             os.unlink(temp_file) |  | ||||||
|         except Exception, error:  # pylint: disable=W0703 |  | ||||||
|             self.logger.warning('Exception on collector thread : {}({})'.format(error.__class__.__name__, error)) |  | ||||||
|             self.exc = WorkerThreadError(self.name, sys.exc_info()) |  | ||||||
|  |  | ||||||
|     def stop(self): |  | ||||||
|         '''Executed once the workload stops''' |  | ||||||
|         self.stop_signal.set() |  | ||||||
|         if self.exc is not None: |  | ||||||
|             raise self.exc  # pylint: disable=E0702 |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class Calculator(object): |  | ||||||
|     """ |  | ||||||
|     Read /proc/stat and dump data into ``proc.txt`` which is parsed to generate ``coreutil.csv`` |  | ||||||
|     Sample output from 'proc.txt' :: |  | ||||||
|  |  | ||||||
|         ---------------------------------------------------------------------- |  | ||||||
|         cpu  9853753 51448 3248855 12403398 4241 111 14996 0 0 0 |  | ||||||
|         cpu0 1585220 7756 1103883 4977224 552 97 10505 0 0 0 |  | ||||||
|         cpu1 2141168 7243 564347 972273 504 4 1442 0 0 0 |  | ||||||
|         cpu2 1940681 7994 651946 1005534 657 3 1424 0 0 0 |  | ||||||
|         cpu3 1918013 8833 667782 1012249 643 3 1326 0 0 0 |  | ||||||
|         cpu4 165429 5363 50289 1118910 474 0 148 0 0 0 |  | ||||||
|         cpu5 1661299 4910 126654 1104018 480 0 53 0 0 0 |  | ||||||
|         cpu6 333642 4657 48296 1102531 482 2 55 0 0 0 |  | ||||||
|         cpu7 108299 4691 35656 1110658 448 0 41 0 0 0 |  | ||||||
|         ---------------------------------------------------------------------- |  | ||||||
|         Description: |  | ||||||
|  |  | ||||||
|         1st column  : cpu_id( cpu0, cpu1, cpu2,......) |  | ||||||
|         All following columns represent an amount of time, measured in units of USER_HZ: |  |
|         2nd column  : Time spent in user mode |  | ||||||
|         3rd column  : Time spent in user mode with low priority |  | ||||||
|         4th column  : Time spent in system mode |  | ||||||
|         5th column  : Time spent in idle task |  | ||||||
|         6th column  : Time waiting for i/o to complete |  |
|         7th column  : Time servicing interrupts |  | ||||||
|         8th column  : Time servicing softirqs |  | ||||||
|         9th column  : Stolen time is the time spent in other operating systems |  | ||||||
|         10th column : Time spent running a virtual CPU |  | ||||||
|         11th column : Time spent running a niced guest |  | ||||||
|  |  | ||||||
|         ---------------------------------------------------------------------------- |  | ||||||
|  |  | ||||||
|     Procedure to calculate instantaneous CPU utilization: |  | ||||||
|  |  | ||||||
|     1) Subtract two consecutive samples for every column (except the 1st) |  |
|     2) Sum all the values except "Time spent in idle task" |  |
|     3) CPU utilization (%) = (value obtained in 2 / sum of all the values) * 100 |  |
|        (a short worked example follows this file's listing) |  |
|  |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     idle_time_index = 3 |  | ||||||
|  |  | ||||||
|     def __init__(self, cores, threshold, context): |  | ||||||
|         self.cores = cores |  | ||||||
|         self.threshold = threshold |  | ||||||
|         self.context = context |  | ||||||
|         self.cpu_util = None  # Store CPU utilization for each core |  | ||||||
|         self.active = None  # Store active time(total time - idle) |  | ||||||
|         self.total = None   # Store the total amount of time (in USER_HZ) |  | ||||||
|         self.output = None |  | ||||||
|         self.cpuid_regex = re.compile(r'cpu(\d+)') |  | ||||||
|         self.outfile = os.path.join(context.run_output_directory, 'coreutil.csv') |  | ||||||
|         self.infile = os.path.join(context.output_directory, 'proc.txt') |  | ||||||
|  |  | ||||||
|     def calculate(self): |  | ||||||
|         self.calculate_total_active() |  | ||||||
|         self.calculate_core_utilization() |  | ||||||
|         self.generate_csv(self.context) |  | ||||||
|  |  | ||||||
|     def calculate_total_active(self): |  | ||||||
|         """ Read proc.txt file and calculate 'self.active' and 'self.total' """ |  | ||||||
|         all_cores = set(xrange(self.cores)) |  | ||||||
|         self.total = [[] for _ in all_cores] |  | ||||||
|         self.active = [[] for _ in all_cores] |  | ||||||
|         with open(self.infile, "r") as fh: |  | ||||||
|             # parsing logic: |  | ||||||
|             # - keep spinning through lines until see the cpu summary line |  | ||||||
|             #   (taken to indicate start of new record). |  | ||||||
|             # - extract values for individual cores after the summary line, |  | ||||||
|             #   keeping  track of seen cores until no more lines match 'cpu\d+' |  | ||||||
|             #   pattern. |  | ||||||
|             # - For every core not seen in this record, pad zeros. |  | ||||||
|             # - Loop |  | ||||||
|             try: |  | ||||||
|                 while True: |  | ||||||
|                     line = fh.next() |  | ||||||
|                     if not line.startswith('cpu '): |  | ||||||
|                         continue |  | ||||||
|  |  | ||||||
|                     seen_cores = set([]) |  | ||||||
|                     line = fh.next() |  | ||||||
|                     match = self.cpuid_regex.match(line) |  | ||||||
|                     while match: |  | ||||||
|                         cpu_id = int(match.group(1)) |  | ||||||
|                         seen_cores.add(cpu_id) |  | ||||||
|                         times = map(int, line.split()[1:])  # first column is the cpu_id |  | ||||||
|                         self.total[cpu_id].append(sum(times)) |  | ||||||
|                         self.active[cpu_id].append(sum(times) - times[self.idle_time_index]) |  | ||||||
|                         line = fh.next() |  | ||||||
|                         match = self.cpuid_regex.match(line) |  | ||||||
|  |  | ||||||
|                     for unseen_core in all_cores - seen_cores: |  | ||||||
|                         self.total[unseen_core].append(0) |  | ||||||
|                         self.active[unseen_core].append(0) |  | ||||||
|             except StopIteration:  # EOF |  | ||||||
|                 pass |  | ||||||
|  |  | ||||||
|     def calculate_core_utilization(self): |  | ||||||
|         """Calculates CPU utilization""" |  | ||||||
|         diff_active = [[] for _ in xrange(self.cores)] |  | ||||||
|         diff_total = [[] for _ in xrange(self.cores)] |  | ||||||
|         self.cpu_util = [[] for _ in xrange(self.cores)] |  | ||||||
|         for i in xrange(self.cores): |  | ||||||
|             for j in xrange(len(self.active[i]) - 1): |  | ||||||
|                 temp = self.active[i][j + 1] - self.active[i][j] |  | ||||||
|                 diff_active[i].append(temp) |  | ||||||
|                 diff_total[i].append(self.total[i][j + 1] - self.total[i][j]) |  | ||||||
|                 if diff_total[i][j] == 0: |  | ||||||
|                     self.cpu_util[i].append(0) |  | ||||||
|                 else: |  | ||||||
|                     temp = float(diff_active[i][j]) / diff_total[i][j] |  | ||||||
|                     self.cpu_util[i].append(round((float(temp)) * 100, 2)) |  | ||||||
|  |  | ||||||
|     def generate_csv(self, context): |  | ||||||
|         """ generates ``coreutil.csv``""" |  | ||||||
|         self.output = [0 for _ in xrange(self.cores + 1)] |  | ||||||
|         for i in range(len(self.cpu_util[0])): |  | ||||||
|             count = 0 |  | ||||||
|             for j in xrange(len(self.cpu_util)): |  | ||||||
|                 if self.cpu_util[j][i] > round(float(self.threshold), 2): |  | ||||||
|                     count = count + 1 |  | ||||||
|             self.output[count] += 1 |  | ||||||
|         if self.cpu_util[0]: |  | ||||||
|             scale_factor = round((float(1) / len(self.cpu_util[0])) * 100, 6) |  | ||||||
|         else: |  | ||||||
|             scale_factor = 0 |  | ||||||
|         for i in xrange(len(self.output)): |  | ||||||
|             self.output[i] = self.output[i] * scale_factor |  | ||||||
|         with open(self.outfile, 'a+') as tem: |  | ||||||
|             writer = csv.writer(tem) |  | ||||||
|             reader = csv.reader(tem) |  | ||||||
|             if sum(1 for row in reader) == 0: |  | ||||||
|                 row = ['workload', 'iteration', '<threshold'] |  | ||||||
|                 for i in xrange(1, self.cores + 1): |  | ||||||
|                     row.append('{}core'.format(i)) |  | ||||||
|                 writer.writerow(row) |  | ||||||
|             row = [context.result.workload.name, context.result.iteration] |  | ||||||
|             row.extend(self.output) |  | ||||||
|             writer.writerow(row) |  | ||||||
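
The three-step procedure from the Calculator docstring can be checked by hand.
A small worked example with hypothetical sample values (not from a real trace)::

    # two consecutive /proc/stat rows for one core, in USER_HZ units
    sample1 = [1585220, 7756, 1103883, 4977224, 552, 97, 10505, 0, 0, 0]
    sample2 = [1585720, 7756, 1104183, 4977324, 552, 97, 10505, 0, 0, 0]

    diff = [b - a for a, b in zip(sample1, sample2)]  # step 1: per-column deltas
    idle_time_index = 3
    active = sum(diff) - diff[idle_time_index]        # step 2: non-idle time
    util = float(active) / sum(diff) * 100            # step 3: utilization in percent
    # diff sums to 900, of which 100 is idle, so util == 800.0 / 900 * 100 ~= 88.89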
| @@ -1,416 +0,0 @@ | |||||||
| #    Copyright 2013-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
| # pylint: disable=W0613,E1101,access-member-before-definition,attribute-defined-outside-init |  | ||||||
| from __future__ import division |  | ||||||
| import os |  | ||||||
| import sys |  | ||||||
| import csv |  | ||||||
| import shutil |  | ||||||
| import tempfile |  | ||||||
| from collections import OrderedDict, defaultdict |  | ||||||
| from string import ascii_lowercase |  | ||||||
|  |  | ||||||
| from multiprocessing import Process, Queue |  | ||||||
|  |  | ||||||
| from wlauto import Instrument, Parameter |  | ||||||
| from wlauto.core import signal |  | ||||||
| from wlauto.exceptions import ConfigError, InstrumentError, DeviceError |  | ||||||
| from wlauto.utils.misc import ensure_directory_exists as _d |  | ||||||
| from wlauto.utils.types import list_of_ints, list_of_strs, boolean |  | ||||||
|  |  | ||||||
| # pylint: disable=wrong-import-position,wrong-import-order |  | ||||||
| daqpower_path = os.path.join(os.path.dirname(__file__), '..', '..', 'external', 'daq_server', 'src') |  | ||||||
| sys.path.insert(0, daqpower_path) |  | ||||||
| try: |  | ||||||
|     import daqpower.client as daq  # pylint: disable=F0401 |  | ||||||
|     from daqpower.config import DeviceConfiguration, ServerConfiguration, ConfigurationError  # pylint: disable=F0401 |  | ||||||
| except ImportError, e: |  | ||||||
|     daq, DeviceConfiguration, ServerConfiguration, ConfigurationError = None, None, None, None |  | ||||||
|     import_error_mesg = e.message |  | ||||||
| sys.path.pop(0) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| UNITS = { |  | ||||||
|     'energy': 'Joules', |  | ||||||
|     'power': 'Watts', |  | ||||||
|     'voltage': 'Volts', |  | ||||||
| } |  | ||||||
|  |  | ||||||
|  |  | ||||||
| GPIO_ROOT = '/sys/class/gpio' |  | ||||||
| TRACE_MARKER_PATH = '/sys/kernel/debug/tracing/trace_marker' |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def dict_or_bool(value): |  | ||||||
|     """ |  | ||||||
|     Ensures that either a dictionary or a boolean is used as a parameter. |  | ||||||
|     """ |  | ||||||
|     if isinstance(value, dict): |  | ||||||
|         return value |  | ||||||
|     return boolean(value) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class Daq(Instrument): |  | ||||||
|  |  | ||||||
|     name = 'daq' |  | ||||||
|     description = """ |  | ||||||
|     DAQ instrument obtains the power consumption of the target device's cores as |  |
|     measured by a National Instruments Data Acquisition (DAQ) device. |  |
|  |  | ||||||
|     WA communicates with a DAQ device server running on a Windows machine |  | ||||||
|     (Please refer to :ref:`daq_setup`) over a network. You must specify the IP |  | ||||||
|     address and port the server is listening on in the config file as follows :: |  | ||||||
|  |  | ||||||
|         daq_server_host = '10.1.197.176' |  | ||||||
|         daq_server_port = 45677 |  | ||||||
|  |  | ||||||
|     These values will be output by the server when you run it on Windows. |  | ||||||
|  |  | ||||||
|     You must also specify the values of resistors (in Ohms) across which the |  | ||||||
|     voltages are measured (Please refer to :ref:`daq_setup`). The values should be |  | ||||||
|     specified as a list with an entry for each resistor, e.g.:: |  | ||||||
|  |  | ||||||
|         daq_resistor_values = [0.005, 0.005] |  | ||||||
|  |  | ||||||
|     In addition to this mandatory configuration, you can also optionally specify the |  | ||||||
|     following:: |  | ||||||
|  |  | ||||||
|         :daq_labels: Labels to be used for ports. Defaults to ``'PORT_<pnum>'``, where |  | ||||||
|                      'pnum' is the number of the port. |  | ||||||
|         :daq_device_id: The ID under which the DAQ is registered with the driver. |  | ||||||
|                         Defaults to ``'Dev1'``. |  | ||||||
|         :daq_v_range: Specifies the voltage range for the SOC voltage channel on the DAQ |  | ||||||
|                       (please refer to :ref:`daq_setup` for details). Defaults to ``2.5``. |  | ||||||
|         :daq_dv_range: Specifies the voltage range for the resistor voltage channel on |  | ||||||
|                        the DAQ (please refer to :ref:`daq_setup` for details). |  | ||||||
|                        Defaults to ``0.2``. |  | ||||||
|         :daq_sampling_rate: DAQ sampling rate. DAQ will take this many samples each |  |
|                             second. Please note that this may be limited by your DAQ model |  |
|                             and the number of ports you're measuring (again, see |  |
|                             :ref:`daq_setup`). Defaults to ``10000``. |  |
|         :daq_channel_map: Represents mapping from logical AI channel number to physical |  |
|                           connector on the DAQ (varies between DAQ models). The default |  | ||||||
|                           assumes DAQ 6363 and similar with AI channels on connectors |  | ||||||
|                           0-7 and 16-23. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     parameters = [ |  | ||||||
|         Parameter('server_host', kind=str, default='localhost', |  | ||||||
|                   global_alias='daq_server_host', |  | ||||||
|                   description='The host address of the machine that runs the daq server with ' |  |
|                               'which the instrument communicates.'), |  |
|         Parameter('server_port', kind=int, default=45677, |  | ||||||
|                   global_alias='daq_server_port', |  | ||||||
|                   description='The port number on which the daq server listens for ' |  |
|                               'connections from the instrument.'), |  |
|         Parameter('device_id', kind=str, default='Dev1', |  | ||||||
|                   global_alias='daq_device_id', |  | ||||||
|                   description='The ID under which the DAQ is registered with the driver.'), |  | ||||||
|         Parameter('v_range', kind=float, default=2.5, |  | ||||||
|                   global_alias='daq_v_range', |  | ||||||
|                   description='Specifies the voltage range for the SOC voltage channel on the DAQ ' |  | ||||||
|                               '(please refer to :ref:`daq_setup` for details).'), |  | ||||||
|         Parameter('dv_range', kind=float, default=0.2, |  | ||||||
|                   global_alias='daq_dv_range', |  | ||||||
|                   description='Specifies the voltage range for the resistor voltage channel on ' |  | ||||||
|                               'the DAQ (please refer to :ref:`daq_setup` for details).'), |  | ||||||
|         Parameter('sampling_rate', kind=int, default=10000, |  | ||||||
|                   global_alias='daq_sampling_rate', |  | ||||||
|                   description='DAQ sampling rate. DAQ will take this many samples each ' |  | ||||||
|                               'second. Please note that this may be limited by your DAQ model ' |  |
|                               'and the number of ports you\'re measuring (again, see ' |  |
|                               ':ref:`daq_setup`)'), |  | ||||||
|         Parameter('resistor_values', kind=list, mandatory=True, |  | ||||||
|                   global_alias='daq_resistor_values', |  | ||||||
|                   description='The values of resistors (in Ohms) across which the voltages are measured on ' |  | ||||||
|                               'each port.'), |  | ||||||
|         Parameter('channel_map', kind=list_of_ints, default=(0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23), |  | ||||||
|                   global_alias='daq_channel_map', |  | ||||||
|                   description='Represents mapping from logical AI channel number to physical ' |  |
|                               'connector on the DAQ (varies between DAQ models). The default ' |  | ||||||
|                               'assumes DAQ 6363 and similar with AI channels on connectors ' |  | ||||||
|                               '0-7 and 16-23.'), |  | ||||||
|         Parameter('labels', kind=list_of_strs, |  | ||||||
|                   global_alias='daq_labels', |  | ||||||
|                   description='List of port labels. If specified, the length of the list must match ' |  |
|                               'the length of ``resistor_values``. Defaults to "PORT_<pnum>", where ' |  | ||||||
|                               '"pnum" is the number of the port.'), |  | ||||||
|         Parameter('negative_samples', default='keep', allowed_values=['keep', 'zero', 'drop', 'abs'], |  | ||||||
|                   global_alias='daq_negative_samples', |  | ||||||
|                   description=""" |  | ||||||
|                   Specifies how negative power samples should be handled. The following |  | ||||||
|                   methods are possible: |  | ||||||
|  |  | ||||||
|                     :keep: keep them as they are |  | ||||||
|                     :zero: turn negative values to zero |  | ||||||
|                     :drop: drop samples if they contain negative values. *warning:* this may result in |  |
|                            port files containing different numbers of samples |  |
|                     :abs: take the absolute value of negative samples |  |
|  |  | ||||||
|                   """), |  | ||||||
|         Parameter('gpio_sync', kind=int, constraint=lambda x: x > 0, |  | ||||||
|                   description=""" |  | ||||||
|                   If specified, the instrument will simultaneously set the |  | ||||||
|                   specified GPIO pin high and put a marker into ftrace. This is |  |
|                   to facilitate syncing kernel trace events to the DAQ power |  |
|                   trace. |  |
|                   """), |  | ||||||
|         Parameter('merge_channels', kind=dict_or_bool, default=False, |  | ||||||
|                   description=""" |  | ||||||
|                   If set to ``True``, channels with consecutive letter suffixes will be summed. |  | ||||||
|                   e.g. If you have channels A7a, A7b, A7c, A15a, A15b they will be summed to A7, A15 |  | ||||||
|  |  | ||||||
|                   You can also manually specify the names of the channels to be merged and the name |  |
|                   of the result like so: |  |
|  |  |
|                   merge_channels: |  |
|                        A15: [A15dvfs, A15ram] |  |
|                        NonCPU: [GPU, RoS, Mem] |  |
|  |  |
|                   In the above example the DAQ channels labeled A15dvfs and A15ram will be summed |  |
|                   together with the result saved as channel 'A15'; GPU, RoS and Mem will be summed |  |
|                   to 'NonCPU' (a sketch of the suffix-merging rule follows the parameter list below). |  |
|                   """) |  | ||||||
|     ] |  | ||||||
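
A sketch of the "consecutive letter suffix" merging behaviour described for
``merge_channels`` above (an illustration of the rule, not the removed
implementation)::

    def merge_channel_suffixes(samples):
        # samples maps a channel label (e.g. 'A7a') to a list of readings of
        # equal length; labels that differ only in a trailing lower-case letter
        # are summed element-wise under the common prefix ('A7a' + 'A7b' -> 'A7')
        merged = {}
        for label in sorted(samples):
            base = label[:-1] if label[-1].islower() else label
            if base in merged:
                merged[base] = [a + b for a, b in zip(merged[base], samples[label])]
            else:
                merged[base] = list(samples[label])
        return merged

    # merge_channel_suffixes({'A7a': [1.0], 'A7b': [2.0], 'GPU': [3.0]})
    # -> {'A7': [3.0], 'GPU': [3.0]}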
|  |  | ||||||
|     def initialize(self, context): |  | ||||||
|         status, devices = self._execute_command('list_devices') |  | ||||||
|         if status == daq.Status.OK and not devices: |  | ||||||
|             raise InstrumentError('DAQ: server did not report any devices registered with the driver.') |  | ||||||
|         self._results = OrderedDict() |  | ||||||
|         self.gpio_path = None |  | ||||||
|         if self.gpio_sync: |  | ||||||
|             if not self.device.file_exists(GPIO_ROOT): |  | ||||||
|                 raise InstrumentError('GPIO sysfs not enabled on the device.') |  | ||||||
|             try: |  | ||||||
|                 export_path = self.device.path.join(GPIO_ROOT, 'export') |  | ||||||
|                 self.device.write_value(export_path, self.gpio_sync, verify=False) |  | ||||||
|                 pin_root = self.device.path.join(GPIO_ROOT, 'gpio{}'.format(self.gpio_sync)) |  | ||||||
|                 direction_path = self.device.path.join(pin_root, 'direction') |  | ||||||
|                 self.device.write_value(direction_path, 'out') |  | ||||||
|                 self.gpio_path = self.device.path.join(pin_root, 'value') |  | ||||||
|                 self.device.write_value(self.gpio_path, 0, verify=False) |  | ||||||
|                 signal.connect(self.insert_start_marker, signal.BEFORE_WORKLOAD_EXECUTION, priority=11) |  | ||||||
|                 signal.connect(self.insert_stop_marker, signal.AFTER_WORKLOAD_EXECUTION, priority=11) |  | ||||||
|             except DeviceError as e: |  | ||||||
|                 raise InstrumentError('Could not configure GPIO on device: {}'.format(e)) |  | ||||||
|  |  | ||||||
|     def setup(self, context): |  | ||||||
|         self.logger.debug('Initialising session.') |  | ||||||
|         self._execute_command('configure', config=self.device_config) |  | ||||||
|  |  | ||||||
|     def slow_start(self, context): |  | ||||||
|         self.logger.debug('Starting measurement collection.') |  | ||||||
|         self._execute_command('start') |  | ||||||
|  |  | ||||||
|     def slow_stop(self, context): |  | ||||||
|         self.logger.debug('Stopping measurement collection.') |  | ||||||
|         self._execute_command('stop') |  | ||||||
|  |  | ||||||
|     def update_result(self, context):  # pylint: disable=R0914 |  | ||||||
|         self.logger.debug('Downloading data files.') |  | ||||||
|         output_directory = _d(os.path.join(context.output_directory, 'daq')) |  | ||||||
|         self._execute_command('get_data', output_directory=output_directory) |  | ||||||
|  |  | ||||||
|         if self.merge_channels: |  | ||||||
|             self._merge_channels(context) |  | ||||||
|  |  | ||||||
|         for entry in os.listdir(output_directory): |  | ||||||
|             context.add_iteration_artifact('DAQ_{}'.format(os.path.splitext(entry)[0]), |  | ||||||
|                                            path=os.path.join('daq', entry), |  | ||||||
|                                            kind='data', |  | ||||||
|                                            description='DAQ power measurements.') |  | ||||||
|             port = os.path.splitext(entry)[0] |  | ||||||
|             path = os.path.join(output_directory, entry) |  | ||||||
|             key = (context.spec.id, context.spec.label, context.current_iteration) |  | ||||||
|             if key not in self._results: |  | ||||||
|                 self._results[key] = {} |  | ||||||
|  |  | ||||||
|             temp_file = os.path.join(tempfile.gettempdir(), entry) |  | ||||||
|             writer, wfh = None, None |  | ||||||
|  |  | ||||||
|             with open(path) as fh: |  | ||||||
|                 if self.negative_samples != 'keep': |  | ||||||
|                     wfh = open(temp_file, 'wb') |  | ||||||
|                     writer = csv.writer(wfh) |  | ||||||
|  |  | ||||||
|                 reader = csv.reader(fh) |  | ||||||
|                 metrics = reader.next() |  | ||||||
|                 if writer: |  | ||||||
|                     writer.writerow(metrics) |  | ||||||
|                 self._metrics |= set(metrics) |  | ||||||
|  |  | ||||||
|                 rows = _get_rows(reader, writer, self.negative_samples) |  | ||||||
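|                 # Transpose the rows so that ``data`` holds one sequence of |  | ||||||
|                 # samples per column (i.e. per metric). |  | ||||||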
|                 data = zip(*rows) |  | ||||||
|  |  | ||||||
|                 if writer: |  | ||||||
|                     wfh.close() |  | ||||||
|                     shutil.move(temp_file, os.path.join(output_directory, entry)) |  | ||||||
|  |  | ||||||
|                 n = len(data[0]) |  | ||||||
|                 means = [s / n for s in map(sum, data)] |  | ||||||
|                 for metric, value in zip(metrics, means): |  | ||||||
|                     metric_name = '{}_{}'.format(port, metric) |  | ||||||
|                     context.result.add_metric(metric_name, round(value, 3), UNITS[metric]) |  | ||||||
|                     self._results[key][metric_name] = round(value, 3) |  | ||||||
|                 energy = sum(data[metrics.index('power')]) * (self.sampling_rate / 1000000) |  | ||||||
|                 context.result.add_metric('{}_energy'.format(port), round(energy, 3), UNITS['energy']) |  | ||||||
|  |  | ||||||
|     def teardown(self, context): |  | ||||||
|         self.logger.debug('Terminating session.') |  | ||||||
|         self._execute_command('close') |  | ||||||
|  |  | ||||||
|     def finalize(self, context): |  | ||||||
|         if self.gpio_path: |  | ||||||
|             unexport_path = self.device.path.join(GPIO_ROOT, 'unexport') |  | ||||||
|             self.device.write_value(unexport_path, self.gpio_sync, verify=False) |  | ||||||
|  |  | ||||||
|     def validate(self):  # pylint: disable=too-many-branches |  | ||||||
|         if not daq: |  | ||||||
|             raise ImportError(import_error_mesg) |  | ||||||
|         self._results = None |  | ||||||
|         self._metrics = set() |  | ||||||
|         if self.labels: |  | ||||||
|             if len(self.labels) != len(self.resistor_values): |  | ||||||
|                 raise ConfigError('Number of DAQ port labels does not match the number of resistor values.') |  | ||||||
|         else: |  | ||||||
|             self.labels = ['PORT_{}'.format(i) for i, _ in enumerate(self.resistor_values)] |  | ||||||
|         self.server_config = ServerConfiguration(host=self.server_host, |  | ||||||
|                                                  port=self.server_port) |  | ||||||
|         self.device_config = DeviceConfiguration(device_id=self.device_id, |  | ||||||
|                                                  v_range=self.v_range, |  | ||||||
|                                                  dv_range=self.dv_range, |  | ||||||
|                                                  sampling_rate=self.sampling_rate, |  | ||||||
|                                                  resistor_values=self.resistor_values, |  | ||||||
|                                                  channel_map=self.channel_map, |  | ||||||
|                                                  labels=self.labels) |  | ||||||
|         try: |  | ||||||
|             self.server_config.validate() |  | ||||||
|             self.device_config.validate() |  | ||||||
|         except ConfigurationError as ex: |  | ||||||
|             raise ConfigError('DAQ configuration: ' + ex.message)  # Re-raise as a WA error |  | ||||||
|         if isinstance(self.merge_channels, bool): |  | ||||||
|             if self.merge_channels: |  | ||||||
|                 # Create a dict of potential prefixes and a list of their suffixes |  | ||||||
|                 grouped_suffixes = defaultdict(list) |  | ||||||
|                 for label in sorted(self.labels): |  | ||||||
|                     if len(label) > 1: |  | ||||||
|                         grouped_suffixes[label[:-1]].append(label) |  | ||||||
|                 # Only merge channels if more than one channel has the same prefix and the |  | ||||||
|                 # suffixes are consecutive letters starting with 'a'. |  | ||||||
|                 self.label_map = {} |  | ||||||
|                 for channel, suffixes in grouped_suffixes.iteritems(): |  | ||||||
|                     if len(suffixes) > 1: |  | ||||||
|                         if "".join([s[-1] for s in suffixes]) in ascii_lowercase[:len(suffixes)]: |  | ||||||
|                             self.label_map[channel] = suffixes |  | ||||||
|  |  | ||||||
|         elif isinstance(self.merge_channels, dict): |  | ||||||
|             # Check if given channel names match labels |  | ||||||
|             for old_names in self.merge_channels.values(): |  | ||||||
|                 for name in old_names: |  | ||||||
|                     if name not in self.labels: |  | ||||||
|                         raise ConfigError("No channel with label {} specified".format(name)) |  | ||||||
|             self.label_map = self.merge_channels  # pylint: disable=redefined-variable-type |  | ||||||
|             self.merge_channels = True |  | ||||||
|         else:  # Should never reach here |  | ||||||
|             raise AssertionError("``merge_channels`` is of invalid type") |  | ||||||
|  |  | ||||||
|     def before_overall_results_processing(self, context): |  | ||||||
|         if self._results: |  | ||||||
|             headers = ['id', 'workload', 'iteration'] |  | ||||||
|             metrics = ['{}_{}'.format(p, m) for p in self.labels for m in sorted(self._metrics)] |  | ||||||
|             headers += metrics |  | ||||||
|             rows = [headers] |  | ||||||
|             for key, value in self._results.iteritems(): |  | ||||||
|                 rows.append(list(key) + [value[m] for m in metrics]) |  | ||||||
|  |  | ||||||
|             outfile = os.path.join(context.output_directory, 'daq_power.csv') |  | ||||||
|             with open(outfile, 'wb') as fh: |  | ||||||
|                 writer = csv.writer(fh) |  | ||||||
|                 writer.writerows(rows) |  | ||||||
|  |  | ||||||
|     def insert_start_marker(self, context): |  | ||||||
|         if self.gpio_path: |  | ||||||
|             command = 'echo DAQ_START_MARKER > {}; echo 1 > {}'.format(TRACE_MARKER_PATH, self.gpio_path) |  | ||||||
|             self.device.execute(command, as_root=self.device.is_rooted) |  | ||||||
|  |  | ||||||
|     def insert_stop_marker(self, context): |  | ||||||
|         if self.gpio_path: |  | ||||||
|             command = 'echo DAQ_STOP_MARKER > {}; echo 0 > {}'.format(TRACE_MARKER_PATH, self.gpio_path) |  | ||||||
|             self.device.execute(command, as_root=self.device.is_rooted) |  | ||||||
|  |  | ||||||
|     def _execute_command(self, command, **kwargs): |  | ||||||
|         # pylint: disable=E1101 |  | ||||||
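|         # Each command is dispatched in a fresh child process; the result |  | ||||||
|         # object is passed back through the queue. |  | ||||||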
|         q = Queue() |  | ||||||
|         p = Process(target=_send_daq_command, args=(q, self.server_config, command), kwargs=kwargs) |  | ||||||
|         p.start() |  | ||||||
|         result = q.get() |  | ||||||
|         p.join() |  | ||||||
|         if result.status == daq.Status.OK: |  | ||||||
|             pass  # all good |  | ||||||
|         elif result.status == daq.Status.OKISH: |  | ||||||
|             self.logger.debug(result.message) |  | ||||||
|         elif result.status == daq.Status.ERROR: |  | ||||||
|             raise InstrumentError('DAQ: {}'.format(result.message)) |  | ||||||
|         else: |  | ||||||
|             raise InstrumentError('DAQ: Unexpected result: {} - {}'.format(result.status, result.message)) |  | ||||||
|         return (result.status, result.data) |  | ||||||
|  |  | ||||||
|     def _merge_channels(self, context):  # pylint: disable=r0914 |  | ||||||
|         output_directory = _d(os.path.join(context.output_directory, 'daq')) |  | ||||||
|         for name, labels in self.label_map.iteritems(): |  | ||||||
|             summed = None |  | ||||||
|             for label in labels: |  | ||||||
|                 path = os.path.join(output_directory, "{}.csv".format(label)) |  | ||||||
|                 with open(path) as fh: |  | ||||||
|                     reader = csv.reader(fh) |  | ||||||
|                     metrics = reader.next() |  | ||||||
|                     rows = _get_rows(reader, None, self.negative_samples) |  | ||||||
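|                     # Accumulate an element-wise sum of this channel's |  | ||||||
|                     # samples into the running total. |  | ||||||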
|                     if summed: |  | ||||||
|                         summed = [[x + y for x, y in zip(a, b)] for a, b in zip(rows, summed)] |  | ||||||
|                     else: |  | ||||||
|                         summed = rows |  | ||||||
|             output_path = os.path.join(output_directory, "{}.csv".format(name)) |  | ||||||
|             with open(output_path, 'wb') as wfh: |  | ||||||
|                 writer = csv.writer(wfh) |  | ||||||
|                 writer.writerow(metrics) |  | ||||||
|                 for row in summed: |  | ||||||
|                     writer.writerow(row) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def _send_daq_command(q, *args, **kwargs): |  | ||||||
|     result = daq.execute_command(*args, **kwargs) |  | ||||||
|     q.put(result) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def _get_rows(reader, writer, negative_samples): |  | ||||||
|     rows = [] |  | ||||||
|     for row in reader: |  | ||||||
|         row = map(float, row) |  | ||||||
|         if negative_samples == 'keep': |  | ||||||
|             rows.append(row) |  | ||||||
|         elif negative_samples == 'zero': |  | ||||||
|             def nonneg(v): |  | ||||||
|                 return v if v >= 0 else 0 |  | ||||||
|             rows.append([nonneg(v) for v in row]) |  | ||||||
|         elif negative_samples == 'drop': |  | ||||||
|             if all(v >= 0 for v in row): |  | ||||||
|                 rows.append(row) |  | ||||||
|         elif negative_samples == 'abs': |  | ||||||
|             rows.append([abs(v) for v in row]) |  | ||||||
|         else: |  | ||||||
|             raise AssertionError(negative_samples)  # should never get here |  | ||||||
|         if writer: |  | ||||||
|             writer.writerow(row) |  | ||||||
|     return rows |  | ||||||
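|  |  | ||||||
|  |  | ||||||
| # A minimal usage sketch (hypothetical sample values) illustrating the four |  | ||||||
| # negative_samples policies on a single CSV row: |  | ||||||
| # |  | ||||||
| #   >>> import csv |  | ||||||
| #   >>> _get_rows(csv.reader(['-0.5,1.0']), None, 'keep') |  | ||||||
| #   [[-0.5, 1.0]] |  | ||||||
| #   >>> _get_rows(csv.reader(['-0.5,1.0']), None, 'zero') |  | ||||||
| #   [[0, 1.0]] |  | ||||||
| #   >>> _get_rows(csv.reader(['-0.5,1.0']), None, 'drop') |  | ||||||
| #   [] |  | ||||||
| #   >>> _get_rows(csv.reader(['-0.5,1.0']), None, 'abs') |  | ||||||
| #   [[0.5, 1.0]] |  | ||||||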
| @@ -1,199 +0,0 @@ | |||||||
| #    Copyright 2013-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
| #pylint: disable=W0613,E1101,E0203,W0201 |  | ||||||
| import time |  | ||||||
|  |  | ||||||
| from wlauto import Instrument, Parameter |  | ||||||
| from wlauto.exceptions import ConfigError, InstrumentError |  | ||||||
| from wlauto.utils.types import boolean |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class DelayInstrument(Instrument): |  | ||||||
|  |  | ||||||
|     name = 'delay' |  | ||||||
|     description = """ |  | ||||||
|     This instrument introduces a delay before executing either an iteration |  | ||||||
|     or all iterations for a spec. |  | ||||||
|  |  | ||||||
|     The delay may be specified as either a fixed period or a temperature |  | ||||||
|     threshold that must be reached. |  | ||||||
|  |  | ||||||
|     Optionally, if an active cooling solution is employed to speed up temperature drop between |  | ||||||
|     runs, it may be controlled using this instrument. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     parameters = [ |  | ||||||
|         Parameter('temperature_file', default='/sys/devices/virtual/thermal/thermal_zone0/temp', |  | ||||||
|                   global_alias='thermal_temp_file', |  | ||||||
|                   description="""Full path to the sysfile on the device that contains the device's |  | ||||||
|                   temperature."""), |  | ||||||
|         Parameter('temperature_timeout', kind=int, default=600, |  | ||||||
|                   global_alias='thermal_timeout', |  | ||||||
|                   description=""" |  | ||||||
|                   The timeout after which the instrument will stop waiting even if the specified threshold |  | ||||||
|                   temperature is not reached. If this timeout is hit, a warning will be logged stating |  | ||||||
|                   the actual temperature at the time the timeout expired. |  | ||||||
|                   """), |  | ||||||
|         Parameter('temperature_poll_period', kind=int, default=5, |  | ||||||
|                   global_alias='thermal_sleep_time', |  | ||||||
|                   description="""How long to sleep (in seconds) between polling current device temperature."""), |  | ||||||
|         Parameter('temperature_between_specs', kind=int, default=None, |  | ||||||
|                   global_alias='thermal_threshold_between_specs', |  | ||||||
|                   description=""" |  | ||||||
|                   Temperature (in device-specific units) the device must cool down to before |  | ||||||
|                   the next workload spec will be run. |  | ||||||
|  |  | ||||||
|                   .. note:: This cannot be specified at the same time as ``fixed_between_specs`` |  | ||||||
|  |  | ||||||
|                   """), |  | ||||||
|         Parameter('temperature_between_iterations', kind=int, default=None, |  | ||||||
|                   global_alias='thermal_threshold_between_iterations', |  | ||||||
|                   description=""" |  | ||||||
|                   Temperature (in device-specific units) the device must cool down to before |  | ||||||
|                   the next iteration will be run. |  | ||||||
|  |  | ||||||
|                   .. note:: This cannot be specified at the same time as ``fixed_between_iterations`` |  | ||||||
|  |  | ||||||
|                   """), |  | ||||||
|         Parameter('temperature_before_start', kind=int, default=None, |  | ||||||
|                   global_alias='thermal_threshold_before_start', |  | ||||||
|                   description=""" |  | ||||||
|                   Temperature (in device-specific units) the device must cool down to just before |  | ||||||
|                   the actual workload execution (after setup has been performed). |  | ||||||
|  |  | ||||||
|                   .. note:: This cannot be specified at the same time as ``fixed_before_start`` |  | ||||||
|  |  | ||||||
|                   """), |  | ||||||
|         Parameter('fixed_between_specs', kind=int, default=None, |  | ||||||
|                   global_alias='fixed_delay_between_specs', |  | ||||||
|                   description=""" |  | ||||||
|                   How long to sleep (in seconds) after all iterations for a workload spec have |  | ||||||
|                   executed. |  | ||||||
|  |  | ||||||
|                   .. note:: This cannot be specified at the same time as ``temperature_between_specs`` |  | ||||||
|  |  | ||||||
|                   """), |  | ||||||
|         Parameter('fixed_between_iterations', kind=int, default=None, |  | ||||||
|                   global_alias='fixed_delay_between_iterations', |  | ||||||
|                   description=""" |  | ||||||
|                   How long to sleep (in seconds) after each iteration of a workload spec has |  | ||||||
|                   executed. |  | ||||||
|  |  | ||||||
|                   .. note:: This cannot be specified at the same time as ``temperature_between_iterations`` |  | ||||||
|  |  | ||||||
|                   """), |  | ||||||
|         Parameter('fixed_before_start', kind=int, default=None, |  | ||||||
|                   global_alias='fixed_delay_before_start', |  | ||||||
|                   description=""" |  | ||||||
|                   How long to sleep (in seconds) after setup for an iteration has been performed but |  | ||||||
|                   before running the workload. |  | ||||||
|  |  | ||||||
|                   .. note:: This cannot be specified at the same time as ``temperature_before_start`` |  | ||||||
|  |  | ||||||
|                   """), |  | ||||||
|         Parameter('active_cooling', kind=boolean, default=False, |  | ||||||
|                   global_alias='thermal_active_cooling', |  | ||||||
|                   description=""" |  | ||||||
|                   This instrument supports an active cooling solution while waiting for the device temperature |  | ||||||
|                   to drop to the threshold. The solution involves an mbed controlling a fan. The mbed is signaled |  | ||||||
|                   over a serial port. If this solution is present in the setup, this should be set to ``True``. |  | ||||||
|                   """), |  | ||||||
|     ] |  | ||||||
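|  |  | ||||||
|     # Example (hypothetical agenda snippet): wait for the device to cool down |  | ||||||
|     # to 80000 (device-specific units) between iterations, using active cooling: |  | ||||||
|     # |  | ||||||
|     #   instrumentation: [delay] |  | ||||||
|     #   temperature_between_iterations: 80000 |  | ||||||
|     #   active_cooling: true |  | ||||||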
|  |  | ||||||
|     def initialize(self, context): |  | ||||||
|         if self.temperature_between_iterations == 0: |  | ||||||
|             temp = self.device.get_sysfile_value(self.temperature_file, int) |  | ||||||
|             self.logger.debug('Setting temperature threshold between iterations to {}'.format(temp)) |  | ||||||
|             self.temperature_between_iterations = temp |  | ||||||
|         if self.temperature_between_specs == 0: |  | ||||||
|             temp = self.device.get_sysfile_value(self.temperature_file, int) |  | ||||||
|             self.logger.debug('Setting temperature threshold between workload specs to {}'.format(temp)) |  | ||||||
|             self.temperature_between_specs = temp |  | ||||||
|  |  | ||||||
|     def very_slow_on_iteration_start(self, context): |  | ||||||
|         if self.active_cooling: |  | ||||||
|             self.device.stop_active_cooling() |  | ||||||
|         if self.fixed_between_iterations: |  | ||||||
|             self.logger.debug('Waiting for a fixed period after iteration...') |  | ||||||
|             time.sleep(self.fixed_between_iterations) |  | ||||||
|         elif self.temperature_between_iterations: |  | ||||||
|             self.logger.debug('Waiting for temperature drop before iteration...') |  | ||||||
|             self.wait_for_temperature(self.temperature_between_iterations) |  | ||||||
|  |  | ||||||
|     def very_slow_on_spec_start(self, context): |  | ||||||
|         if self.active_cooling: |  | ||||||
|             self.device.stop_active_cooling() |  | ||||||
|         if self.fixed_between_specs: |  | ||||||
|             self.logger.debug('Waiting for a fixed period after spec execution...') |  | ||||||
|             time.sleep(self.fixed_between_specs) |  | ||||||
|         elif self.temperature_between_specs: |  | ||||||
|             self.logger.debug('Waiting for temperature drop before spec execution...') |  | ||||||
|             self.wait_for_temperature(self.temperature_between_specs) |  | ||||||
|  |  | ||||||
|     def very_slow_start(self, context): |  | ||||||
|         if self.active_cooling: |  | ||||||
|             self.device.stop_active_cooling() |  | ||||||
|         if self.fixed_before_start: |  | ||||||
|             self.logger.debug('Waiting for a fixed period before commencing execution...') |  | ||||||
|             time.sleep(self.fixed_before_start) |  | ||||||
|         elif self.temperature_before_start: |  | ||||||
|             self.logger.debug('Waiting for temperature drop before commencing execution...') |  | ||||||
|             self.wait_for_temperature(self.temperature_before_start) |  | ||||||
|  |  | ||||||
|     def wait_for_temperature(self, temperature): |  | ||||||
|         if self.active_cooling: |  | ||||||
|             self.device.start_active_cooling() |  | ||||||
|             self.do_wait_for_temperature(temperature) |  | ||||||
|             self.device.stop_active_cooling() |  | ||||||
|         else: |  | ||||||
|             self.do_wait_for_temperature(temperature) |  | ||||||
|  |  | ||||||
|     def do_wait_for_temperature(self, temperature): |  | ||||||
|         reading = self.device.get_sysfile_value(self.temperature_file, int) |  | ||||||
|         waiting_start_time = time.time() |  | ||||||
|         while reading > temperature: |  | ||||||
|             self.logger.debug('Device temperature: {}'.format(reading)) |  | ||||||
|             if time.time() - waiting_start_time > self.temperature_timeout: |  | ||||||
|                 self.logger.warning('Reached timeout; current temperature: {}'.format(reading)) |  | ||||||
|                 break |  | ||||||
|             time.sleep(self.temperature_poll_period) |  | ||||||
|             reading = self.device.get_sysfile_value(self.temperature_file, int) |  | ||||||
|  |  | ||||||
|     def validate(self): |  | ||||||
|         if (self.temperature_between_specs is not None and |  | ||||||
|                 self.fixed_between_specs is not None): |  | ||||||
|             raise ConfigError('Both fixed delay and thermal threshold specified for specs.') |  | ||||||
|  |  | ||||||
|         if (self.temperature_between_iterations is not None and |  | ||||||
|                 self.fixed_between_iterations is not None): |  | ||||||
|             raise ConfigError('Both fixed delay and thermal threshold specified for iterations.') |  | ||||||
|  |  | ||||||
|         if (self.temperature_before_start is not None and |  | ||||||
|                 self.fixed_before_start is not None): |  | ||||||
|             raise ConfigError('Both fixed delay and thermal threshold specified before start.') |  | ||||||
|  |  | ||||||
|         if not any([self.temperature_between_specs, self.fixed_between_specs, self.temperature_before_start, |  | ||||||
|                     self.temperature_between_iterations, self.fixed_between_iterations, |  | ||||||
|                     self.fixed_before_start]): |  | ||||||
|             raise ConfigError('delay instrument is enabled, but no delay is specified.') |  | ||||||
|  |  | ||||||
|         if self.active_cooling and not self.device.has('active_cooling'): |  | ||||||
|             message = 'Your device does not support active cooling. Did you configure it with an appropriate module?' |  | ||||||
|             raise InstrumentError(message) |  | ||||||
|  |  | ||||||
| @@ -1,62 +0,0 @@ | |||||||
| #    Copyright 2014-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
| import os |  | ||||||
|  |  | ||||||
| from wlauto import Instrument, Parameter |  | ||||||
| from wlauto.utils.misc import ensure_file_directory_exists as _f |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class DmesgInstrument(Instrument): |  | ||||||
|     # pylint: disable=no-member,attribute-defined-outside-init |  | ||||||
|     """ |  | ||||||
|     Collects dmesg output before and during the run. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     name = 'dmesg' |  | ||||||
|  |  | ||||||
|     parameters = [ |  | ||||||
|         Parameter('loglevel', kind=int, allowed_values=range(8), |  | ||||||
|                   description='Set loglevel for console output.') |  | ||||||
|     ] |  | ||||||
|  |  | ||||||
|     loglevel_file = '/proc/sys/kernel/printk' |  | ||||||
|  |  | ||||||
|     def setup(self, context): |  | ||||||
|         if self.loglevel: |  | ||||||
|             self.old_loglevel = self.device.get_sysfile_value(self.loglevel_file) |  | ||||||
|             self.device.write_value(self.loglevel_file, self.loglevel, verify=False) |  | ||||||
|         self.before_file = _f(os.path.join(context.output_directory, 'dmesg', 'before')) |  | ||||||
|         self.after_file = _f(os.path.join(context.output_directory, 'dmesg', 'after')) |  | ||||||
|  |  | ||||||
|     def slow_start(self, context): |  | ||||||
|         with open(self.before_file, 'w') as wfh: |  | ||||||
|             wfh.write(self.device.execute('dmesg')) |  | ||||||
|         context.add_artifact('dmesg_before', self.before_file, kind='data') |  | ||||||
|         if self.device.is_rooted: |  | ||||||
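|             # Clear the ring buffer so that the 'after' capture only contains |  | ||||||
|             # messages logged during the run. |  | ||||||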
|             self.device.execute('dmesg -c', as_root=True) |  | ||||||
|  |  | ||||||
|     def slow_stop(self, context): |  | ||||||
|         with open(self.after_file, 'w') as wfh: |  | ||||||
|             wfh.write(self.device.execute('dmesg')) |  | ||||||
|         context.add_artifact('dmesg_after', self.after_file, kind='data') |  | ||||||
|  |  | ||||||
|     def teardown(self, context):  # pylint: disable=unused-argument |  | ||||||
|         if self.loglevel: |  | ||||||
|             self.device.write_value(self.loglevel_file, self.old_loglevel, verify=False) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| @@ -1,850 +0,0 @@ | |||||||
| #    Copyright 2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
| #pylint: disable=attribute-defined-outside-init,access-member-before-definition,redefined-outer-name |  | ||||||
| from __future__ import division |  | ||||||
| import os |  | ||||||
| import math |  | ||||||
| import time |  | ||||||
| from tempfile import mktemp |  | ||||||
| from base64 import b64encode |  | ||||||
| from collections import Counter, namedtuple |  | ||||||
|  |  | ||||||
| try: |  | ||||||
|     import jinja2 |  | ||||||
|     import pandas as pd |  | ||||||
|     import matplotlib |  | ||||||
|     matplotlib.use('AGG') |  | ||||||
|     import matplotlib.pyplot as plt |  | ||||||
|     import numpy as np |  | ||||||
|     low_filter = np.vectorize(lambda x: x if x > 0 else 0)  # pylint: disable=no-member |  | ||||||
|     import_error = None |  | ||||||
| except ImportError as e: |  | ||||||
|     import_error = e |  | ||||||
|     jinja2 = None |  | ||||||
|     pd = None |  | ||||||
|     plt = None |  | ||||||
|     np = None |  | ||||||
|     low_filter = None |  | ||||||
|  |  | ||||||
| from wlauto import Instrument, Parameter, File |  | ||||||
| from wlauto.exceptions import ConfigError, InstrumentError, DeviceError |  | ||||||
| from wlauto.instrumentation import instrument_is_installed |  | ||||||
| from wlauto.utils.types import caseless_string, list_or_caseless_string, list_of_ints |  | ||||||
| from wlauto.utils.misc import list_to_mask |  | ||||||
|  |  | ||||||
| FREQ_TABLE_FILE = 'frequency_power_perf_data.csv' |  | ||||||
| CPUS_TABLE_FILE = 'projected_cap_power.csv' |  | ||||||
| MEASURED_CPUS_TABLE_FILE = 'measured_cap_power.csv' |  | ||||||
| IDLE_TABLE_FILE = 'idle_power_perf_data.csv' |  | ||||||
| REPORT_TEMPLATE_FILE = 'report.template' |  | ||||||
| EM_TEMPLATE_FILE = 'em.template' |  | ||||||
|  |  | ||||||
| IdlePowerState = namedtuple('IdlePowerState', ['power']) |  | ||||||
| CapPowerState = namedtuple('CapPowerState', ['cap', 'power']) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class EnergyModel(object): |  | ||||||
|  |  | ||||||
|     def __init__(self): |  | ||||||
|         self.big_cluster_idle_states = [] |  | ||||||
|         self.little_cluster_idle_states = [] |  | ||||||
|         self.big_cluster_cap_states = [] |  | ||||||
|         self.little_cluster_cap_states = [] |  | ||||||
|         self.big_core_idle_states = [] |  | ||||||
|         self.little_core_idle_states = [] |  | ||||||
|         self.big_core_cap_states = [] |  | ||||||
|         self.little_core_cap_states = [] |  | ||||||
|  |  | ||||||
|     def add_cap_entry(self, cluster, perf, clust_pow, core_pow): |  | ||||||
|         if cluster == 'big': |  | ||||||
|             self.big_cluster_cap_states.append(CapPowerState(perf, clust_pow)) |  | ||||||
|             self.big_core_cap_states.append(CapPowerState(perf, core_pow)) |  | ||||||
|         elif cluster == 'little': |  | ||||||
|             self.little_cluster_cap_states.append(CapPowerState(perf, clust_pow)) |  | ||||||
|             self.little_core_cap_states.append(CapPowerState(perf, core_pow)) |  | ||||||
|         else: |  | ||||||
|             raise ValueError('Unexpected cluster: {}'.format(cluster)) |  | ||||||
|  |  | ||||||
|     def add_cluster_idle(self, cluster, values): |  | ||||||
|         for value in values: |  | ||||||
|             if cluster == 'big': |  | ||||||
|                 self.big_cluster_idle_states.append(IdlePowerState(value)) |  | ||||||
|             elif cluster == 'little': |  | ||||||
|                 self.little_cluster_idle_states.append(IdlePowerState(value)) |  | ||||||
|             else: |  | ||||||
|                 raise ValueError('Unexpected cluster: {}'.format(cluster)) |  | ||||||
|  |  | ||||||
|     def add_core_idle(self, cluster, values): |  | ||||||
|         for value in values: |  | ||||||
|             if cluster == 'big': |  | ||||||
|                 self.big_core_idle_states.append(IdlePowerState(value)) |  | ||||||
|             elif cluster == 'little': |  | ||||||
|                 self.little_core_idle_states.append(IdlePowerState(value)) |  | ||||||
|             else: |  | ||||||
|                 raise ValueError('Unexpected cluster: {}'.format(cluster)) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class PowerPerformanceAnalysis(object): |  | ||||||
|  |  | ||||||
|     def __init__(self, data): |  | ||||||
|         self.summary = {} |  | ||||||
|         big_freqs = data[data.cluster == 'big'].frequency.unique() |  | ||||||
|         little_freqs = data[data.cluster == 'little'].frequency.unique() |  | ||||||
|         self.summary['frequency'] = max(set(big_freqs).intersection(set(little_freqs))) |  | ||||||
|  |  | ||||||
|         big_sc = data[(data.cluster == 'big') & |  | ||||||
|                       (data.frequency == self.summary['frequency']) & |  | ||||||
|                       (data.cpus == 1)] |  | ||||||
|         little_sc = data[(data.cluster == 'little') & |  | ||||||
|                          (data.frequency == self.summary['frequency']) & |  | ||||||
|                          (data.cpus == 1)] |  | ||||||
|         self.summary['performance_ratio'] = big_sc.performance.item() / little_sc.performance.item() |  | ||||||
|         self.summary['power_ratio'] = big_sc.power.item() / little_sc.power.item() |  | ||||||
|         self.summary['max_performance'] = data[data.cpus == 1].performance.max() |  | ||||||
|         self.summary['max_power'] = data[data.cpus == 1].power.max() |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def build_energy_model(freq_power_table, cpus_power, idle_power, first_cluster_idle_state): |  | ||||||
|     # pylint: disable=too-many-locals |  | ||||||
|     em = EnergyModel() |  | ||||||
|     idle_power_sc = idle_power[idle_power.cpus == 1] |  | ||||||
|     perf_data = get_normalized_single_core_data(freq_power_table) |  | ||||||
|  |  | ||||||
|     for cluster in ['little', 'big']: |  | ||||||
|         cluster_cpus_power = cpus_power[cluster].dropna() |  | ||||||
|         cluster_power = cluster_cpus_power['cluster'].apply(int) |  | ||||||
|         core_power = (cluster_cpus_power['1'] - cluster_power).apply(int) |  | ||||||
|         performance = (perf_data[perf_data.cluster == cluster].performance_norm * 1024 / 100).apply(int) |  | ||||||
|         for perf, clust_pow, core_pow in zip(performance, cluster_power, core_power): |  | ||||||
|             em.add_cap_entry(cluster, perf, clust_pow, core_pow) |  | ||||||
|  |  | ||||||
|         all_idle_power = idle_power_sc[idle_power_sc.cluster == cluster].power.values |  | ||||||
|         # CORE idle states |  | ||||||
|         # We want the delta of each state w.r.t. the power |  | ||||||
|         # consumption of the shallowest one at this level (core_ref) |  | ||||||
|         idle_core_power = low_filter(all_idle_power[:first_cluster_idle_state] - |  | ||||||
|                                      all_idle_power[first_cluster_idle_state - 1]) |  | ||||||
|         # CLUSTER idle states |  | ||||||
|         # We want the absolute value of each idle state |  | ||||||
|         idle_cluster_power = low_filter(all_idle_power[first_cluster_idle_state - 1:]) |  | ||||||
|         em.add_cluster_idle(cluster, idle_cluster_power) |  | ||||||
|         em.add_core_idle(cluster, idle_core_power) |  | ||||||
|  |  | ||||||
|     return em |  | ||||||
|  |  | ||||||
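| # Worked illustration for build_energy_model() (hypothetical readings): with |  | ||||||
| # all_idle_power == [30, 20, 5] and first_cluster_idle_state == 2, the core |  | ||||||
| # idle states become low_filter([30, 20] - 20) == [10, 0] and the cluster |  | ||||||
| # idle states low_filter([20, 5]) == [20, 5]. |  | ||||||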
|  |  | ||||||
| def generate_em_c_file(em, big_core, little_core, em_template_file, outfile): |  | ||||||
|     with open(em_template_file) as fh: |  | ||||||
|         em_template = jinja2.Template(fh.read()) |  | ||||||
|     em_text = em_template.render( |  | ||||||
|         big_core=big_core, |  | ||||||
|         little_core=little_core, |  | ||||||
|         em=em, |  | ||||||
|     ) |  | ||||||
|     with open(outfile, 'w') as wfh: |  | ||||||
|         wfh.write(em_text) |  | ||||||
|     return em_text |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def generate_report(freq_power_table, measured_cpus_table, cpus_table, idle_power_table,  # pylint: disable=unused-argument |  | ||||||
|                     report_template_file, device_name, em_text, outfile): |  | ||||||
|     # pylint: disable=too-many-locals |  | ||||||
|     cap_power_analysis = PowerPerformanceAnalysis(freq_power_table) |  | ||||||
|     single_core_norm = get_normalized_single_core_data(freq_power_table) |  | ||||||
|     cap_power_plot = get_cap_power_plot(single_core_norm) |  | ||||||
|     idle_power_plot = get_idle_power_plot(idle_power_table) |  | ||||||
|  |  | ||||||
|     fig, axes = plt.subplots(1, 2) |  | ||||||
|     fig.set_size_inches(16, 8) |  | ||||||
|     for i, cluster in enumerate(reversed(cpus_table.columns.levels[0])): |  | ||||||
|         projected = cpus_table[cluster].dropna(subset=['1']) |  | ||||||
|         plot_cpus_table(projected, axes[i], cluster) |  | ||||||
|     cpus_plot_data = get_figure_data(fig) |  | ||||||
|  |  | ||||||
|     with open(report_template_file) as fh: |  | ||||||
|         report_template = jinja2.Template(fh.read()) |  | ||||||
|     html = report_template.render( |  | ||||||
|         device_name=device_name, |  | ||||||
|         freq_power_table=freq_power_table.set_index(['cluster', 'cpus', 'frequency']).to_html(), |  | ||||||
|         cap_power_analysis=cap_power_analysis, |  | ||||||
|         cap_power_plot=get_figure_data(cap_power_plot), |  | ||||||
|         idle_power_table=idle_power_table.set_index(['cluster', 'cpus', 'state']).to_html(), |  | ||||||
|         idle_power_plot=get_figure_data(idle_power_plot), |  | ||||||
|         cpus_table=cpus_table.to_html(), |  | ||||||
|         cpus_plot=cpus_plot_data, |  | ||||||
|         em_text=em_text, |  | ||||||
|     ) |  | ||||||
|     with open(outfile, 'w') as wfh: |  | ||||||
|         wfh.write(html) |  | ||||||
|     return html |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def wa_result_to_power_perf_table(df, performance_metric, index): |  | ||||||
|     table = df.pivot_table(index=index + ['iteration'], |  | ||||||
|                            columns='metric', values='value').reset_index() |  | ||||||
|     result_mean = table.groupby(index).mean() |  | ||||||
|     result_std = table.groupby(index).std() |  | ||||||
|     result_std.columns = [c + ' std' for c in result_std.columns] |  | ||||||
|     result_count = table.groupby(index).count() |  | ||||||
|     result_count.columns = [c + ' count' for c in result_count.columns] |  | ||||||
|     count_sqrt = result_count.apply(lambda x: x.apply(math.sqrt)) |  | ||||||
|     count_sqrt.columns = result_std.columns  # match column names for division |  | ||||||
|     result_error = 1.96 * result_std / count_sqrt  # 1.96 == 95% confidence interval |  | ||||||
|     result_error.columns = [c + ' error' for c in result_mean.columns] |  | ||||||
|  |  | ||||||
|     result = pd.concat([result_mean, result_std, result_count, result_error], axis=1) |  | ||||||
|     del result['iteration'] |  | ||||||
|     del result['iteration std'] |  | ||||||
|     del result['iteration count'] |  | ||||||
|     del result['iteration error'] |  | ||||||
|  |  | ||||||
|     updated_columns = [] |  | ||||||
|     for column in result.columns: |  | ||||||
|         if column == performance_metric: |  | ||||||
|             updated_columns.append('performance') |  | ||||||
|         elif column == performance_metric + ' std': |  | ||||||
|             updated_columns.append('performance_std') |  | ||||||
|         elif column == performance_metric + ' error': |  | ||||||
|             updated_columns.append('performance_error') |  | ||||||
|         else: |  | ||||||
|             updated_columns.append(column.replace(' ', '_')) |  | ||||||
|     result.columns = updated_columns |  | ||||||
|     result = result[sorted(result.columns)] |  | ||||||
|     result.reset_index(inplace=True) |  | ||||||
|  |  | ||||||
|     return result |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def get_figure_data(fig, fmt='png'): |  | ||||||
|     tmp = mktemp() |  | ||||||
|     fig.savefig(tmp, format=fmt, bbox_inches='tight') |  | ||||||
|     with open(tmp, 'rb') as fh: |  | ||||||
|         image_data = b64encode(fh.read()) |  | ||||||
|     os.remove(tmp) |  | ||||||
|     return image_data |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def get_normalized_single_core_data(data): |  | ||||||
|     finite_power = np.isfinite(data.power)  # pylint: disable=no-member |  | ||||||
|     finite_perf = np.isfinite(data.performance)  # pylint: disable=no-member |  | ||||||
|     data_single_core = data[(data.cpus == 1) & finite_perf & finite_power].copy() |  | ||||||
|     data_single_core['performance_norm'] = (data_single_core.performance / |  | ||||||
|                                             data_single_core.performance.max() * 100).apply(int) |  | ||||||
|     data_single_core['power_norm'] = (data_single_core.power / |  | ||||||
|                                       data_single_core.power.max() * 100).apply(int) |  | ||||||
|     return data_single_core |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def get_cap_power_plot(data_single_core): |  | ||||||
|     big_single_core = data_single_core[(data_single_core.cluster == 'big') & |  | ||||||
|                                        (data_single_core.cpus == 1)] |  | ||||||
|     little_single_core = data_single_core[(data_single_core.cluster == 'little') & |  | ||||||
|                                           (data_single_core.cpus == 1)] |  | ||||||
|  |  | ||||||
|     fig, axes = plt.subplots(1, 1, figsize=(12, 8)) |  | ||||||
|     axes.plot(big_single_core.performance_norm, |  | ||||||
|               big_single_core.power_norm, |  | ||||||
|               marker='o') |  | ||||||
|     axes.plot(little_single_core.performance_norm, |  | ||||||
|               little_single_core.power_norm, |  | ||||||
|               marker='o') |  | ||||||
|     axes.set_xlim(0, 105) |  | ||||||
|     axes.set_ylim(0, 105) |  | ||||||
|     axes.set_xlabel('Performance (Normalized)') |  | ||||||
|     axes.set_ylabel('Power (Normalized)') |  | ||||||
|     axes.grid() |  | ||||||
|     axes.legend(['big cluster', 'little cluster'], loc=0) |  | ||||||
|     return fig |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def get_idle_power_plot(df): |  | ||||||
|     fig, axes = plt.subplots(1, 2, figsize=(15, 7)) |  | ||||||
|     for cluster, ax in zip(['little', 'big'], axes): |  | ||||||
|         data = df[df.cluster == cluster].pivot_table(index=['state'], columns='cpus', values='power') |  | ||||||
|         err = df[df.cluster == cluster].pivot_table(index=['state'], columns='cpus', values='power_error') |  | ||||||
|         data.plot(kind='bar', ax=ax, rot=30, yerr=err) |  | ||||||
|         ax.set_title('{} cluster'.format(cluster)) |  | ||||||
|         ax.set_xlim(-1, len(data.columns) - 0.5) |  | ||||||
|         ax.set_ylabel('Power (mW)') |  | ||||||
|     return fig |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def fit_polynomial(s, n): |  | ||||||
|     # pylint: disable=no-member |  | ||||||
|     coeffs = np.polyfit(s.index, s.values, n) |  | ||||||
|     poly = np.poly1d(coeffs) |  | ||||||
|     return poly(s.index) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def get_cpus_power_table(data, index, opps, leak_factors):  # pylint: disable=too-many-locals |  | ||||||
|     # pylint: disable=no-member |  | ||||||
|     power_table = data[[index, 'cluster', 'cpus', 'power']].pivot_table(index=index, |  | ||||||
|                                                                         columns=['cluster', 'cpus'], |  | ||||||
|                                                                         values='power') |  | ||||||
|     bs_power_table = pd.DataFrame(index=power_table.index, columns=power_table.columns) |  | ||||||
|     for cluster in power_table.columns.levels[0]: |  | ||||||
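|         # Extrapolate down to zero running cores to estimate the cluster-only |  | ||||||
|         # power, assuming the incremental cost per core is linear: |  | ||||||
|         # P(0) = P(1) - (P(2) - P(1)). |  | ||||||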
|         power_table[cluster, 0] = (power_table[cluster, 1] - |  | ||||||
|                                    (power_table[cluster, 2] - |  | ||||||
|                                     power_table[cluster, 1])) |  | ||||||
|         bs_power_table.loc[power_table[cluster, 1].notnull(), (cluster, 1)] = fit_polynomial(power_table[cluster, 1].dropna(), 2) |  | ||||||
|         bs_power_table.loc[power_table[cluster, 2].notnull(), (cluster, 2)] = fit_polynomial(power_table[cluster, 2].dropna(), 2) |  | ||||||
|  |  | ||||||
|         if opps[cluster] is None: |  | ||||||
|             bs_power_table.loc[bs_power_table[cluster, 1].notnull(), (cluster, 0)] = \ |  | ||||||
|                 (2 * power_table[cluster, 1] - power_table[cluster, 2]).values |  | ||||||
|         else: |  | ||||||
|             voltages = opps[cluster].set_index('frequency').sort_index() |  | ||||||
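|             # Scale the per-cluster leakage factor with supply voltage cubed, |  | ||||||
|             # normalised to a 0.9 V reference (assumed to be the nominal |  | ||||||
|             # voltage at which the factors were derived). |  | ||||||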
|             leakage = leak_factors[cluster] * 2 * voltages['voltage']**3 / 0.9**3 |  | ||||||
|             leakage_delta = leakage - leakage[leakage.index[0]] |  | ||||||
|             bs_power_table.loc[:, (cluster, 0)] = \ |  | ||||||
|                 (2 * bs_power_table[cluster, 1] + leakage_delta - bs_power_table[cluster, 2]) |  | ||||||
|  |  | ||||||
|     # re-order columns and rename column '0' to 'cluster' |  | ||||||
|     power_table = power_table[sorted(power_table.columns, |  | ||||||
|                                      cmp=lambda x, y: cmp(y[0], x[0]) or cmp(x[1], y[1]))] |  | ||||||
|     bs_power_table = bs_power_table[sorted(bs_power_table.columns, |  | ||||||
|                                            cmp=lambda x, y: cmp(y[0], x[0]) or cmp(x[1], y[1]))] |  | ||||||
|     old_levels = power_table.columns.levels |  | ||||||
|     power_table.columns.set_levels([old_levels[0], list(map(str, old_levels[1])[:-1]) + ['cluster']], |  | ||||||
|                                    inplace=True) |  | ||||||
|     bs_power_table.columns.set_levels([old_levels[0], list(map(str, old_levels[1])[:-1]) + ['cluster']], |  | ||||||
|                                       inplace=True) |  | ||||||
|     return power_table, bs_power_table |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def plot_cpus_table(projected, ax, cluster): |  | ||||||
|     projected.T.plot(ax=ax, marker='o') |  | ||||||
|     ax.set_title('{} cluster'.format(cluster)) |  | ||||||
|     ax.set_xticklabels(projected.columns) |  | ||||||
|     ax.set_xticks(range(0, 5)) |  | ||||||
|     ax.set_xlim(-0.5, len(projected.columns) - 0.5) |  | ||||||
|     ax.set_ylabel('Power (mW)') |  | ||||||
|     ax.grid(True) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def opp_table(d): |  | ||||||
|     if d is None: |  | ||||||
|         return None |  | ||||||
|     return pd.DataFrame(d.items(), columns=['frequency', 'voltage']) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class EnergyModelInstrument(Instrument): |  | ||||||
|  |  | ||||||
|     name = 'energy_model' |  | ||||||
|     desicription = """ |  | ||||||
|     Generates a power mode for the device based on specified workload. |  | ||||||
|  |  | ||||||
|     This insturment will execute the workload specified by the agenda (currently, only ``sysbench`` is |  | ||||||
|     supported) and will use the resulting performance and power measurments to generate a power mode for |  | ||||||
|     the device. |  | ||||||
|  |  | ||||||
|     This instrument requires certain features to be present in the kernel: |  | ||||||
|  |  | ||||||
|     1. cgroups and cpusets must be enabled. |  | ||||||
|     2. cpufreq and userspace governor must be enabled. |  | ||||||
|     3. cpuidle must be enabled. |  | ||||||
|  |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     parameters = [ |  | ||||||
|         Parameter('device_name', kind=caseless_string, |  | ||||||
|                   description="""The name of the device to be used in  generating the model. If not specified, |  | ||||||
|                                  ``device.name`` will be used. """), |  | ||||||
|         Parameter('big_core', kind=caseless_string, |  | ||||||
|                   description="""The name of the "big" core in the big.LITTLE system; must match |  | ||||||
|                                  one of the values in ``device.core_names``. """), |  | ||||||
|         Parameter('performance_metric', kind=caseless_string, mandatory=True, |  | ||||||
|                   description="""Metric to be used as the performance indicator."""), |  | ||||||
|         Parameter('power_metric', kind=list_or_caseless_string, |  | ||||||
|                   description="""Metric to be used as the power indicator. The value may contain a |  | ||||||
|                                  ``{core}`` format specifier that will be replaced with names of big |  | ||||||
|                                  and little cores to drive the name of the metric for that cluster. |  | ||||||
|                                  Either this or ``energy_metric`` must be specified, but not both."""), |  | ||||||
|         Parameter('energy_metric', kind=list_or_caseless_string, |  | ||||||
|                   description="""Metric to be used as the energy indicator. The value may contain a |  | ||||||
|                                  ``{core}`` format specifier that will be replaced with names of big |  | ||||||
|                                  and little cores to drive the name of the metric for that cluster. |  | ||||||
|                                  This metric will be used to derive power by dividing through by |  | ||||||
|                                  execution time. Either this or ``power_metric`` must be specified, but |  | ||||||
|                                  not both."""), |  | ||||||
|         Parameter('power_scaling_factor', kind=float, default=1.0, |  | ||||||
|                   description="""Power model specfies power in milliWatts. This is a scaling factor that |  | ||||||
|                                  power_metric values will be multiplied by to get milliWatts."""), |  | ||||||
|         Parameter('big_frequencies', kind=list_of_ints, |  | ||||||
|                   description="""List of frequencies to be used for big cores. These frequencies must |  | ||||||
|                                  be supported by the cores. If this is not specified, all available |  | ||||||
|                                  frequencies for the core (as read from cpufreq) will be used."""), |  | ||||||
|         Parameter('little_frequencies', kind=list_of_ints, |  | ||||||
|                   description="""List of frequencies to be used for little cores. These frequencies must |  | ||||||
|                                  be supported by the cores. If this is not specified, all available |  | ||||||
|                                  frequencies for the core (as read from cpufreq) will be used."""), |  | ||||||
|         Parameter('idle_workload', kind=str, default='idle', |  | ||||||
|                   description="Workload to be used while measuring idle power."), |  | ||||||
|         Parameter('idle_workload_params', kind=dict, default={}, |  | ||||||
|                   description="Parameter to pass to the idle workload."), |  | ||||||
|         Parameter('first_cluster_idle_state', kind=int, default=-1, |  | ||||||
|                   description='''The index of the first cluster idle state on the device. Previous states |  | ||||||
|                                  are assumed to be core idle states. The default is ``-1``, i.e. only the last |  | ||||||
|                                  idle state is assumed to affect the entire cluster.'''), |  | ||||||
|         Parameter('no_hotplug', kind=bool, default=False, |  | ||||||
|                   description='''This option allows running the instrument without hotplugging cores on and off. |  | ||||||
|                                  Disabling hotplugging will most likely produce a less accurate power model.'''), |  | ||||||
|         Parameter('num_of_freqs_to_thermal_adjust', kind=int, default=0, |  | ||||||
|                   description="""The number of frequencies begining from the highest, to be adjusted for |  | ||||||
|                                  the thermal effect."""), |  | ||||||
|         Parameter('big_opps', kind=opp_table, |  | ||||||
|                   description="""OPP table mapping frequency to voltage (kHz --> mV) for the big cluster."""), |  | ||||||
|         Parameter('little_opps', kind=opp_table, |  | ||||||
|                   description="""OPP table mapping frequency to voltage (kHz --> mV) for the little cluster."""), |  | ||||||
|         Parameter('big_leakage', kind=int, default=120, |  | ||||||
|                   description=""" |  | ||||||
|                   Leakage factor for the big cluster (this is specific to a particular core implementation). |  | ||||||
|                   """), |  | ||||||
|         Parameter('little_leakage', kind=int, default=60, |  | ||||||
|                   description=""" |  | ||||||
|                   Leakage factor for the little cluster (this is specific to a particular core implementation). |  | ||||||
|                   """), |  | ||||||
|     ] |  | ||||||
|  |  | ||||||
|     def validate(self): |  | ||||||
|         if import_error: |  | ||||||
|             message = 'energy_model instrument requires pandas, jinja2 and matplotlib Python packages to be installed; got: "{}"' |  | ||||||
|             raise InstrumentError(message.format(import_error.message)) |  | ||||||
|         for capability in ['cgroups', 'cpuidle']: |  | ||||||
|             if not self.device.has(capability): |  | ||||||
|                 message = 'The Device does not appear to support {}; does it have the right module installed?' |  | ||||||
|                 raise ConfigError(message.format(capability)) |  | ||||||
|         device_cores = set(self.device.core_names) |  | ||||||
|         if (self.power_metric and self.energy_metric) or not (self.power_metric or self.energy_metric): |  | ||||||
|             raise ConfigError('Either power_metric or energy_metric must be specified (but not both).') |  | ||||||
|         if not device_cores: |  | ||||||
|             raise ConfigError('The Device does not appear to have core_names configured.') |  | ||||||
|         elif len(device_cores) != 2: |  | ||||||
|             raise ConfigError('The Device does not appear to be a big.LITTLE device.') |  | ||||||
|         if self.big_core and self.big_core not in self.device.core_names: |  | ||||||
|             raise ConfigError('Specified big_core "{}" is in divice {}'.format(self.big_core, self.device.name)) |  | ||||||
|         if not self.big_core: |  | ||||||
|             self.big_core = self.device.core_names[-1]  # the last core is usually "big" in existing big.LITTLE devices |  | ||||||
|         if not self.device_name: |  | ||||||
|             self.device_name = self.device.name |  | ||||||
|         if self.num_of_freqs_to_thermal_adjust and not instrument_is_installed('daq'): |  | ||||||
|             self.logger.warn('Adjustment for thermal effect requires daq instrument. Disabling adjustment') |  | ||||||
|             self.num_of_freqs_to_thermal_adjust = 0 |  | ||||||
|  |  | ||||||
|     def initialize(self, context): |  | ||||||
|         self.number_of_cpus = {} |  | ||||||
|         self.report_template_file = context.resolver.get(File(self, REPORT_TEMPLATE_FILE)) |  | ||||||
|         self.em_template_file = context.resolver.get(File(self, EM_TEMPLATE_FILE)) |  | ||||||
|         self.little_core = (set(self.device.core_names) - set([self.big_core])).pop() |  | ||||||
|         self.perform_runtime_validation() |  | ||||||
|         self.enable_all_cores() |  | ||||||
|         self.configure_clusters() |  | ||||||
|         self.discover_idle_states() |  | ||||||
|         self.disable_thermal_management() |  | ||||||
|         self.initialize_job_queue(context) |  | ||||||
|         self.initialize_result_tracking() |  | ||||||
|  |  | ||||||
|     def setup(self, context): |  | ||||||
|         if not context.spec.label.startswith('idle_'): |  | ||||||
|             return |  | ||||||
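|         # Disable every idle state deeper than the one being profiled, so |  | ||||||
|         # that the state under measurement is the deepest state the cluster |  | ||||||
|         # can enter during this iteration. |  | ||||||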
|         for idle_state in self.get_device_idle_states(self.measured_cluster): |  | ||||||
|             if idle_state.index > context.spec.idle_state_index: |  | ||||||
|                 idle_state.disable = 1 |  | ||||||
|             else: |  | ||||||
|                 idle_state.disable = 0 |  | ||||||
|  |  | ||||||
|     def fast_start(self, context):  # pylint: disable=unused-argument |  | ||||||
|         self.start_time = time.time() |  | ||||||
|  |  | ||||||
|     def fast_stop(self, context):  # pylint: disable=unused-argument |  | ||||||
|         self.run_time = time.time() - self.start_time |  | ||||||
|  |  | ||||||
|     def on_iteration_start(self, context): |  | ||||||
|         self.setup_measurement(context.spec.cluster) |  | ||||||
|  |  | ||||||
|     def thermal_correction(self, context): |  | ||||||
|         if not self.num_of_freqs_to_thermal_adjust or self.num_of_freqs_to_thermal_adjust > len(self.big_frequencies): |  | ||||||
|             return 0 |  | ||||||
|         freqs = self.big_frequencies[-self.num_of_freqs_to_thermal_adjust:] |  | ||||||
|         spec = context.result.spec |  | ||||||
|         if spec.frequency not in freqs: |  | ||||||
|             return 0 |  | ||||||
|         data_path = os.path.join(context.output_directory, 'daq', '{}.csv'.format(self.big_core)) |  | ||||||
|         data = pd.read_csv(data_path)['power'] |  | ||||||
|         return _adjust_for_thermal(data, filt_method=lambda x: pd.rolling_median(x, 1000), thresh=0.9, window=5000) |  | ||||||
|  |  | ||||||
|     # slow to make sure power results have been generated |  | ||||||
|     def slow_update_result(self, context):  # pylint: disable=too-many-branches |  | ||||||
|         spec = context.result.spec |  | ||||||
|         cluster = spec.cluster |  | ||||||
|         is_freq_iteration = spec.label.startswith('freq_') |  | ||||||
|         perf_metric = 0 |  | ||||||
|         power_metric = 0 |  | ||||||
|         thermal_adjusted_power = 0 |  | ||||||
|         if is_freq_iteration and cluster == 'big': |  | ||||||
|             thermal_adjusted_power = self.thermal_correction(context) |  | ||||||
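|         # Accumulate power directly from power metrics; energy metrics are |  | ||||||
|         # converted to average power by dividing by the measured run time. |  | ||||||
|         # If a thermal correction is available (big cluster, adjusted |  | ||||||
|         # frequencies), it is used in place of the measured value. |  | ||||||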
|         for metric in context.result.metrics: |  | ||||||
|             if metric.name == self.performance_metric: |  | ||||||
|                 perf_metric = metric.value |  | ||||||
|             elif thermal_adjusted_power and metric.name in self.big_power_metrics: |  | ||||||
|                 power_metric += thermal_adjusted_power * self.power_scaling_factor |  | ||||||
|             elif (cluster == 'big') and metric.name in self.big_power_metrics: |  | ||||||
|                 power_metric += metric.value * self.power_scaling_factor |  | ||||||
|             elif (cluster == 'little') and metric.name in self.little_power_metrics: |  | ||||||
|                 power_metric += metric.value * self.power_scaling_factor |  | ||||||
|             elif thermal_adjusted_power and metric.name in self.big_energy_metrics: |  | ||||||
|                 power_metric += thermal_adjusted_power / self.run_time * self.power_scaling_factor |  | ||||||
|             elif (cluster == 'big') and metric.name in self.big_energy_metrics: |  | ||||||
|                 power_metric += metric.value / self.run_time * self.power_scaling_factor |  | ||||||
|             elif (cluster == 'little') and metric.name in self.little_energy_metrics: |  | ||||||
|                 power_metric += metric.value / self.run_time * self.power_scaling_factor |  | ||||||
|  |  | ||||||
|         if not (power_metric and (perf_metric or not is_freq_iteration)): |  | ||||||
|             message = 'Incomplete results for {} iteration {}' |  | ||||||
|             raise InstrumentError(message.format(context.result.spec.id, context.current_iteration)) |  | ||||||
|  |  | ||||||
|         if is_freq_iteration: |  | ||||||
|             index_matter = [cluster, spec.num_cpus, |  | ||||||
|                             spec.frequency, context.result.iteration] |  | ||||||
|             data = self.freq_data |  | ||||||
|         else: |  | ||||||
|             index_matter = [cluster, spec.num_cpus, |  | ||||||
|                             spec.idle_state_id, spec.idle_state_desc, context.result.iteration] |  | ||||||
|             data = self.idle_data |  | ||||||
|             if self.no_hotplug: |  | ||||||
|                 # Because hotplugging was disabled, power has to be artificially scaled |  | ||||||
|                 # to the number of cores that would have been active had hotplugging occurred. |  | ||||||
|                 power_metric = spec.num_cpus * (power_metric / self.number_of_cpus[cluster]) |  | ||||||
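|                 # Illustration (hypothetical numbers): 4 CPUs in the |  | ||||||
|                 # cluster, 400mW measured, spec.num_cpus == 2: |  | ||||||
|                 #   2 * (400 / 4) == 200mW attributed to this iteration. |  | ||||||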
|  |  | ||||||
|         data.append(index_matter + ['performance', perf_metric]) |  | ||||||
|         data.append(index_matter + ['power', power_metric]) |  | ||||||
|  |  | ||||||
|     def before_overall_results_processing(self, context): |  | ||||||
|         # pylint: disable=too-many-locals |  | ||||||
|         if not self.idle_data or not self.freq_data: |  | ||||||
|             self.logger.warning('Run aborted early; not generating energy_model.') |  | ||||||
|             return |  | ||||||
|         output_directory = os.path.join(context.output_directory, 'energy_model') |  | ||||||
|         os.makedirs(output_directory) |  | ||||||
|  |  | ||||||
|         df = pd.DataFrame(self.idle_data, columns=['cluster', 'cpus', 'state_id', |  | ||||||
|                                                    'state', 'iteration', 'metric', 'value']) |  | ||||||
|         idle_power_table = wa_result_to_power_perf_table(df, '', index=['cluster', 'cpus', 'state']) |  | ||||||
|         idle_output = os.path.join(output_directory, IDLE_TABLE_FILE) |  | ||||||
|         with open(idle_output, 'w') as wfh: |  | ||||||
|             idle_power_table.to_csv(wfh, index=False) |  | ||||||
|         context.add_artifact('idle_power_table', idle_output, 'export') |  | ||||||
|  |  | ||||||
|         df = pd.DataFrame(self.freq_data, |  | ||||||
|                           columns=['cluster', 'cpus', 'frequency', 'iteration', 'metric', 'value']) |  | ||||||
|         freq_power_table = wa_result_to_power_perf_table(df, self.performance_metric, |  | ||||||
|                                                          index=['cluster', 'cpus', 'frequency']) |  | ||||||
|         freq_output = os.path.join(output_directory, FREQ_TABLE_FILE) |  | ||||||
|         with open(freq_output, 'w') as wfh: |  | ||||||
|             freq_power_table.to_csv(wfh, index=False) |  | ||||||
|         context.add_artifact('freq_power_table', freq_output, 'export') |  | ||||||
|  |  | ||||||
|         if self.big_opps is None or self.little_opps is None: |  | ||||||
|             message = 'OPPs not specified for one or both clusters; cluster power will not be adjusted for leakage.' |  | ||||||
|             self.logger.warning(message) |  | ||||||
|         opps = {'big': self.big_opps, 'little': self.little_opps} |  | ||||||
|         leakages = {'big': self.big_leakage, 'little': self.little_leakage} |  | ||||||
|         try: |  | ||||||
|             measured_cpus_table, cpus_table = get_cpus_power_table(freq_power_table, 'frequency', opps, leakages) |  | ||||||
|         except (ValueError, KeyError, IndexError) as e: |  | ||||||
|             self.logger.error('Could not create cpu power tables: {}'.format(e)) |  | ||||||
|             return |  | ||||||
|         measured_cpus_output = os.path.join(output_directory, MEASURED_CPUS_TABLE_FILE) |  | ||||||
|         with open(measured_cpus_output, 'w') as wfh: |  | ||||||
|             measured_cpus_table.to_csv(wfh) |  | ||||||
|         context.add_artifact('measured_cpus_table', measured_cpus_output, 'export') |  | ||||||
|         cpus_output = os.path.join(output_directory, CPUS_TABLE_FILE) |  | ||||||
|         with open(cpus_output, 'w') as wfh: |  | ||||||
|             cpus_table.to_csv(wfh) |  | ||||||
|         context.add_artifact('cpus_table', cpus_output, 'export') |  | ||||||
|  |  | ||||||
|         em = build_energy_model(freq_power_table, cpus_table, idle_power_table, self.first_cluster_idle_state) |  | ||||||
|         em_file = os.path.join(output_directory, '{}_em.c'.format(self.device_name)) |  | ||||||
|         em_text = generate_em_c_file(em, self.big_core, self.little_core, |  | ||||||
|                                      self.em_template_file, em_file) |  | ||||||
|         context.add_artifact('em', em_file, 'data') |  | ||||||
|  |  | ||||||
|         report_file = os.path.join(output_directory, 'report.html') |  | ||||||
|         generate_report(freq_power_table, measured_cpus_table, cpus_table, |  | ||||||
|                         idle_power_table, self.report_template_file, |  | ||||||
|                         self.device_name, em_text, report_file) |  | ||||||
|         context.add_artifact('pm_report', report_file, 'export') |  | ||||||
|  |  | ||||||
|     def initialize_result_tracking(self): |  | ||||||
|         self.freq_data = [] |  | ||||||
|         self.idle_data = [] |  | ||||||
|         self.big_power_metrics = [] |  | ||||||
|         self.little_power_metrics = [] |  | ||||||
|         self.big_energy_metrics = [] |  | ||||||
|         self.little_energy_metrics = [] |  | ||||||
|         if self.power_metric: |  | ||||||
|             self.big_power_metrics = [pm.format(core=self.big_core) for pm in self.power_metric] |  | ||||||
|             self.little_power_metrics = [pm.format(core=self.little_core) for pm in self.power_metric] |  | ||||||
|         else:  # must be energy_metric |  | ||||||
|             self.big_energy_metrics = [em.format(core=self.big_core) for em in self.energy_metric] |  | ||||||
|             self.little_energy_metrics = [em.format(core=self.little_core) for em in self.energy_metric] |  | ||||||
|  |  | ||||||
|     def configure_clusters(self): |  | ||||||
|         self.measured_cores = None |  | ||||||
|         self.measuring_cores = None |  | ||||||
|         self.cpuset = self.device.get_cgroup_controller('cpuset') |  | ||||||
|         self.cpuset.create_group('big', self.big_cpus, [0]) |  | ||||||
|         self.cpuset.create_group('little', self.little_cpus, [0]) |  | ||||||
|         for cluster in set(self.device.core_clusters): |  | ||||||
|             self.device.set_cluster_governor(cluster, 'userspace') |  | ||||||
|  |  | ||||||
|     def discover_idle_states(self): |  | ||||||
|         online_cpu = self.device.get_online_cpus(self.big_core)[0] |  | ||||||
|         self.big_idle_states = self.device.get_cpuidle_states(online_cpu) |  | ||||||
|         online_cpu = self.device.get_online_cpus(self.little_core)[0] |  | ||||||
|         self.little_idle_states = self.device.get_cpuidle_states(online_cpu) |  | ||||||
|         if not (len(self.big_idle_states) >= 2 and len(self.little_idle_states) >= 2): |  | ||||||
|             raise DeviceError('There do not appear to be at least two idle states ' |  | ||||||
|                               'on at least one of the clusters.') |  | ||||||
|  |  | ||||||
|     def setup_measurement(self, measured): |  | ||||||
|         measuring = 'big' if measured == 'little' else 'little' |  | ||||||
|         self.measured_cluster = measured |  | ||||||
|         self.measuring_cluster = measuring |  | ||||||
|         self.measured_cpus = self.big_cpus if measured == 'big' else self.little_cpus |  | ||||||
|         self.measuring_cpus = self.little_cpus if measured == 'big' else self.big_cpus |  | ||||||
|         self.reset() |  | ||||||
|  |  | ||||||
|     def reset(self): |  | ||||||
|         self.enable_all_cores() |  | ||||||
|         self.enable_all_idle_states() |  | ||||||
|         self.reset_cgroups() |  | ||||||
|         self.cpuset.move_all_tasks_to(self.measuring_cluster) |  | ||||||
|         server_process = 'adbd' if self.device.os == 'android' else 'sshd' |  | ||||||
|         server_pids = self.device.get_pids_of(server_process) |  | ||||||
|         children_ps = [e for e in self.device.ps() |  | ||||||
|                        if e.ppid in server_pids and e.name != 'sshd'] |  | ||||||
|         children_pids = [e.pid for e in children_ps] |  | ||||||
|         pids_to_move = server_pids + children_pids |  | ||||||
|         self.cpuset.root.add_tasks(pids_to_move) |  | ||||||
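|         # Pin the server processes to the measuring cluster so that they do |  | ||||||
|         # not pollute measurements on the measured cluster. list_to_mask |  | ||||||
|         # turns a CPU list into an affinity bitmask, e.g. [0, 1, 2] -> 0x7. |  | ||||||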
|         for pid in pids_to_move: |  | ||||||
|             try: |  | ||||||
|                 self.device.execute('busybox taskset -p 0x{:x} {}'.format(list_to_mask(self.measuring_cpus), pid)) |  | ||||||
|             except DeviceError: |  | ||||||
|                 pass |  | ||||||
|  |  | ||||||
|     def enable_all_cores(self): |  | ||||||
|         counter = Counter(self.device.core_names) |  | ||||||
|         for core, number in counter.iteritems(): |  | ||||||
|             self.device.set_number_of_online_cpus(core, number) |  | ||||||
|         self.big_cpus = self.device.get_online_cpus(self.big_core) |  | ||||||
|         self.little_cpus = self.device.get_online_cpus(self.little_core) |  | ||||||
|  |  | ||||||
|     def enable_all_idle_states(self): |  | ||||||
|         for cpu in self.device.online_cpus: |  | ||||||
|             for state in self.device.get_cpuidle_states(cpu): |  | ||||||
|                 state.disable = 0 |  | ||||||
|  |  | ||||||
|     def reset_cgroups(self): |  | ||||||
|         self.big_cpus = self.device.get_online_cpus(self.big_core) |  | ||||||
|         self.little_cpus = self.device.get_online_cpus(self.little_core) |  | ||||||
|         self.cpuset.big.set(self.big_cpus, 0) |  | ||||||
|         self.cpuset.little.set(self.little_cpus, 0) |  | ||||||
|  |  | ||||||
|     def perform_runtime_validation(self): |  | ||||||
|         if not self.device.is_rooted: |  | ||||||
|             raise InstrumentError('the device must be rooted to generate energy models') |  | ||||||
|         if 'userspace' not in self.device.list_available_cluster_governors(0): |  | ||||||
|             raise InstrumentError('userspace cpufreq governor must be enabled') |  | ||||||
|  |  | ||||||
|         error_message = 'Frequency {} is not supported by {} cores' |  | ||||||
|         available_frequencies = self.device.list_available_core_frequencies(self.big_core) |  | ||||||
|         if self.big_frequencies: |  | ||||||
|             for freq in self.big_frequencies: |  | ||||||
|                 if freq not in available_frequencies: |  | ||||||
|                     raise ConfigError(error_message.format(freq, self.big_core)) |  | ||||||
|         else: |  | ||||||
|             self.big_frequencies = available_frequencies |  | ||||||
|         available_frequencies = self.device.list_available_core_frequencies(self.little_core) |  | ||||||
|         if self.little_frequencies: |  | ||||||
|             for freq in self.little_frequencies: |  | ||||||
|                 if freq not in available_frequencies: |  | ||||||
|                     raise ConfigError(error_message.format(freq, self.little_core)) |  | ||||||
|         else: |  | ||||||
|             self.little_frequencies = available_frequencies |  | ||||||
|  |  | ||||||
|     def initialize_job_queue(self, context): |  | ||||||
|         old_specs = [] |  | ||||||
|         for job in context.runner.job_queue: |  | ||||||
|             if job.spec not in old_specs: |  | ||||||
|                 old_specs.append(job.spec) |  | ||||||
|         new_specs = self.get_cluster_specs(old_specs, 'big', context) |  | ||||||
|         new_specs.extend(self.get_cluster_specs(old_specs, 'little', context)) |  | ||||||
|  |  | ||||||
|         # Update the config to reflect the jobs that will actually run. |  | ||||||
|         context.config.workload_specs = new_specs |  | ||||||
|         config_file = os.path.join(context.host_working_directory, 'run_config.json') |  | ||||||
|         with open(config_file, 'wb') as wfh: |  | ||||||
|             context.config.serialize(wfh) |  | ||||||
|  |  | ||||||
|         context.runner.init_queue(new_specs) |  | ||||||
|  |  | ||||||
|     def get_cluster_specs(self, old_specs, cluster, context): |  | ||||||
|         core = self.get_core_name(cluster) |  | ||||||
|         self.number_of_cpus[cluster] = sum([1 for c in self.device.core_names if c == core]) |  | ||||||
|  |  | ||||||
|         cluster_frequencies = self.get_frequencies_param(cluster) |  | ||||||
|         if not cluster_frequencies: |  | ||||||
|             raise InstrumentError('Could not read available frequencies for {}'.format(core)) |  | ||||||
|         min_frequency = min(cluster_frequencies) |  | ||||||
|  |  | ||||||
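|         # Idle-state measurements are run at the lowest available frequency |  | ||||||
|         # so that the dynamic power contribution is kept to a minimum. |  | ||||||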
|         idle_states = self.get_device_idle_states(cluster) |  | ||||||
|         new_specs = [] |  | ||||||
|         for state in idle_states: |  | ||||||
|             for num_cpus in xrange(1, self.number_of_cpus[cluster] + 1): |  | ||||||
|                 spec = old_specs[0].copy() |  | ||||||
|                 spec.workload_name = self.idle_workload |  | ||||||
|                 spec.workload_parameters = self.idle_workload_params |  | ||||||
|                 spec.idle_state_id = state.id |  | ||||||
|                 spec.idle_state_desc = state.desc |  | ||||||
|                 spec.idle_state_index = state.index |  | ||||||
|                 if not self.no_hotplug: |  | ||||||
|                     spec.runtime_parameters['{}_cores'.format(core)] = num_cpus |  | ||||||
|                 spec.runtime_parameters['{}_frequency'.format(core)] = min_frequency |  | ||||||
|                 spec.runtime_parameters['ui'] = 'off' |  | ||||||
|                 spec.cluster = cluster |  | ||||||
|                 spec.num_cpus = num_cpus |  | ||||||
|                 spec.id = '{}_idle_{}_{}'.format(cluster, state.id, num_cpus) |  | ||||||
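|                 # e.g. 'big_idle_C1_2' (hypothetical state id): cluster, |  | ||||||
|                 # idle state, number of active CPUs |  | ||||||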
|                 spec.label = 'idle_{}'.format(cluster) |  | ||||||
|                 spec.number_of_iterations = old_specs[0].number_of_iterations |  | ||||||
|                 spec.load(self.device, context.config.ext_loader) |  | ||||||
|                 spec.workload.init_resources(context) |  | ||||||
|                 spec.workload.validate() |  | ||||||
|                 new_specs.append(spec) |  | ||||||
|         for old_spec in old_specs: |  | ||||||
|             if old_spec.workload_name not in ['sysbench', 'dhrystone']: |  | ||||||
|                 raise ConfigError('Only sysbench and dhrystone workloads currently supported for energy_model generation.') |  | ||||||
|             for freq in cluster_frequencies: |  | ||||||
|                 for num_cpus in xrange(1, self.number_of_cpus[cluster] + 1): |  | ||||||
|                     spec = old_spec.copy() |  | ||||||
|                     spec.runtime_parameters['{}_frequency'.format(core)] = freq |  | ||||||
|                     if not self.no_hotplug: |  | ||||||
|                         spec.runtime_parameters['{}_cores'.format(core)] = num_cpus |  | ||||||
|                     spec.runtime_parameters['ui'] = 'off' |  | ||||||
|                     spec.id = '{}_{}_{}'.format(cluster, num_cpus, freq) |  | ||||||
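|                     # e.g. 'big_2_1800000': cluster, number of CPUs, |  | ||||||
|                     # frequency (illustrative kHz value) |  | ||||||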
|                     spec.label = 'freq_{}_{}'.format(cluster, spec.label) |  | ||||||
|                     spec.workload_parameters['taskset_mask'] = list_to_mask(self.get_cpus(cluster)) |  | ||||||
|                     spec.workload_parameters['threads'] = num_cpus |  | ||||||
|                     if old_spec.workload_name == 'sysbench': |  | ||||||
|                         # max_requests is set to an arbitrarily high value to make sure |  | ||||||
|                         # sysbench runs for the full duration even on highly |  | ||||||
|                         # performant cores. |  | ||||||
|                         spec.workload_parameters['max_requests'] = 10000000 |  | ||||||
|                     spec.cluster = cluster |  | ||||||
|                     spec.num_cpus = num_cpus |  | ||||||
|                     spec.frequency = freq |  | ||||||
|                     spec.load(self.device, context.config.ext_loader) |  | ||||||
|                     spec.workload.init_resources(context) |  | ||||||
|                     spec.workload.validate() |  | ||||||
|                     new_specs.append(spec) |  | ||||||
|         return new_specs |  | ||||||
|  |  | ||||||
|     def disable_thermal_management(self): |  | ||||||
|         if self.device.file_exists('/sys/class/thermal/thermal_zone0'): |  | ||||||
|             tzone_paths = self.device.execute('ls /sys/class/thermal/thermal_zone*') |  | ||||||
|             for tzpath in tzone_paths.strip().split(): |  | ||||||
|                 mode_file = '{}/mode'.format(tzpath) |  | ||||||
|                 if self.device.file_exists(mode_file): |  | ||||||
|                     self.device.write_value(mode_file, 'disabled') |  | ||||||
|  |  | ||||||
|     def get_device_idle_states(self, cluster): |  | ||||||
|         if cluster == 'big': |  | ||||||
|             online_cpus = self.device.get_online_cpus(self.big_core) |  | ||||||
|         else: |  | ||||||
|             online_cpus = self.device.get_online_cpus(self.little_core) |  | ||||||
|         idle_states = [] |  | ||||||
|         for cpu in online_cpus: |  | ||||||
|             idle_states.extend(self.device.get_cpuidle_states(cpu)) |  | ||||||
|         return idle_states |  | ||||||
|  |  | ||||||
|     def get_core_name(self, cluster): |  | ||||||
|         if cluster == 'big': |  | ||||||
|             return self.big_core |  | ||||||
|         else: |  | ||||||
|             return self.little_core |  | ||||||
|  |  | ||||||
|     def get_cpus(self, cluster): |  | ||||||
|         if cluster == 'big': |  | ||||||
|             return self.big_cpus |  | ||||||
|         else: |  | ||||||
|             return self.little_cpus |  | ||||||
|  |  | ||||||
|     def get_frequencies_param(self, cluster): |  | ||||||
|         if cluster == 'big': |  | ||||||
|             return self.big_frequencies |  | ||||||
|         else: |  | ||||||
|             return self.little_frequencies |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def _adjust_for_thermal(data, filt_method=lambda x: x, thresh=0.9, window=5000, tdiff_threshold=10000): |  | ||||||
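|     # Heuristic step detection: find the last large positive jump |  | ||||||
|     # (index_up) and the first large negative jump (index_down) in the |  | ||||||
|     # first difference of the filtered signal, then average a window just |  | ||||||
|     # after the rise and just before the fall. If the two averages and the |  | ||||||
|     # gap between the jumps are consistent with a sustained plateau, the |  | ||||||
|     # post-rise average is returned as the thermally-adjusted power; |  | ||||||
|     # otherwise 0 is returned and no adjustment is applied. |  | ||||||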
|     n = filt_method(data) |  | ||||||
|     n = n[~np.isnan(n)]  # pylint: disable=no-member |  | ||||||
|  |  | ||||||
|     d = np.diff(n)  # pylint: disable=no-member |  | ||||||
|     d = d[~np.isnan(d)]  # pylint: disable=no-member |  | ||||||
|     dmin = min(d) |  | ||||||
|     dmax = max(d) |  | ||||||
|  |  | ||||||
|     index_up = np.max((d > dmax * thresh).nonzero())  # pylint: disable=no-member |  | ||||||
|     index_down = np.min((d < dmin * thresh).nonzero())  # pylint: disable=no-member |  | ||||||
|     low_average = np.average(n[index_up:index_up + window])  # pylint: disable=no-member |  | ||||||
|     high_average = np.average(n[index_down - window:index_down])  # pylint: disable=no-member |  | ||||||
|     if low_average > high_average or index_down - index_up < tdiff_threshold: |  | ||||||
|         return 0 |  | ||||||
|     else: |  | ||||||
|         return low_average |  | ||||||
|  |  | ||||||
|  |  | ||||||
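| # Standalone entry point: regenerate the EM C file and HTML report from |  | ||||||
| # tables produced by a previous run, e.g. (hypothetical module/paths): |  | ||||||
| # |  | ||||||
| #   python energy_model.py wa_output/energy_model/ regenerated/ |  | ||||||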
| if __name__ == '__main__': |  | ||||||
|     import sys   # pylint: disable=wrong-import-position,wrong-import-order |  | ||||||
|     indir, outdir = sys.argv[1], sys.argv[2] |  | ||||||
|     device_name = 'odroidxu3' |  | ||||||
|     big_core = 'a15' |  | ||||||
|     little_core = 'a7' |  | ||||||
|     first_cluster_idle_state = -1 |  | ||||||
|  |  | ||||||
|     this_dir = os.path.dirname(__file__) |  | ||||||
|     report_template_file = os.path.join(this_dir, REPORT_TEMPLATE_FILE) |  | ||||||
|     em_template_file = os.path.join(this_dir, EM_TEMPLATE_FILE) |  | ||||||
|  |  | ||||||
|     freq_power_table = pd.read_csv(os.path.join(indir, FREQ_TABLE_FILE)) |  | ||||||
|     measured_cpus_table = pd.read_csv(os.path.join(indir, MEASURED_CPUS_TABLE_FILE), |  | ||||||
|                                       header=range(2), index_col=0) |  | ||||||
|     cpus_table = pd.read_csv(os.path.join(indir, CPUS_TABLE_FILE), |  | ||||||
|                              header=range(2), index_col=0) |  | ||||||
|     idle_power_table = pd.read_csv(os.path.join(indir, IDLE_TABLE_FILE)) |  | ||||||
|  |  | ||||||
|     if not os.path.exists(outdir): |  | ||||||
|         os.makedirs(outdir) |  | ||||||
|     report_file = os.path.join(outdir, 'report.html') |  | ||||||
|     em_file = os.path.join(outdir, '{}_em.c'.format(device_name)) |  | ||||||
|  |  | ||||||
|     em = build_energy_model(freq_power_table, cpus_table, |  | ||||||
|                             idle_power_table, first_cluster_idle_state) |  | ||||||
|     em_text = generate_em_c_file(em, big_core, little_core, |  | ||||||
|                                  em_template_file, em_file) |  | ||||||
|     generate_report(freq_power_table, measured_cpus_table, cpus_table, |  | ||||||
|                     idle_power_table, report_template_file, device_name, |  | ||||||
|                     em_text, report_file) |  | ||||||
| @@ -1,51 +0,0 @@ | |||||||
| static struct idle_state idle_states_cluster_{{ little_core|lower }}[] = { |  | ||||||
| 	{% for entry in em.little_cluster_idle_states -%} |  | ||||||
| 	 { .power = {{ entry.power }}, }, |  | ||||||
| 	{% endfor %} |  | ||||||
| 	}; |  | ||||||
|  |  | ||||||
| static struct idle_state idle_states_cluster_{{ big_core|lower }}[] = { |  | ||||||
| 	{% for entry in em.big_cluster_idle_states -%} |  | ||||||
| 	 { .power = {{ entry.power }}, }, |  | ||||||
| 	{% endfor %} |  | ||||||
| 	}; |  | ||||||
|  |  | ||||||
| static struct capacity_state cap_states_cluster_{{ little_core|lower }}[] = { |  | ||||||
| 	/* Power per cluster */ |  | ||||||
| 	{% for entry in em.little_cluster_cap_states -%} |  | ||||||
| 	 { .cap = {{ entry.cap }}, .power = {{ entry.power }}, }, |  | ||||||
| 	{% endfor %} |  | ||||||
| 	}; |  | ||||||
|  |  | ||||||
| static struct capacity_state cap_states_cluster_{{ big_core|lower }}[] = { |  | ||||||
| 	/* Power per cluster */ |  | ||||||
| 	{% for entry in em.big_cluster_cap_states -%} |  | ||||||
| 	 { .cap = {{ entry.cap }}, .power = {{ entry.power }}, }, |  | ||||||
| 	{% endfor %} |  | ||||||
| 	}; |  | ||||||
|  |  | ||||||
| static struct idle_state idle_states_core_{{ little_core|lower }}[] = { |  | ||||||
| 	{% for entry in em.little_core_idle_states -%} |  | ||||||
| 	 { .power = {{ entry.power }}, }, |  | ||||||
| 	{% endfor %} |  | ||||||
| 	}; |  | ||||||
|  |  | ||||||
| static struct idle_state idle_states_core_{{ big_core|lower }}[] = { |  | ||||||
| 	{% for entry in em.big_core_idle_states -%} |  | ||||||
| 	 { .power = {{ entry.power }}, }, |  | ||||||
| 	{% endfor %} |  | ||||||
| 	}; |  | ||||||
|  |  | ||||||
| static struct capacity_state cap_states_core_{{ little_core|lower }}[] = { |  | ||||||
| 	/* Power per cpu */ |  | ||||||
| 	{% for entry in em.little_core_cap_states -%} |  | ||||||
| 	 { .cap = {{ entry.cap }}, .power = {{ entry.power }}, }, |  | ||||||
| 	{% endfor %} |  | ||||||
| 	}; |  | ||||||
|  |  | ||||||
| static struct capacity_state cap_states_core_{{ big_core|lower }}[] = { |  | ||||||
| 	/* Power per cpu */ |  | ||||||
| 	{% for entry in em.big_core_cap_states -%} |  | ||||||
| 	 { .cap = {{ entry.cap }}, .power = {{ entry.power }}, }, |  | ||||||
| 	{% endfor %} |  | ||||||
| 	}; |  | ||||||
| @@ -1,123 +0,0 @@ | |||||||
| <html> |  | ||||||
| <body> |  | ||||||
| 	<style>    |  | ||||||
| 		.toggle-box { |  | ||||||
| 		  display: none; |  | ||||||
| 		} |  | ||||||
|  |  | ||||||
| 		.toggle-box + label { |  | ||||||
| 		  cursor: pointer; |  | ||||||
| 		  display: block; |  | ||||||
| 		  font-weight: bold; |  | ||||||
| 		  line-height: 21px; |  | ||||||
| 		  margin-bottom: 5px; |  | ||||||
| 		} |  | ||||||
|  |  | ||||||
| 		.toggle-box + label + div { |  | ||||||
| 		  display: none; |  | ||||||
| 		  margin-bottom: 10px; |  | ||||||
| 		} |  | ||||||
|  |  | ||||||
| 		.toggle-box:checked + label + div { |  | ||||||
| 		  display: block; |  | ||||||
| 		} |  | ||||||
|  |  | ||||||
| 		.toggle-box + label:before { |  | ||||||
| 		  background-color: #4F5150; |  | ||||||
| 		  -webkit-border-radius: 10px; |  | ||||||
| 		  -moz-border-radius: 10px; |  | ||||||
| 		  border-radius: 10px; |  | ||||||
| 		  color: #FFFFFF; |  | ||||||
| 		  content: "+"; |  | ||||||
| 		  display: block; |  | ||||||
| 		  float: left; |  | ||||||
| 		  font-weight: bold; |  | ||||||
| 		  height: 20px; |  | ||||||
| 		  line-height: 20px; |  | ||||||
| 		  margin-right: 5px; |  | ||||||
| 		  text-align: center; |  | ||||||
| 		  width: 20px; |  | ||||||
| 		} |  | ||||||
|  |  | ||||||
| 		.toggle-box:checked + label:before { |  | ||||||
| 		  content: "\2212"; |  | ||||||
| 		}  |  | ||||||
|  |  | ||||||
| 		.document { |  | ||||||
| 			width: 800px; |  | ||||||
| 			margin-left:auto; |  | ||||||
| 			margin-right:auto; |  | ||||||
| 		} |  | ||||||
|  |  | ||||||
| 		img { |  | ||||||
| 			margin-left:auto; |  | ||||||
| 			margin-right:auto; |  | ||||||
| 		} |  | ||||||
|  |  | ||||||
| 		h1.title { |  | ||||||
| 			text-align: center; |  | ||||||
| 		} |  | ||||||
| 	</style> |  | ||||||
|  |  | ||||||
| 	<div class="document"> |  | ||||||
| 		<h1 class="title">{{ device_name }} Energy Model Report</h1> |  | ||||||
|  |  | ||||||
| 		<h2>Power/Performance Analysis</h2> |  | ||||||
| 		<div> |  | ||||||
| 			<h3>Summary</h3> |  | ||||||
| 			At {{ cap_power_analysis.summary['frequency']|round(2) }} Hz<br /> |  | ||||||
| 			big is {{ cap_power_analysis.summary['performance_ratio']|round(2) }} times faster<br /> |  | ||||||
| 			big consumes {{ cap_power_analysis.summary['power_ratio']|round(2) }} times more power<br /> |  | ||||||
| 			<br /> |  | ||||||
| 			max performance: {{ cap_power_analysis.summary['max_performance']|round(2) }}<br /> |  | ||||||
| 			max power:  {{ cap_power_analysis.summary['max_power']|round(2) }}<br /> |  | ||||||
| 		</div> |  | ||||||
| 	 |  | ||||||
| 		<div> |  | ||||||
| 			<h3>Single Core Power/Performance Plot</h3> |  | ||||||
| 			These are the traditional power-performance curves for the single-core runs. |  | ||||||
| 			<img align="middle" width="600px" src="data:image/png;base64,{{ cap_power_plot }}" /> |  | ||||||
| 		</div> |  | ||||||
|  |  | ||||||
| 		<div> |  | ||||||
| 			<input class="toggle-box" id="freq_table" type="checkbox" > |  | ||||||
| 			<label for="freq_table">Expand to view all power/performance data</label> |  | ||||||
| 			<div> |  | ||||||
| 				{{ freq_power_table }} |  | ||||||
| 			</div> |  | ||||||
| 		</div> |  | ||||||
|  |  | ||||||
| 		<div> |  | ||||||
| 			<h3>CPUs Power Plot</h3> |  | ||||||
| 			Each line corresponds to the cluster running at a different OPP. Each |  | ||||||
| 			point corresponds to the average power with a certain number of CPUs |  | ||||||
| 			executing. To get the contribution of the cluster, the lines are |  | ||||||
| 			extrapolated to the left (the intercept is the average power of just the cluster). |  | ||||||
| 			<img align="middle" width="600px" src="data:image/png;base64,{{ cpus_plot }}" /> |  | ||||||
| 		</div> |  | ||||||
|  |  | ||||||
| 		<div> |  | ||||||
| 			<input class="toggle-box" id="cpus_table" type="checkbox" > |  | ||||||
| 			<label for="cpus_table">Expand to view CPUs power data</label> |  | ||||||
| 			<div> |  | ||||||
| 				{{ cpus_table }} |  | ||||||
| 			</div> |  | ||||||
| 		</div> |  | ||||||
| 		<div> |  | ||||||
| 			<h3>Idle Power</h3> |  | ||||||
| 			<img align="middle" width="600px" src="data:image/png;base64,{{ idle_power_plot }}" /> |  | ||||||
| 		</div> |  | ||||||
|  |  | ||||||
| 		<div> |  | ||||||
| 			<input class="toggle-box" id="idle_power_table" type="checkbox" > |  | ||||||
| 			<label for="idle_power_table">Expand to view idle power data</label> |  | ||||||
| 			<div> |  | ||||||
| 				{{ idle_power_table }} |  | ||||||
| 			</div> |  | ||||||
| 		</div> |  | ||||||
| 	</div> |  | ||||||
| </body> |  | ||||||
| </html> |  | ||||||
|  |  | ||||||
| <!-- vim: ft=htmljinja  |  | ||||||
| --> |  | ||||||
| @@ -1,147 +0,0 @@ | |||||||
| #    Copyright 2013-2015 ARM Limited |  | ||||||
| # |  | ||||||
| # Licensed under the Apache License, Version 2.0 (the "License"); |  | ||||||
| # you may not use this file except in compliance with the License. |  | ||||||
| # You may obtain a copy of the License at |  | ||||||
| # |  | ||||||
| #     http://www.apache.org/licenses/LICENSE-2.0 |  | ||||||
| # |  | ||||||
| # Unless required by applicable law or agreed to in writing, software |  | ||||||
| # distributed under the License is distributed on an "AS IS" BASIS, |  | ||||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |  | ||||||
| # See the License for the specific language governing permissions and |  | ||||||
| # limitations under the License. |  | ||||||
| # |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # pylint: disable=W0613,E1101,access-member-before-definition,attribute-defined-outside-init |  | ||||||
| import os |  | ||||||
| import subprocess |  | ||||||
| import signal |  | ||||||
| import struct |  | ||||||
| import csv |  | ||||||
| try: |  | ||||||
|     import pandas |  | ||||||
| except ImportError: |  | ||||||
|     pandas = None |  | ||||||
|  |  | ||||||
| from wlauto import Instrument, Parameter, Executable |  | ||||||
| from wlauto.exceptions import InstrumentError, ConfigError |  | ||||||
| from wlauto.utils.types import list_of_numbers |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class EnergyProbe(Instrument): |  | ||||||
|  |  | ||||||
|     name = 'energy_probe' |  | ||||||
|     description = """Collects power traces using the ARM energy probe. |  | ||||||
|  |  | ||||||
|                      This instrument requires ``caiman`` utility to be installed in the workload automation |  | ||||||
|                      host and be in the PATH. Caiman is part of DS-5 and should be in ``/path/to/DS-5/bin/`` . |  | ||||||
|                      Energy probe can simultaneously collect energy from up to 3 power rails. |  | ||||||
|  |  | ||||||
|                      To connect the energy probe on a rail, connect the white wire to the pin that is closer to the |  | ||||||
|                      Voltage source and the black wire to the pin that is closer to the load (the SoC or the device |  | ||||||
|                      you are probing). Between the pins there should be a shunt resistor of known resistance in the |  | ||||||
|                      range of 5 to 20 mOhm. The resistance of the shunt resistors is a mandatory parameter |  | ||||||
|                      ``resistor_values``. |  | ||||||
|  |  | ||||||
|                     .. note:: This instrument can process results a lot faster if python pandas is installed. |  | ||||||
|                     """ |  | ||||||
|  |  | ||||||
|     parameters = [ |  | ||||||
|         Parameter('resistor_values', kind=list_of_numbers, default=[], |  | ||||||
|                   description="""The value of shunt resistors. This is a mandatory parameter."""), |  | ||||||
|         Parameter('labels', kind=list, default=[], |  | ||||||
|                   description="""Meaningful labels for each of the monitored rails."""), |  | ||||||
|         Parameter('device_entry', kind=str, default='/dev/ttyACM0', |  | ||||||
|                   description="""Path to /dev entry for the energy probe (it should be /dev/ttyACMx)"""), |  | ||||||
|     ] |  | ||||||
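|     # Example (hypothetical) agenda snippet enabling this instrument: |  | ||||||
|     # |  | ||||||
|     #   instrumentation: [energy_probe] |  | ||||||
|     #   energy_probe: |  | ||||||
|     #       resistor_values: [20, 10]   # one per monitored rail, in mOhm |  | ||||||
|     #       labels: [A15, A7] |  | ||||||
|     #       device_entry: /dev/ttyACM0 |  | ||||||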
|  |  | ||||||
|     MAX_CHANNELS = 3 |  | ||||||
|  |  | ||||||
|     def __init__(self, device, **kwargs): |  | ||||||
|         super(EnergyProbe, self).__init__(device, **kwargs) |  | ||||||
|         self.attributes_per_sample = 3 |  | ||||||
|         self.bytes_per_sample = self.attributes_per_sample * 4 |  | ||||||
|         self.attributes = ['power', 'voltage', 'current'] |  | ||||||
|         for i, val in enumerate(self.resistor_values): |  | ||||||
|             self.resistor_values[i] = int(1000 * float(val)) |  | ||||||
|  |  | ||||||
|     def validate(self): |  | ||||||
|         if subprocess.call('which caiman', stdout=subprocess.PIPE, shell=True): |  | ||||||
|             raise InstrumentError('caiman not in PATH. Cannot enable energy probe') |  | ||||||
|         if not self.resistor_values: |  | ||||||
|             raise ConfigError('At least one resistor value must be specified') |  | ||||||
|         if len(self.resistor_values) > self.MAX_CHANNELS: |  | ||||||
|             raise ConfigError('{} channels were specified, but the Energy Probe supports up to {}' |  | ||||||
|                               .format(len(self.resistor_values), self.MAX_CHANNELS)) |  | ||||||
|         if pandas is None: |  | ||||||
|             self.logger.warning("pandas package will significantly speed up this instrument") |  | ||||||
|             self.logger.warning("to install it try: pip install pandas") |  | ||||||
|  |  | ||||||
|     def setup(self, context): |  | ||||||
|         if not self.labels: |  | ||||||
|             self.labels = ["PORT_{}".format(channel) for channel, _ in enumerate(self.resistor_values)] |  | ||||||
|         self.output_directory = os.path.join(context.output_directory, 'energy_probe') |  | ||||||
|         rstring = "" |  | ||||||
|         for i, rval in enumerate(self.resistor_values): |  | ||||||
|             rstring += '-r {}:{} '.format(i, rval) |  | ||||||
|         self.command = 'caiman -d {} -l {} {}'.format(self.device_entry, rstring, self.output_directory) |  | ||||||
|         os.makedirs(self.output_directory) |  | ||||||
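|         # e.g. with resistor_values [20, 10] (scaled x1000 in __init__) the |  | ||||||
|         # resulting command is: |  | ||||||
|         #   caiman -d /dev/ttyACM0 -l -r 0:20000 -r 1:10000 <output_directory> |  | ||||||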
|  |  | ||||||
|     def start(self, context): |  | ||||||
|         self.logger.debug(self.command) |  | ||||||
|         self.caiman = subprocess.Popen(self.command, |  | ||||||
|                                        stdout=subprocess.PIPE, |  | ||||||
|                                        stderr=subprocess.PIPE, |  | ||||||
|                                        stdin=subprocess.PIPE, |  | ||||||
|                                        preexec_fn=os.setpgrp, |  | ||||||
|                                        shell=True) |  | ||||||
|  |  | ||||||
|     def stop(self, context): |  | ||||||
|         os.killpg(self.caiman.pid, signal.SIGTERM) |  | ||||||
|  |  | ||||||
|     def update_result(self, context):  # pylint: disable=too-many-locals |  | ||||||
|         num_of_channels = len(self.resistor_values) |  | ||||||
|         processed_data = [[] for _ in xrange(num_of_channels)] |  | ||||||
|         filenames = [os.path.join(self.output_directory, '{}.csv'.format(label)) for label in self.labels] |  | ||||||
|         struct_format = '{}I'.format(num_of_channels * self.attributes_per_sample) |  | ||||||
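|         # e.g. 2 channels x 3 attributes -> '6I': six unsigned 32-bit |  | ||||||
|         # values (power, voltage, current per channel), 24 bytes per row |  | ||||||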
|         not_a_full_row_seen = False |  | ||||||
|         with open(os.path.join(self.output_directory, "0000000000"), "rb") as bfile: |  | ||||||
|             while True: |  | ||||||
|                 data = bfile.read(num_of_channels * self.bytes_per_sample) |  | ||||||
|                 if data == '': |  | ||||||
|                     break |  | ||||||
|                 try: |  | ||||||
|                     unpacked_data = struct.unpack(struct_format, data) |  | ||||||
|                 except struct.error: |  | ||||||
|                     if not_a_full_row_seen: |  | ||||||
|                         self.logger.warn('possibly misaligned caiman raw data, row contained {} bytes'.format(len(data))) |  | ||||||
|                         continue |  | ||||||
|                     else: |  | ||||||
|                         not_a_full_row_seen = True |  | ||||||
|                         continue  # skip the partial row instead of re-using stale data |  | ||||||
|                 for i in xrange(num_of_channels): |  | ||||||
|                     index = i * self.attributes_per_sample |  | ||||||
|                     processed_data[i].append({attr: val for attr, val in |  | ||||||
|                                               zip(self.attributes, unpacked_data[index:index + self.attributes_per_sample])}) |  | ||||||
|         for i, path in enumerate(filenames): |  | ||||||
|             with open(path, 'w') as f: |  | ||||||
|                 if pandas is not None: |  | ||||||
|                     self._pandas_produce_csv(processed_data[i], f) |  | ||||||
|                 else: |  | ||||||
|                     self._slow_produce_csv(processed_data[i], f) |  | ||||||
|  |  | ||||||
|     # pylint: disable=R0201 |  | ||||||
|     def _pandas_produce_csv(self, data, f): |  | ||||||
|         dframe = pandas.DataFrame(data) |  | ||||||
|         dframe = dframe / 1000.0 |  | ||||||
|         dframe.to_csv(f) |  | ||||||
|  |  | ||||||
|     def _slow_produce_csv(self, data, f): |  | ||||||
|         new_data = [] |  | ||||||
|         for entry in data: |  | ||||||
|             new_data.append({key: val / 1000.0 for key, val in entry.items()}) |  | ||||||
|         writer = csv.DictWriter(f, self.attributes) |  | ||||||
|         writer.writeheader() |  | ||||||
|         writer.writerows(new_data) |  | ||||||
|  |  | ||||||