mirror of https://github.com/ARM-software/workload-automation.git synced 2024-10-06 02:41:11 +01:00

Merge pull request #384 from setrofim/next

Fixes and tidy.
marcbonnici 2017-04-27 09:05:51 +01:00 committed by GitHub
commit c11a212674
445 changed files with 24 additions and 47722 deletions

@@ -1,17 +0,0 @@
#!/bin/bash
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
wa create workload $@

@@ -1,16 +0,0 @@
#!/bin/bash
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
wa list $@

@@ -1,17 +0,0 @@
#!/bin/bash
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
wa run $@

@@ -60,7 +60,7 @@ for root, dirs, files in os.walk(wlauto_dir):
scripts = [os.path.join('scripts', s) for s in os.listdir('scripts')]
params = dict(
name='wlauto',
name='wa',
description='A framework for automating workload execution and measurement collection on ARM devices.',
version=get_wa_version(),
packages=packages,

@@ -1,31 +0,0 @@
import uuid
import logging
from wa.framework import pluginloader
from wa.framework.plugin import Plugin
class JobActor(Plugin):
kind = 'job_actor'
def initialize(self, context):
pass
def run(self):
pass
def restart(self):
pass
def complete(self):
pass
def finalize(self):
pass
class NullJobActor(JobActor):
name = 'null-job-actor'

@@ -1,306 +0,0 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=E1101
import logging
import string
import threading
import subprocess
import colorama
from wa.framework import signal
from wa.framework.exception import WAError
from wa.utils.misc import get_traceback
COLOR_MAP = {
logging.DEBUG: colorama.Fore.BLUE,
logging.INFO: colorama.Fore.GREEN,
logging.WARNING: colorama.Fore.YELLOW,
logging.ERROR: colorama.Fore.RED,
logging.CRITICAL: colorama.Style.BRIGHT + colorama.Fore.RED,
}
RESET_COLOR = colorama.Style.RESET_ALL
_indent_level = 0
_indent_width = 4
_console_handler = None
def init(verbosity=logging.INFO, color=True, indent_with=4,
regular_fmt='%(levelname)-8s %(message)s',
verbose_fmt='%(asctime)s %(levelname)-8s %(name)-10.10s: %(message)s',
debug=False):
global _indent_width, _console_handler
_indent_width = indent_with
signal.log_error_func = lambda m: log_error(m, signal.logger)
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
error_handler = ErrorSignalHandler(logging.DEBUG)
root_logger.addHandler(error_handler)
_console_handler = logging.StreamHandler()
if color:
formatter = ColorFormatter
else:
formatter = LineFormatter
if verbosity:
_console_handler.setLevel(logging.DEBUG)
_console_handler.setFormatter(formatter(verbose_fmt))
else:
_console_handler.setLevel(logging.INFO)
_console_handler.setFormatter(formatter(regular_fmt))
root_logger.addHandler(_console_handler)
logging.basicConfig(level=logging.DEBUG)
if not debug:
logging.raiseExceptions = False
def set_level(level):
_console_handler.setLevel(level)
def add_file(filepath, level=logging.DEBUG,
fmt='%(asctime)s %(levelname)-8s %(name)s: %(message)-10.10s'):
root_logger = logging.getLogger()
file_handler = logging.FileHandler(filepath)
file_handler.setLevel(level)
file_handler.setFormatter(LineFormatter(fmt))
root_logger.addHandler(file_handler)
def enable(logs):
if isinstance(logs, list):
for log in logs:
__enable_logger(log)
else:
__enable_logger(logs)
def disable(logs):
if isinstance(logs, list):
for log in logs:
__disable_logger(log)
else:
__disable_logger(logs)
def __enable_logger(logger):
if isinstance(logger, basestring):
logger = logging.getLogger(logger)
logger.propagate = True
def __disable_logger(logger):
if isinstance(logger, basestring):
logger = logging.getLogger(logger)
logger.propagate = False
def indent():
global _indent_level
_indent_level += 1
def dedent():
global _indent_level
_indent_level -= 1
def log_error(e, logger, critical=False):
"""
Log the specified Exception as an error. The error message will be formatted
differently depending on the nature of the exception.
:e: the error to log. Should be an instance of ``Exception``
:logger: logger to be used.
:critical: if ``True``, this error will be logged at ``logging.CRITICAL``
level, otherwise it will be logged as ``logging.ERROR``.
"""
if critical:
log_func = logger.critical
else:
log_func = logger.error
if isinstance(e, KeyboardInterrupt):
log_func('Got CTRL-C. Aborting.')
elif isinstance(e, WAError):
log_func(e)
elif isinstance(e, subprocess.CalledProcessError):
tb = get_traceback()
log_func(tb)
command = e.cmd
if e.args:
command = '{} {}'.format(command, ' '.join(e.args))
message = 'Command \'{}\' returned non-zero exit status {}\nOUTPUT:\n{}\n'
log_func(message.format(command, e.returncode, e.output))
elif isinstance(e, SyntaxError):
tb = get_traceback()
log_func(tb)
message = 'Syntax Error in {}, line {}, offset {}:'
log_func(message.format(e.filename, e.lineno, e.offset))
log_func('\t{}'.format(e.msg))
else:
tb = get_traceback()
log_func(tb)
log_func('{}({})'.format(e.__class__.__name__, e))
class ErrorSignalHandler(logging.Handler):
"""
Emits signals for ERROR and WARNING level traces.
"""
def emit(self, record):
if record.levelno == logging.ERROR:
signal.send(signal.ERROR_LOGGED, self)
elif record.levelno == logging.WARNING:
signal.send(signal.WARNING_LOGGED, self)
class LineFormatter(logging.Formatter):
"""
Logs each line of the message separately.
"""
def format(self, record):
record.message = record.getMessage()
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
indent = _indent_width * _indent_level
d = record.__dict__
parts = []
for line in record.message.split('\n'):
line = ' ' * indent + line
d.update({'message': line.strip('\r')})
parts.append(self._fmt % d)
return '\n'.join(parts)
class ColorFormatter(LineFormatter):
"""
Formats logging records with color and prepends record info
to each line of the message.
BLUE for DEBUG logging level
GREEN for INFO logging level
YELLOW for WARNING logging level
RED for ERROR logging level
BOLD RED for CRITICAL logging level
"""
def __init__(self, fmt=None, datefmt=None):
super(ColorFormatter, self).__init__(fmt, datefmt)
template_text = self._fmt.replace('%(message)s', RESET_COLOR + '%(message)s${color}')
template_text = '${color}' + template_text + RESET_COLOR
self.fmt_template = string.Template(template_text)
def format(self, record):
self._set_color(COLOR_MAP[record.levelno])
return super(ColorFormatter, self).format(record)
def _set_color(self, color):
self._fmt = self.fmt_template.substitute(color=color)
class BaseLogWriter(object):
def __init__(self, name, level=logging.DEBUG):
"""
File-like object class designed to be used for logging from streams
Each complete line (terminated by a newline character) gets logged
at the specified level (DEBUG by default). Incomplete lines are buffered until the next newline.
:param name: The name of the logger that will be used.
"""
self.logger = logging.getLogger(name)
self.buffer = ''
if level == logging.DEBUG:
self.do_write = self.logger.debug
elif level == logging.INFO:
self.do_write = self.logger.info
elif level == logging.WARNING:
self.do_write = self.logger.warning
elif level == logging.ERROR:
self.do_write = self.logger.error
else:
raise Exception('Unknown logging level: {}'.format(level))
def flush(self):
# Defined to match the interface expected by pexpect.
return self
def close(self):
if self.buffer:
self.logger.debug(self.buffer)
self.buffer = ''
return self
def __del__(self):
# Ensure we don't lose buffered output
self.close()
class LogWriter(BaseLogWriter):
def write(self, data):
data = data.replace('\r\n', '\n').replace('\r', '\n')
if '\n' in data:
parts = data.split('\n')
parts[0] = self.buffer + parts[0]
for part in parts[:-1]:
self.do_write(part)
self.buffer = parts[-1]
else:
self.buffer += data
return self
class LineLogWriter(BaseLogWriter):
def write(self, data):
self.do_write(data)
class StreamLogger(threading.Thread):
"""
Logs output from a stream in a thread.
"""
def __init__(self, name, stream, level=logging.DEBUG, klass=LogWriter):
super(StreamLogger, self).__init__()
self.writer = klass(name, level)
self.stream = stream
self.daemon = True
def run(self):
line = self.stream.readline()
while line:
self.writer.write(line.rstrip('\n'))
line = self.stream.readline()
self.writer.close()
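
A minimal usage sketch of the logging helpers above, for orientation (the module path wa.framework.log is taken from imports elsewhere in this diff; Python 2, matching the original code):

import logging
from wa.framework import log

# Falsy verbosity selects the regular (INFO-level) console format; any truthy
# value selects the verbose DEBUG format, per init() above.
log.init(verbosity=0, color=False)
logger = logging.getLogger('example')
logger.info('top-level message')
log.indent()
logger.info('this line is indented one level by LineFormatter')
log.dedent()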

@@ -1,362 +0,0 @@
import os
import shutil
import logging
import uuid
from copy import copy
from datetime import datetime, timedelta
from wa.framework import signal, log
from wa.framework.configuration.core import merge_config_values
from wa.utils import serializer
from wa.utils.misc import enum_metaclass, ensure_directory_exists as _d
from wa.utils.types import numeric
class Status(object):
__metaclass__ = enum_metaclass('values', return_name=True)
values = [
'NEW',
'PENDING',
'RUNNING',
'COMPLETE',
'OK',
'OKISH',
'NONCRITICAL',
'PARTIAL',
'FAILED',
'ABORTED',
'SKIPPED',
'UNKNOWN',
]
class WAOutput(object):
basename = '.wa-output'
@classmethod
def load(cls, source):
if os.path.isfile(source):
pod = serializer.load(source)
elif os.path.isdir(source):
pod = serializer.load(os.path.join(source, cls.basename))
else:
message = 'Cannot load {} from {}'
raise ValueError(message.format(cls.__name__, source))
return cls.from_pod(pod)
@classmethod
def from_pod(cls, pod):
instance = cls(pod['output_directory'])
instance.status = pod['status']
instance.metrics = [Metric.from_pod(m) for m in pod['metrics']]
instance.artifacts = [Artifact.from_pod(a) for a in pod['artifacts']]
instance.events = [RunEvent.from_pod(e) for e in pod['events']]
instance.classifiers = pod['classifiers']
return instance
def __init__(self, output_directory):
self.logger = logging.getLogger('output')
self.output_directory = output_directory
self.status = Status.UNKNOWN
self.classifiers = {}
self.metrics = []
self.artifacts = []
self.events = []
def initialize(self, overwrite=False):
if os.path.exists(self.output_directory):
if not overwrite:
raise RuntimeError('"{}" already exists.'.format(self.output_directory))
self.logger.info('Removing existing output directory.')
shutil.rmtree(self.output_directory)
self.logger.debug('Creating output directory {}'.format(self.output_directory))
os.makedirs(self.output_directory)
def add_metric(self, name, value, units=None, lower_is_better=False, classifiers=None):
classifiers = merge_config_values(self.classifiers, classifiers or {})
self.metrics.append(Metric(name, value, units, lower_is_better, classifiers))
def add_artifact(self, name, path, kind, *args, **kwargs):
path = _check_artifact_path(path, self.output_directory)
self.artifacts.append(Artifact(name, path, kind, Artifact.RUN, *args, **kwargs))
def get_path(self, subpath):
return os.path.join(self.output_directory, subpath)
def to_pod(self):
return {
'output_directory': self.output_directory,
'status': self.status,
'metrics': [m.to_pod() for m in self.metrics],
'artifacts': [a.to_pod() for a in self.artifacts],
'events': [e.to_pod() for e in self.events],
'classifiers': copy(self.classifiers),
}
def persist(self):
statefile = os.path.join(self.output_directory, self.basename)
with open(statefile, 'wb') as wfh:
serializer.dump(self, wfh)
class RunInfo(object):
default_name_format = 'wa-run-%y%m%d-%H%M%S'
def __init__(self, project=None, project_stage=None, name=None):
self.uuid = uuid.uuid4()
self.project = project
self.project_stage = project_stage
self.name = name or datetime.now().strftime(self.default_name_format)
self.start_time = None
self.end_time = None
self.duration = None
@staticmethod
def from_pod(pod):
instance = RunInfo()
instance.uuid = uuid.UUID(pod['uuid'])
instance.project = pod['project']
instance.project_stage = pod['project_stage']
instance.name = pod['name']
instance.start_time = pod['start_time']
instance.end_time = pod['end_time']
instance.duration = timedelta(seconds=pod['duration'])
return instance
def to_pod(self):
d = copy(self.__dict__)
d['uuid'] = str(self.uuid)
d['duration'] = self.duration.days * 3600 * 24 + self.duration.seconds
return d
class RunOutput(WAOutput):
@property
def info_directory(self):
return _d(os.path.join(self.output_directory, '_info'))
@property
def config_directory(self):
return _d(os.path.join(self.output_directory, '_config'))
@property
def failed_directory(self):
return _d(os.path.join(self.output_directory, '_failed'))
@property
def log_file(self):
return os.path.join(self.output_directory, 'run.log')
@classmethod
def from_pod(cls, pod):
instance = WAOutput.from_pod(pod)
instance.info = RunInfo.from_pod(pod['info'])
instance.jobs = [JobOutput.from_pod(i) for i in pod['jobs']]
instance.failed = [JobOutput.from_pod(i) for i in pod['failed']]
return instance
def __init__(self, output_directory):
super(RunOutput, self).__init__(output_directory)
self.logger = logging.getLogger('output')
self.info = RunInfo()
self.jobs = []
self.failed = []
def initialize(self, overwrite=False):
super(RunOutput, self).initialize(overwrite)
log.add_file(self.log_file)
self.add_artifact('runlog', self.log_file, 'log')
def create_job_output(self, id):
outdir = os.path.join(self.output_directory, id)
job_output = JobOutput(outdir)
self.jobs.append(job_output)
return job_output
def move_failed(self, job_output):
basename = os.path.basename(job_output.output_directory)
i = 1
dest = os.path.join(self.failed_directory, basename + '-{}'.format(i))
while os.path.exists(dest):
i += 1
dest = '{}-{}'.format(dest[:-2], i)
shutil.move(job_output.output_directory, dest)
def to_pod(self):
pod = super(RunOutput, self).to_pod()
pod['info'] = self.info.to_pod()
pod['jobs'] = [i.to_pod() for i in self.jobs]
pod['failed'] = [i.to_pod() for i in self.failed]
return pod
class JobOutput(WAOutput):
def add_artifact(self, name, path, kind, *args, **kwargs):
path = _check_artifact_path(path, self.output_directory)
self.artifacts.append(Artifact(name, path, kind, Artifact.ITERATION, *args, **kwargs))
class Artifact(object):
"""
This is an artifact generated during execution/post-processing of a workload.
Unlike metrics, this represents an actual generated artifact, such as a file.
This may be a "result", such as a trace, or it could be "metadata", such as logs.
These are distinguished using the ``kind`` attribute, which also helps WA decide
how it should be handled. Currently supported kinds are:
:log: A log file. Not part of "results" as such but contains information about the
run/workload execution that may be useful for diagnostics/meta-analysis.
:meta: A file containing metadata. This is not part of "results", but contains
information that may be necessary to reproduce the results (contrast with
``log`` artifacts which are *not* necessary).
:data: This file contains new data, not available otherwise and should be considered
part of the "results" generated by WA. Most traces would fall into this category.
:export: Exported version of results or some other artifact. This signifies that
this artifact does not contain any new data that is not available
elsewhere and that it may be safely discarded without losing information.
:raw: Signifies that this is a raw dump/log that is normally processed to extract
useful information and is then discarded. In a sense, it is the opposite of
``export``, but in general may also be discarded.
.. note:: whether a file is marked as ``log``/``data`` or ``raw`` depends on
how important it is to preserve this file, e.g. when archiving, vs
how much space it takes up. Unlike ``export`` artifacts which are
(almost) always ignored by other exporters as that would never result
in data loss, ``raw`` files *may* be processed by exporters if they
decide that the risk of losing potentially (though unlikely) useful
data is greater than the time/space cost of handling the artifact (e.g.
a database uploader may choose to ignore ``raw`` artifacts, whereas a
network filer archiver may choose to archive them).
.. note:: The kind parameter is intended to represent the logical function of a particular
artifact, not its intended means of processing -- this is left entirely up to the
result processors.
"""
RUN = 'run'
ITERATION = 'iteration'
valid_kinds = ['log', 'meta', 'data', 'export', 'raw']
@staticmethod
def from_pod(pod):
return Artifact(**pod)
def __init__(self, name, path, kind, level=RUN, mandatory=False, description=None):
""""
:param name: Name that uniquely identifies this artifact.
:param path: The *relative* path of the artifact. Depending on the ``level``
must be either relative to the run or iteration output directory.
Note: this path *must* be delimited using ``/`` irrespective of the
operating system.
:param kind: The type of artifact this is (e.g. log file, result, etc.); this
will be used as a hint to result processors. This must be one of ``'log'``,
``'meta'``, ``'data'``, ``'export'``, ``'raw'``.
:param level: The level at which the artifact will be generated. Must be either
``'iteration'`` or ``'run'``.
:param mandatory: Boolean value indicating whether this artifact must be present
at the end of result processing for its level.
:param description: A free-form description of what this artifact is.
"""
if kind not in self.valid_kinds:
raise ValueError('Invalid Artifact kind: {}; must be in {}'.format(kind, self.valid_kinds))
self.name = name
self.path = path.replace('/', os.sep) if path is not None else path
self.kind = kind
self.level = level
self.mandatory = mandatory
self.description = description
def exists(self, context):
"""Returns ``True`` if artifact exists within the specified context, and
``False`` otherwise."""
fullpath = os.path.join(context.output_directory, self.path)
return os.path.exists(fullpath)
def to_pod(self):
return copy(self.__dict__)
class RunEvent(object):
"""
An event that occurred during a run.
"""
@staticmethod
def from_pod(pod):
instance = RunEvent(pod['message'])
instance.timestamp = pod['timestamp']
return instance
def __init__(self, message):
self.timestamp = datetime.utcnow()
self.message = message
def to_pod(self):
return copy(self.__dict__)
def __str__(self):
return '{} {}'.format(self.timestamp, self.message)
__repr__ = __str__
class Metric(object):
"""
This is a single metric collected from executing a workload.
:param name: the name of the metric. Uniquely identifies the metric
within the results.
:param value: The numerical value of the metric for this execution of
a workload. This can be either an int or a float.
:param units: Units for the collected value. Can be None if the value
has no units (e.g. it's a count or a standardised score).
:param lower_is_better: Boolean flag indicating whether lower values are
better than higher ones. Defaults to False.
:param classifiers: A set of key-value pairs to further classify this metric
beyond the current iteration (e.g. this can be used to identify
sub-tests).
"""
@staticmethod
def from_pod(pod):
return Metric(**pod)
def __init__(self, name, value, units=None, lower_is_better=False, classifiers=None):
self.name = name
self.value = numeric(value)
self.units = units
self.lower_is_better = lower_is_better
self.classifiers = classifiers or {}
def to_pod(self):
return copy(self.__dict__)
def __str__(self):
result = '{}: {}'.format(self.name, self.value)
if self.units:
result += ' ' + self.units
result += ' ({})'.format('-' if self.lower_is_better else '+')
return '<{}>'.format(result)
__repr__ = __str__
def _check_artifact_path(path, rootpath):
if path.startswith(rootpath):
return os.path.abspath(path)
rootpath = os.path.abspath(rootpath)
full_path = os.path.join(rootpath, path)
if not os.path.isfile(full_path):
raise ValueError('Cannot add artifact because {} does not exist.'.format(full_path))
return full_path
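
To make the Metric and pod semantics above concrete, a hedged round-trip sketch (the module path wa.framework.output appears in imports later in this diff; Python 2):

from wa.framework.output import Metric

m = Metric('execution_time', 12.3, units='seconds', lower_is_better=True)
print m                                 # <execution_time: 12.3 seconds (-)>
restored = Metric.from_pod(m.to_pod())  # pods are plain dicts, safe to serialize
print restored.value                    # 12.3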

@@ -81,19 +81,10 @@ class AttributeCollection(object):
__repr__ = __str__
def _to_attrcls(self, p):
old_owner = getattr(p, "_owner", None)
if isinstance(p, basestring):
p = self._attrcls(p)
elif isinstance(p, tuple) or isinstance(p, list):
p = self._attrcls(*p)
elif isinstance(p, dict):
p = self._attrcls(**p)
elif not isinstance(p, self._attrcls):
raise ValueError('Invalid parameter value: {}'.format(p))
if (p.name in self._attrs and not p.override and
p.name != 'modules'): # TODO: HACK due to "diamond dependency" in workloads...
if not isinstance(p, self._attrcls):
raise ValueError('Invalid attribute value: {}; must be a {}'.format(p, self._attrcls))
if (p.name in self._attrs and not p.override):
raise ValueError('Attribute {} has already been defined.'.format(p.name))
p._owner = old_owner
return p
def __iadd__(self, other):
@@ -269,7 +260,6 @@ class PluginMeta(type):
to_propagate = [
('parameters', Parameter, AttributeCollection),
('artifacts', Artifact, AttributeCollection),
('core_modules', str, ListCollection),
]
virtual_methods = ['validate', 'initialize', 'finalize']
@@ -299,8 +289,10 @@ class PluginMeta(type):
if prop_attr in attrs:
pattrs = attrs[prop_attr] or []
for pa in pattrs:
if not isinstance(pa, basestring):
pa._owner = clsname
if not isinstance(pa, attr_cls):
msg = 'Invalid value "{}" for attribute "{}"; must be a {}'
raise ValueError(msg.format(pa, prop_attr, attr_cls))
pa._owner = clsname
propagated += pattrs
should_propagate = True
if should_propagate:
@@ -339,13 +331,7 @@ class Plugin(object):
kind = None
name = None
parameters = [
Parameter('modules', kind=list,
description="""
Lists the modules to be loaded by this plugin. A module is a
plug-in that further extends the functionality of a plugin.
"""),
]
parameters = []
artifacts = []
aliases = []
core_modules = []
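
The hunks above tighten attribute handling: _to_attrcls no longer coerces strings, tuples, or dicts, so plugin attributes must now be declared as instances of the expected class. A minimal sketch of the stricter style (the Parameter import path is an assumption; the diff only shows that Parameter is in scope in this module):

from wa.framework.plugin import Plugin, Parameter  # Parameter path assumed

class MyPlugin(Plugin):
    name = 'myplugin'
    kind = 'test'
    parameters = [
        # Must be a Parameter instance; a bare string or tuple now raises ValueError.
        Parameter('threads', kind=int, default=1,
                  description='Number of worker threads.'),
    ]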

@@ -1,80 +0,0 @@
import string
from copy import copy
from devlib import Platform, AndroidTarget
from devlib.target import Cpuinfo, KernelVersion, KernelConfig
class TargetInfo(object):
@staticmethod
def from_pod(pod):
instance = TargetInfo()
instance.target = pod['target']
instance.abi = pod['abi']
instance.cpuinfo = Cpuinfo(pod['cpuinfo'])
instance.os = pod['os']
instance.os_version = pod['os_version']
instance.abi = pod['abi']
instance.is_rooted = pod['is_rooted']
instance.kernel_version = KernelVersion(pod['kernel_release'],
pod['kernel_version'])
instance.kernel_config = KernelConfig(pod['kernel_config'])
if pod["target"] == "AndroidTarget":
instance.screen_resolution = pod['screen_resolution']
instance.prop = pod['prop']
instance.android_id = pod['android_id']
return instance
def __init__(self, target=None):
if target:
self.target = target.__class__.__name__
self.cpuinfo = target.cpuinfo
self.os = target.os
self.os_version = target.os_version
self.abi = target.abi
self.is_rooted = target.is_rooted
self.kernel_version = target.kernel_version
self.kernel_config = target.config
if isinstance(target, AndroidTarget):
self.screen_resolution = target.screen_resolution
self.prop = target.getprop()
self.android_id = target.android_id
else:
self.target = None
self.cpuinfo = None
self.os = None
self.os_version = None
self.abi = None
self.is_rooted = None
self.kernel_version = None
self.kernel_config = None
if isinstance(target, AndroidTarget):
self.screen_resolution = None
self.prop = None
self.android_id = None
def to_pod(self):
pod = {}
pod['target'] = self.target
pod['abi'] = self.abi
pod['cpuinfo'] = self.cpuinfo.sections
pod['os'] = self.os
pod['os_version'] = self.os_version
pod['abi'] = self.abi
pod['is_rooted'] = self.is_rooted
pod['kernel_release'] = self.kernel_version.release
pod['kernel_version'] = self.kernel_version.version
pod['kernel_config'] = dict(self.kernel_config.iteritems())
if self.target == "AndroidTarget":
pod['screen_resolution'] = self.screen_resolution
pod['prop'] = self.prop
pod['android_id'] = self.android_id
return pod
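
A hedged sketch of how TargetInfo above might be used to snapshot and serialize target state (the connection details are illustrative and the module path is an assumption):

from devlib import AndroidTarget
from wa.framework.target import TargetInfo  # module path assumed

target = AndroidTarget(connection_settings={'device': 'emulator-5554'})
target.connect()
info = TargetInfo(target)
pod = info.to_pod()                  # plain dict, safe to serialize
restored = TargetInfo.from_pod(pod)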

@@ -84,13 +84,14 @@ class TargetDescription(object):
self.platform = platform
self.connection = conn
self.assistant = assistant
self.assistant_params = assistant_params
self._set('target_params', target_params)
self._set('platform_params', platform_params)
self._set('conn_params', conn_params)
self._set('assistant_params', assistant_params)
def get_default_config(self):
param_attrs = ['target_params', 'platform_params', 'conn_params']
param_attrs = ['target_params', 'platform_params',
'conn_params', 'assistant_params']
config = {}
for pattr in param_attrs:
for n, p in getattr(self, pattr).itervalues():

@@ -23,7 +23,6 @@ class TestDevice(Plugin):
kind = 'device'
def __init__(self, *args, **kwargs):
self.modules = []
self.boot_called = 0
self.push_file_called = 0
self.pull_file_called = 0

@@ -1,21 +1,25 @@
import unittest
from nose.tools import assert_equal
from wa.framework.configuration import merge_config_values
from wa.utils.misc import merge_config_values
class TestConfigUtils(unittest.TestCase):
def test_merge_values(self):
test_cases = [
# base, other, expected_result
('a', 3, 3),
('a', [1, 2], ['a', 1, 2]),
({1: 2}, [3, 4], [{1: 2}, 3, 4]),
(set([2]), [1, 2, 3], [2, 1, 2, 3]),
(set([2]), [1, 2, 3], [2, 1, 3]),
([1, 2, 3], set([2]), set([1, 2, 3])),
([1, 2], None, [1, 2]),
(None, 'a', 'a'),
]
for v1, v2, expected in test_cases:
assert_equal(merge_config_values(v1, v2), expected)
result = merge_config_values(v1, v2)
assert_equal(result, expected)
if v2 is not None:
assert_equal(type(result), type(v2))
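
The added assertions encode the merge rule this test exercises: the merged result always takes the type of the second ('other') value. For example, using a row from the table above:

from wa.utils.misc import merge_config_values

print merge_config_values([1, 2, 3], set([2]))  # set([1, 2, 3]) -- result type follows 'other'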

@@ -19,9 +19,9 @@ from unittest import TestCase
from nose.tools import assert_equal, assert_raises
from wlauto.utils.exec_control import (init_environment, reset_environment,
activate_environment, once,
once_per_class, once_per_instance)
from wa.utils.exec_control import (init_environment, reset_environment,
activate_environment, once,
once_per_class, once_per_instance)
class TestClass(object):

@@ -1,164 +0,0 @@
import os
import sys
import unittest
from StringIO import StringIO
from mock import Mock
from nose.tools import assert_true, assert_false, assert_equal
from wa.framework import signal
from wa.framework.agenda import Agenda
from wa.framework.run import RunnerJob
from wa.framework.execution import agenda_iterator
sys.path.insert(0, os.path.dirname(__file__))
from testutils import SignalWatcher
class TestAgendaIteration(unittest.TestCase):
def setUp(self):
agenda_text = """
global:
iterations: 2
sections:
- id: a
- id: b
workloads:
- id: 1
name: bbench
workloads:
- id: 2
name: dhrystone
- id: 3
name: coremark
iterations: 1
"""
agenda_file = StringIO(agenda_text)
agenda_file.name = 'agenda'
self.agenda = Agenda(agenda_file)
def test_iteration_by_iteration(self):
specs = ['{}-{}'.format(s.id, w.id)
for _, s, w, _
in agenda_iterator(self.agenda, 'by_iteration')]
assert_equal(specs,
['a-2', 'b-2', 'a-3', 'b-3', 'b-1', 'a-2', 'b-2', 'b-1'])
def test_iteration_by_section(self):
specs = ['{}-{}'.format(s.id, w.id)
for _, s, w, _
in agenda_iterator(self.agenda, 'by_section')]
assert_equal(specs,
['a-2', 'a-3', 'b-2', 'b-3', 'b-1', 'a-2', 'b-2', 'b-1'])
def test_iteration_by_spec(self):
specs = ['{}-{}'.format(s.id, w.id)
for _, s, w, _ in
agenda_iterator(self.agenda, 'by_spec')]
assert_equal(specs,
['a-2', 'a-2', 'a-3', 'b-2', 'b-2', 'b-3', 'b-1', 'b-1'])
class FakeWorkloadLoader(object):
def get_workload(self, name, target, **params):
workload = Mock()
workload.name = name
workload.target = target
workload.parameters = params
return workload
class WorkloadExecutionWatcher(SignalWatcher):
signals = [
signal.BEFORE_WORKLOAD_SETUP,
signal.SUCCESSFUL_WORKLOAD_SETUP,
signal.AFTER_WORKLOAD_SETUP,
signal.BEFORE_WORKLOAD_EXECUTION,
signal.SUCCESSFUL_WORKLOAD_EXECUTION,
signal.AFTER_WORKLOAD_EXECUTION,
signal.BEFORE_WORKLOAD_RESULT_UPDATE,
signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE,
signal.AFTER_WORKLOAD_RESULT_UPDATE,
signal.BEFORE_WORKLOAD_TEARDOWN,
signal.SUCCESSFUL_WORKLOAD_TEARDOWN,
signal.AFTER_WORKLOAD_TEARDOWN,
]
class TestWorkloadExecution(unittest.TestCase):
def setUp(self):
params = {
'target': Mock(),
'context': Mock(),
'loader': FakeWorkloadLoader(),
}
data = {
'id': 'test',
'workload': 'test',
'label': None,
'parameters': None,
}
self.job = RunnerJob('job1', 'execute-workload-job', params, data)
self.workload = self.job.actor.workload
self.watcher = WorkloadExecutionWatcher()
def test_normal_flow(self):
self.job.run()
assert_true(self.workload.setup.called)
assert_true(self.workload.run.called)
assert_true(self.workload.update_result.called)
assert_true(self.workload.teardown.called)
self.watcher.assert_all_called()
def test_failed_run(self):
def bad(self):
raise Exception()
self.workload.run = bad
try:
self.job.run()
except Exception:
pass
assert_true(self.workload.setup.called)
assert_false(self.workload.update_result.called)
assert_true(self.workload.teardown.called)
assert_true(self.watcher.before_workload_setup.called)
assert_true(self.watcher.successful_workload_setup.called)
assert_true(self.watcher.after_workload_setup.called)
assert_true(self.watcher.before_workload_execution.called)
assert_false(self.watcher.successful_workload_execution.called)
assert_true(self.watcher.after_workload_execution.called)
assert_true(self.watcher.before_workload_result_update.called)
assert_false(self.watcher.successful_workload_result_update.called)
assert_true(self.watcher.after_workload_result_update.called)
assert_true(self.watcher.before_workload_teardown.called)
assert_true(self.watcher.successful_workload_teardown.called)
assert_true(self.watcher.after_workload_teardown.called)
def test_failed_setup(self):
def bad(self):
raise Exception()
self.workload.setup = bad
try:
self.job.run()
except Exception:
pass
assert_false(self.workload.run.called)
assert_false(self.workload.update_result.called)
assert_false(self.workload.teardown.called)
assert_true(self.watcher.before_workload_setup.called)
assert_false(self.watcher.successful_workload_setup.called)
assert_true(self.watcher.after_workload_setup.called)
assert_false(self.watcher.before_workload_execution.called)
assert_false(self.watcher.successful_workload_execution.called)
assert_false(self.watcher.after_workload_execution.called)
assert_false(self.watcher.before_workload_result_update.called)
assert_false(self.watcher.successful_workload_result_update.called)
assert_false(self.watcher.after_workload_result_update.called)
assert_false(self.watcher.before_workload_teardown.called)
assert_false(self.watcher.successful_workload_teardown.called)
assert_false(self.watcher.after_workload_teardown.called)
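
These tests hinge on the signal dispatch used throughout the framework (signal.send also appears in the log module earlier in this diff). A hedged sketch of subscribing to one of the workload signals (the connect() signature is an assumption):

from wa.framework import signal

def on_setup(sender):
    print 'setup about to start'

signal.connect(on_setup, signal.BEFORE_WORKLOAD_SETUP)  # connect() assumed
signal.send(signal.BEFORE_WORKLOAD_SETUP, None)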

@@ -44,15 +44,8 @@ class PluginLoaderTest(TestCase):
class MyMeta(PluginMeta):
virtual_methods = ['validate', 'virtual1', 'virtual2']
class MyBasePlugin(Plugin):
__metaclass__ = MyMeta
name = 'base'
kind = 'test'
@@ -166,43 +159,6 @@ class PluginMetaTest(TestCase):
7,
]
def test_virtual_methods(self):
acid = MyAcidPlugin()
acid.virtual1()
assert_equal(acid.v1, 1)
assert_equal(acid.vv1, 1)
assert_equal(acid.v2, 0)
assert_equal(acid.vv2, 0)
assert_equal(acid.v3, 'acid')
acid.virtual2()
acid.virtual2()
assert_equal(acid.v1, 1)
assert_equal(acid.vv1, 1)
assert_equal(acid.v2, 2)
assert_equal(acid.vv2, 2)
def test_initialization(self):
class MyExt(Plugin):
name = 'myext'
kind = 'test'
values = {'a': 0}
def __init__(self, *args, **kwargs):
super(MyExt, self).__init__(*args, **kwargs)
self.instance_init = 0
def initialize(self, context):
self.values['a'] += 1
class MyChildExt(MyExt):
name = 'mychildext'
def initialize(self, context):
self.instance_init += 1
ext = MyChildExt()
ext.initialize(None)
assert_equal(MyExt.values['a'], 1)
assert_equal(ext.instance_init, 1)
class ParametersTest(TestCase):

@@ -1,44 +0,0 @@
import os
import sys
import shutil
import tempfile
import unittest
from mock import Mock
from nose.tools import assert_true, assert_false, assert_equal, assert_almost_equal
from wa.framework import pluginloader
from wa.framework.output import RunOutput
from wa.framework.run import Runner, RunnerJob, runmethod, reset_runmethods
from wa.utils.serializer import json
class RunnerTest(unittest.TestCase):
def setUp(self):
self.output = RunOutput(tempfile.mktemp())
self.output.initialize()
def tearDown(self):
shutil.rmtree(self.output.output_directory)
def test_run_init(self):
runner = Runner(self.output)
runner.initialize()
runner.finalize()
assert_true(runner.info.name)
assert_true(runner.info.start_time)
assert_true(runner.info.end_time)
assert_almost_equal(runner.info.duration,
runner.info.end_time -
runner.info.start_time)
def test_normal_run(self):
runner = Runner(self.output)
runner.add_job(1, Mock())
runner.add_job(2, Mock())
runner.initialize()
runner.run()
runner.finalize()
assert_equal(len(runner.completed_jobs), 2)

@@ -20,7 +20,7 @@ from unittest import TestCase
from nose.tools import raises, assert_equal, assert_not_equal, assert_in, assert_not_in
from nose.tools import assert_true, assert_false
from wa.utils.types import list_or_integer, list_or_bool, caseless_string, arguments, prioritylist, TreeNode
from wa.utils.types import list_or_integer, list_or_bool, caseless_string, arguments, prioritylist
class TestPriorityList(TestCase):
@@ -91,81 +91,3 @@ class TestPriorityList(TestCase):
assert_equal(list(pl), ['a', 'b','y', 'x', 'm', 'n'])
pl.add_after('z', 'm')
assert_equal(list(pl), ['a', 'b', 'y', 'x', 'm', 'z', 'n'])
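
For orientation, a minimal prioritylist sketch consistent with the assertions above (only methods exercised by the tests are used; ordering semantics are as the tests assert):

from wa.utils.types import prioritylist

pl = prioritylist()
pl.add('m')
pl.add_after('z', 'm')  # 'z' is placed immediately after 'm'
print list(pl)          # ['m', 'z']
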
class TestTreeNode(TestCase):
def test_addremove(self):
n1, n2, n3 = TreeNode(), TreeNode(), TreeNode()
n1.add_child(n2)
n3.parent = n2
assert_equal(n2.parent, n1)
assert_in(n3, n2.children)
n2.remove_child(n3)
assert_equal(n3.parent, None)
assert_not_in(n3, n2.children)
n1.add_child(n2) # duplicate add
assert_equal(n1.children, [n2])
def test_ancestor_descendant(self):
n1, n2a, n2b, n3 = TreeNode(), TreeNode(), TreeNode(), TreeNode()
n1.add_child(n2a)
n1.add_child(n2b)
n2a.add_child(n3)
assert_equal(list(n3.iter_ancestors()), [n3, n2a, n1])
assert_equal(list(n1.iter_descendants()), [n2a, n3, n2b])
assert_true(n1.has_descendant(n3))
assert_true(n3.has_ancestor(n1))
assert_false(n3.has_ancestor(n2b))
def test_root(self):
n1, n2, n3 = TreeNode(), TreeNode(), TreeNode()
n1.add_child(n2)
n2.add_child(n3)
assert_true(n1.is_root)
assert_false(n2.is_root)
assert_equal(n3.get_root(), n1)
def test_common_ancestor(self):
n1, n2, n3a, n3b, n4, n5 = TreeNode(), TreeNode(), TreeNode(), TreeNode(), TreeNode(), TreeNode()
n1.add_child(n2)
n2.add_child(n3a)
n2.add_child(n3b)
n3b.add_child(n4)
n3a.add_child(n5)
assert_equal(n4.get_common_ancestor(n3a), n2)
assert_equal(n3a.get_common_ancestor(n4), n2)
assert_equal(n3b.get_common_ancestor(n4), n3b)
assert_equal(n4.get_common_ancestor(n3b), n3b)
assert_equal(n4.get_common_ancestor(n5), n2)
def test_iteration(self):
n1, n2, n3, n4, n5 = TreeNode(), TreeNode(), TreeNode(), TreeNode(), TreeNode()
n1.add_child(n2)
n2.add_child(n3)
n3.add_child(n4)
n4.add_child(n5)
ancestors = [a for a in n5.iter_ancestors(upto=n2)]
assert_equal(ancestors, [n5, n4, n3])
ancestors = [a for a in n5.iter_ancestors(after=n2)]
assert_equal(ancestors, [n2, n1])
@raises(ValueError)
def test_trivial_loop(self):
n1, n2, n3 = TreeNode(), TreeNode(), TreeNode()
n1.add_child(n2)
n2.add_child(n3)
n3.add_child(n1)
@raises(ValueError)
def test_tree_violation(self):
n1, n2a, n2b, n3 = TreeNode(), TreeNode(), TreeNode(), TreeNode()
n1.add_child(n2a)
n1.add_child(n2b)
n2a.add_child(n3)
n2b.add_child(n3)
@raises(ValueError)
def test_self_parent(self):
n = TreeNode()
n.add_child(n)

@@ -28,6 +28,7 @@ fact that Python is not the best language to use for configuration.
import os
import re
import math
import numbers
import shlex
import string
from bisect import insort

@@ -1,35 +0,0 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wlauto.core.configuration import settings # NOQA
from wlauto.core.device_manager import DeviceManager # NOQA
from wlauto.core.command import Command # NOQA
from wlauto.core.workload import Workload # NOQA
from wlauto.core.plugin import Artifact, Alias # NOQA
from wlauto.core.configuration.configuration import ConfigurationPoint as Parameter
import wlauto.core.pluginloader as PluginLoader # NOQA
from wlauto.core.instrumentation import Instrument # NOQA
from wlauto.core.result import ResultProcessor, IterationResult # NOQA
from wlauto.core.resource import ResourceGetter, Resource, GetterPriority, NO_ONE # NOQA
from wlauto.core.exttype import get_plugin_type # NOQA Note: MUST be imported after other core imports.
from wlauto.common.resources import File, PluginAsset, Executable
from wlauto.common.android.resources import ApkFile, JarFile
from wlauto.common.android.workload import (UiAutomatorWorkload, ApkWorkload, AndroidBenchmark, # NOQA
AndroidUiAutoBenchmark, GameWorkload) # NOQA
from wlauto.core.version import get_wa_version
__version__ = get_wa_version()

@@ -1,79 +0,0 @@
# This agenda specifies configuration that may be used for regression runs
# on big.LITTLE systems. This agenda will work with a TC2 device configured as
# described in the documentation.
config:
device: tc2
run_name: big.LITTLE_regression
global:
iterations: 5
sections:
- id: mp_a15only
boot_parameters:
os_mode: mp_a15_only
runtime_parameters:
a15_governor: interactive
a15_governor_tunables:
above_hispeed_delay: 20000
- id: mp_a7bc
boot_parameters:
os_mode: mp_a7_bootcluster
runtime_parameters:
a7_governor: interactive
a7_min_frequency: 500000
a7_governor_tunables:
above_hispeed_delay: 20000
a15_governor: interactive
a15_governor_tunables:
above_hispeed_delay: 20000
- id: mp_a15bc
boot_parameters:
os_mode: mp_a15_bootcluster
runtime_parameters:
a7_governor: interactive
a7_min_frequency: 500000
a7_governor_tunables:
above_hispeed_delay: 20000
a15_governor: interactive
a15_governor_tunables:
above_hispeed_delay: 20000
workloads:
- id: b01
name: andebench
workload_parameters:
number_of_threads: 5
- id: b02
name: andebench
label: andebenchst
workload_parameters:
number_of_threads: 1
- id: b03
name: antutu
label: antutu4.0.3
workload_parameters:
version: 4.0.3
- id: b04
name: benchmarkpi
- id: b05
name: caffeinemark
- id: b06
name: cfbench
- id: b07
name: geekbench
label: geekbench3
workload_parameters:
version: 3
- id: b08
name: linpack
- id: b09
name: quadrant
- id: b10
name: smartbench
- id: b11
name: sqlite
- id: b12
name: vellamo
- id: w01
name: bbench_with_audio
- id: w02
name: audio

@@ -1,43 +0,0 @@
# This is an agenda that is built up during the explanation of the agenda features
# in the documentation. It should work out of the box on most rooted Android
# devices.
config:
project: governor_comparison
run_name: performance_vs_interactive
device: generic_android
reboot_policy: never
instrumentation: [coreutil, cpufreq]
coreutil:
threshold: 80
sysfs_extractor:
paths: [/proc/meminfo]
result_processors: [sqlite]
sqlite:
database: ~/my_wa_results.sqlite
global:
iterations: 5
sections:
- id: perf
runtime_params:
sysfile_values:
/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
- id: inter
runtime_params:
sysfile_values:
/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: interactive
workloads:
- id: 01_dhry
name: dhrystone
label: dhrystone_15over6
workload_params:
threads: 6
mloops: 15
- id: 02_memc
name: memcpy
instrumentation: [sysfs_extractor]
- id: 03_cycl
name: cyclictest
iterations: 10

@@ -1,16 +0,0 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

@@ -1,400 +0,0 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import stat
import string
import textwrap
import argparse
import shutil
import getpass
import subprocess
from collections import OrderedDict
import yaml
from wlauto import PluginLoader, Command, settings
from wlauto.exceptions import CommandError, ConfigError
from wlauto.core.command import init_argument_parser
from wlauto.utils.misc import (capitalize, check_output,
ensure_file_directory_exists as _f, ensure_directory_exists as _d)
from wlauto.utils.types import identifier
from wlauto.utils.doc import format_body
__all__ = ['create_workload']
TEMPLATES_DIR = os.path.join(os.path.dirname(__file__), 'templates')
UIAUTO_BUILD_SCRIPT = """#!/bin/bash
class_dir=bin/classes/com/arm/wlauto/uiauto
base_class=`python -c "import os, wlauto; print os.path.join(os.path.dirname(wlauto.__file__), 'common', 'android', 'BaseUiAutomation.class')"`
mkdir -p $$class_dir
cp $$base_class $$class_dir
ant build
if [[ -f bin/${package_name}.jar ]]; then
cp bin/${package_name}.jar ..
fi
"""
class CreateSubcommand(object):
name = None
help = None
usage = None
description = None
epilog = None
formatter_class = None
def __init__(self, logger, subparsers):
self.logger = logger
self.group = subparsers
parser_params = dict(help=(self.help or self.description), usage=self.usage,
description=format_body(textwrap.dedent(self.description), 80),
epilog=self.epilog)
if self.formatter_class:
parser_params['formatter_class'] = self.formatter_class
self.parser = subparsers.add_parser(self.name, **parser_params)
init_argument_parser(self.parser) # propagate top-level options
self.initialize()
def initialize(self):
pass
class CreateWorkloadSubcommand(CreateSubcommand):
name = 'workload'
description = '''Create a new workload. By default, a basic workload template will be
used but you can use options to specify a different template.'''
def initialize(self):
self.parser.add_argument('name', metavar='NAME',
help='Name of the workload to be created')
self.parser.add_argument('-p', '--path', metavar='PATH', default=None,
help='The location at which the workload will be created. If not specified, ' +
'this defaults to "~/.workload_automation/workloads".')
self.parser.add_argument('-f', '--force', action='store_true',
help='Create the new workload even if a workload with the specified ' +
'name already exists.')
template_group = self.parser.add_mutually_exclusive_group()
template_group.add_argument('-A', '--android-benchmark', action='store_true',
help='Use android benchmark template. This template allows you to specify ' +
' an APK file that will be installed and run on the device. You should ' +
' place the APK file into the workload\'s directory at the same level ' +
'as the __init__.py.')
template_group.add_argument('-U', '--ui-automation', action='store_true',
help='Use UI automation template. This template generates a UI automation ' +
'Android project as well as the Python class. This a more general ' +
'version of the android benchmark template that makes no assumptions ' +
'about the nature of your workload, apart from the fact that you need ' +
'UI automation. If you need to install an APK, start an app on device, ' +
'etc., you will need to do that explicitly in your code.')
template_group.add_argument('-B', '--android-uiauto-benchmark', action='store_true',
help='Use android uiauto benchmark template. This generates a UI automation ' +
'project as well as a Python class. This template should be used ' +
'if you have a APK file that needs to be run on the device. You ' +
'should place the APK file into the workload\'s directory at the ' +
'same level as the __init__.py.')
def execute(self, state, args): # pylint: disable=R0201
where = args.path or 'local'
check_name = not args.force
if args.android_benchmark:
kind = 'android'
elif args.ui_automation:
kind = 'uiauto'
elif args.android_uiauto_benchmark:
kind = 'android_uiauto'
else:
kind = 'basic'
try:
create_workload(args.name, kind, where, check_name)
except CommandError, e:
print "ERROR:", e
class CreatePackageSubcommand(CreateSubcommand):
name = 'package'
description = '''Create a new empty Python package for WA plugins. On installation,
this package will "advertise" itself to WA so that Plugins within it will
be loaded by WA when it runs.'''
def initialize(self):
self.parser.add_argument('name', metavar='NAME',
help='Name of the package to be created')
self.parser.add_argument('-p', '--path', metavar='PATH', default=None,
help='The location at which the new package will be created. If not specified, ' +
'current working directory will be used.')
self.parser.add_argument('-f', '--force', action='store_true',
help='Create the new package even if a file or directory with the same name '
'already exists at the specified location.')
def execute(self, args): # pylint: disable=R0201
package_dir = args.path or os.path.abspath('.')
template_path = os.path.join(TEMPLATES_DIR, 'setup.template')
self.create_plugins_package(package_dir, args.name, template_path, args.force)
def create_plugins_package(self, location, name, setup_template_path, overwrite=False):
package_path = os.path.join(location, name)
if os.path.exists(package_path):
if overwrite:
self.logger.info('overwriting existing "{}"'.format(package_path))
shutil.rmtree(package_path)
else:
raise CommandError('Location "{}" already exists.'.format(package_path))
actual_package_path = os.path.join(package_path, name)
os.makedirs(actual_package_path)
setup_text = render_template(setup_template_path, {'package_name': name, 'user': getpass.getuser()})
with open(os.path.join(package_path, 'setup.py'), 'w') as wfh:
wfh.write(setup_text)
touch(os.path.join(actual_package_path, '__init__.py'))
class CreateAgendaSubcommand(CreateSubcommand):
name = 'agenda'
description = """
Create an agenda with the specified plugins enabled and parameters set to their
default values.
"""
def initialize(self):
self.parser.add_argument('plugins', nargs='+',
help='Plugins to be added')
self.parser.add_argument('-i', '--iterations', type=int, default=1,
help='Sets the number of iterations for all workloads')
self.parser.add_argument('-r', '--include-runtime-params', action='store_true',
help="""
Adds runtime parameters to the global section of the generated
agenda. Note: these do not have default values, so only name
will be added. Also, runtime parameters are device-specific, so
a device must be specified (either in the list of plugins,
or in the existing config).
""")
self.parser.add_argument('-o', '--output', metavar='FILE',
help='Output file. If not specified, STDOUT will be used instead.')
def execute(self, args): # pylint: disable=no-self-use,too-many-branches,too-many-statements
loader = PluginLoader(packages=settings.plugin_packages,
paths=settings.plugin_paths)
agenda = OrderedDict()
agenda['config'] = OrderedDict(instrumentation=[], result_processors=[])
agenda['global'] = OrderedDict(iterations=args.iterations)
agenda['workloads'] = []
device = None
device_config = None
for name in args.plugins:
extcls = loader.get_plugin_class(name)
config = loader.get_default_config(name)
del config['modules']
if extcls.kind == 'workload':
entry = OrderedDict()
entry['name'] = extcls.name
if name != extcls.name:
entry['label'] = name
entry['params'] = config
agenda['workloads'].append(entry)
elif extcls.kind == 'device':
if device is not None:
raise ConfigError('Specifying multiple devices: {} and {}'.format(device.name, name))
device = extcls
device_config = config
agenda['config']['device'] = name
agenda['config']['device_config'] = config
else:
if extcls.kind == 'instrument':
agenda['config']['instrumentation'].append(name)
if extcls.kind == 'result_processor':
agenda['config']['result_processors'].append(name)
agenda['config'][name] = config
if args.include_runtime_params:
if not device:
if settings.device:
device = loader.get_plugin_class(settings.device)
device_config = loader.get_default_config(settings.device)
else:
raise ConfigError('-r option requires a device to be in the list of plugins')
rps = OrderedDict()
for rp in device.runtime_parameters:
if hasattr(rp, 'get_runtime_parameters'):
# a core parameter needs to be expanded for each of the
# device's cores, if they're available
for crp in rp.get_runtime_parameters(device_config.get('core_names', [])):
rps[crp.name] = None
else:
rps[rp.name] = None
agenda['global']['runtime_params'] = rps
if args.output:
wfh = open(args.output, 'w')
else:
wfh = sys.stdout
yaml.dump(agenda, wfh, indent=4, default_flow_style=False)
if args.output:
wfh.close()
class CreateCommand(Command):
name = 'create'
description = '''Used to create various WA-related objects (see positional arguments list for what
objects may be created).\n\nUse "wa create <object> -h" for object-specific arguments.'''
formatter_class = argparse.RawDescriptionHelpFormatter
subcmd_classes = [
CreateWorkloadSubcommand,
CreatePackageSubcommand,
CreateAgendaSubcommand,
]
def initialize(self, context):
subparsers = self.parser.add_subparsers(dest='what')
self.subcommands = [] # pylint: disable=W0201
for subcmd_cls in self.subcmd_classes:
subcmd = subcmd_cls(self.logger, subparsers)
self.subcommands.append(subcmd)
def execute(self, args):
for subcmd in self.subcommands:
if subcmd.name == args.what:
subcmd.execute(args)
break
else:
raise CommandError('Not a valid create parameter: {}'.format(args.name))
def create_workload(name, kind='basic', where='local', check_name=True, **kwargs):
if check_name:
extloader = PluginLoader(packages=settings.plugin_packages, paths=settings.plugin_paths)
if name in [wl.name for wl in extloader.list_workloads()]:
raise CommandError('Workload with name "{}" already exists.'.format(name))
class_name = get_class_name(name)
if where == 'local':
workload_dir = _d(os.path.join(settings.user_directory, 'workloads', name))
else:
workload_dir = _d(os.path.join(where, name))
if kind == 'basic':
create_basic_workload(workload_dir, name, class_name, **kwargs)
elif kind == 'uiauto':
create_uiautomator_workload(workload_dir, name, class_name, **kwargs)
elif kind == 'android':
create_android_benchmark(workload_dir, name, class_name, **kwargs)
elif kind == 'android_uiauto':
create_android_uiauto_benchmark(workload_dir, name, class_name, **kwargs)
else:
raise CommandError('Unknown workload type: {}'.format(kind))
print 'Workload created in {}'.format(workload_dir)
def create_basic_workload(path, name, class_name):
source_file = os.path.join(path, '__init__.py')
with open(source_file, 'w') as wfh:
wfh.write(render_template('basic_workload', {'name': name, 'class_name': class_name}))
def create_uiautomator_workload(path, name, class_name):
uiauto_path = _d(os.path.join(path, 'uiauto'))
create_uiauto_project(uiauto_path, name)
source_file = os.path.join(path, '__init__.py')
with open(source_file, 'w') as wfh:
wfh.write(render_template('uiauto_workload', {'name': name, 'class_name': class_name}))
def create_android_benchmark(path, name, class_name):
source_file = os.path.join(path, '__init__.py')
with open(source_file, 'w') as wfh:
wfh.write(render_template('android_benchmark', {'name': name, 'class_name': class_name}))
def create_android_uiauto_benchmark(path, name, class_name):
uiauto_path = _d(os.path.join(path, 'uiauto'))
create_uiauto_project(uiauto_path, name)
source_file = os.path.join(path, '__init__.py')
with open(source_file, 'w') as wfh:
wfh.write(render_template('android_uiauto_benchmark', {'name': name, 'class_name': class_name}))
def create_uiauto_project(path, name, target='1'):
sdk_path = get_sdk_path()
android_path = os.path.join(sdk_path, 'tools', 'android')
package_name = 'com.arm.wlauto.uiauto.' + name.lower()
# ${ANDROID_HOME}/tools/android create uitest-project -n com.arm.wlauto.uiauto.linpack -t 1 -p ../test2
command = '{} create uitest-project --name {} --target {} --path {}'.format(android_path,
package_name,
target,
path)
try:
check_output(command, shell=True)
except subprocess.CalledProcessError as e:
if 'is is not valid' in e.output:
message = 'No Android SDK target found; have you run "{} update sdk" and downloaded a platform?'
raise CommandError(message.format(android_path))
build_script = os.path.join(path, 'build.sh')
with open(build_script, 'w') as wfh:
template = string.Template(UIAUTO_BUILD_SCRIPT)
wfh.write(template.substitute({'package_name': package_name}))
os.chmod(build_script, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
source_file = _f(os.path.join(path, 'src',
os.sep.join(package_name.split('.')[:-1]),
'UiAutomation.java'))
with open(source_file, 'w') as wfh:
wfh.write(render_template('UiAutomation.java', {'name': name, 'package_name': package_name}))
# Utility functions
def get_sdk_path():
sdk_path = os.getenv('ANDROID_HOME')
if not sdk_path:
raise CommandError('Please set ANDROID_HOME environment variable to point to ' +
'the location of the Android SDK')
return sdk_path
def get_class_name(name, postfix=''):
name = identifier(name)
return ''.join(map(capitalize, name.split('_'))) + postfix
def render_template(name, params):
filepath = os.path.join(TEMPLATES_DIR, name)
with open(filepath) as fh:
text = fh.read()
template = string.Template(text)
return template.substitute(params)
def touch(path):
with open(path, 'w') as _:
pass
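
A hedged usage sketch of the helpers above, creating a basic workload programmatically rather than via the wa create command line (the module path is an assumption):

from wlauto.commands.create import create_workload  # module path assumed

# Creates <user_directory>/workloads/mybenchmark/__init__.py from the 'basic'
# template; check_name=False skips the duplicate-name check.
create_workload('mybenchmark', kind='basic', where='local', check_name=False)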

@@ -1,74 +0,0 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wlauto import PluginLoader, Command, settings
from wlauto.utils.formatter import DescriptionListFormatter
from wlauto.utils.doc import get_summary
from wlauto.core import pluginloader
class ListCommand(Command):
name = 'list'
description = 'List available WA plugins with a short description of each.'
def initialize(self, context):
plugin_types = ['{}s'.format(name) for name in pluginloader.kinds]
self.parser.add_argument('kind', metavar='KIND',
help=('Specify the kind of plugin to list. Must be '
'one of: {}'.format(', '.join(plugin_types))),
choices=plugin_types)
self.parser.add_argument('-n', '--name', help='Filter results by the name specified')
self.parser.add_argument('-o', '--packaged-only', action='store_true',
help='''
Only list plugins packaged with WA itself. Do not list plugins
installed locally or from other packages.
''')
self.parser.add_argument('-p', '--platform', help='Only list results that are supported by '
'the specified platform')
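# Usage sketch (plugin kind and platform chosen purely as examples):
#
#   wa list workloads
#   wa list instruments -p android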
def execute(self, state, args):
filters = {}
if args.name:
filters['name'] = args.name
results = pluginloader.list_plugins(args.kind[:-1])
if filters or args.platform:
filtered_results = []
for result in results:
passed = True
for k, v in filters.iteritems():
if getattr(result, k) != v:
passed = False
break
if passed and args.platform:
passed = check_platform(result, args.platform)
if passed:
filtered_results.append(result)
else: # no filters specified
filtered_results = results
if filtered_results:
output = DescriptionListFormatter()
for result in sorted(filtered_results, key=lambda x: x.name):
output.add_item(get_summary(result), result.name)
print output.format_data()
def check_platform(plugin, platform):
supported_platforms = getattr(plugin, 'supported_platforms', [])
if supported_platforms:
return platform in supported_platforms
return True

View File

@ -1,217 +0,0 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
from wlauto import Command, settings
from wlauto.core import pluginloader
from wlauto.common.resources import Executable
from wlauto.core.resource import NO_ONE
from wlauto.core.resolver import ResourceResolver
from wlauto.core.configuration import RunConfiguration
from wlauto.core.agenda import Agenda
from wlauto.common.android.workload import ApkWorkload
class RecordCommand(Command):
name = 'record'
description = '''Performs a revent recording
This command helps you make revent recordings. It will automatically
deploy revent and even has the option of automatically opening apps.
Revent allows you to record raw inputs such as screen swipes or button presses.
This can be useful for recording inputs for workloads such as games that don't
have XML UI layouts that can be used with UIAutomator. The drawback is that
revent recordings are specific to the device type they were recorded on.
WA uses a two-part naming convention for revent recordings:
{device_name}.{suffix}.revent.
- device_name can either be specified manually with the ``-d`` argument or
it can be determined automatically. On Android devices it is obtained
from ``build.prop``; on Linux devices it is obtained from ``/proc/device-tree/model``.
- suffix is used by WA to determine which part of the app execution the
recording is for, currently these are either ``setup`` or ``run``. This
should be specified with the ``-s`` argument.
'''
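# A usage sketch (package name hypothetical): to record the setup stage
# for an app, one might run
#
#   wa record -s setup -p com.example.game
#
# which, on a device whose model resolves to e.g. "Nexus10", would be
# pulled to the host as Nexus10.setup.revent.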
def initialize(self, context):
self.context = context
self.parser.add_argument('-d', '--device', help='The name of the device')
self.parser.add_argument('-o', '--output', help='Directory to save the recording in')
# Need validation
self.parser.add_argument('-s', '--suffix', help='The suffix of the revent file, e.g. ``setup``')
self.parser.add_argument('-C', '--clear', help='Clear app cache before launching it',
action="store_true")
group = self.parser.add_mutually_exclusive_group(required=False)
group.add_argument('-p', '--package', help='Package to launch before recording')
group.add_argument('-w', '--workload', help='Name of a revent workload (mostly games)')
# Validate command options
def validate_args(self, args):
if args.clear and not (args.package or args.workload):
self.logger.error("Package/Workload must be specified if you want to clear cache")
self.parser.print_help()
sys.exit()
if args.workload and args.suffix:
self.logger.error("cannot specify manual suffixes for workloads")
self.parser.print_help()
sys.exit()
if args.suffix:
args.suffix += "."
# pylint: disable=W0201
def execute(self, state, args):
self.validate_args(args)
self.logger.info("Connecting to device...")
# Setup config
self.config = RunConfiguration(pluginloader)
for filepath in settings.config_paths:
self.config.load_config(filepath)
self.config.set_agenda(Agenda())
self.config.finalize()
# Setup device
self.device_manager = pluginloader.get_manager(self.config.device)
self.device_manager.validate()
self.device_manager.connect()
context = LightContext(self.config, self.device_manager)
self.device_manager.initialize(context)
self.device = self.device_manager.target
if args.device:
self.device_name = args.device
else:
self.device_name = self.device.model
# Install Revent
host_binary = context.resolver.get(Executable(NO_ONE, self.device.abi, 'revent'))
self.target_binary = self.device.install_if_needed(host_binary)
if args.workload:
self.workload_record(args, context)
elif args.package:
self.package_record(args, context)
else:
self.manual_record(args, context)
def manual_record(self, args, context):
revent_file = self.device.get_workpath('{}.{}revent'.format(self.device_name, args.suffix or ""))
self._record(revent_file, "", args.output)
def package_record(self, args, context):
revent_file = self.device.get_workpath('{}.{}revent'.format(self.device_name, args.suffix or ""))
if args.clear:
self.device.execute("pm clear {}".format(args.package))
self.logger.info("Starting {}".format(args.package))
self.device.execute('monkey -p {} -c android.intent.category.LAUNCHER 1'.format(args.package))
self._record(revent_file, "", args.output)
def workload_record(self, args, context):
setup_file = self.device.get_workpath('{}.setup.revent'.format(self.device_name))
run_file = self.device.get_workpath('{}.run.revent'.format(self.device_name))
self.logger.info("Deploying {}".format(args.workload))
workload = pluginloader.get_workload(args.workload, self.device)
workload.apk_init_resources(context)
workload.initialize_package(context)
workload.do_post_install(context)
workload.start_activity()
if args.clear:
workload.reset(context)
self._record(setup_file, " SETUP",
args.output or os.path.join(workload.dependencies_directory, 'revent_files'))
self._record(run_file, " RUN",
args.output or os.path.join(workload.dependencies_directory, 'revent_files'))
self.logger.info("Tearing down {}".format(args.workload))
workload.apk_teardown(context)
def _record(self, revent_file, name, output_path):
self.logger.info("Press Enter when you are ready to record{}...".format(name))
raw_input("")
command = "{} record -t 100000 -s {}".format(self.target_binary, revent_file)
self.device.kick_off(command)
self.logger.info("Press Enter when you have finished recording {}...".format(name))
raw_input("")
self.device.killall("revent")
output_path = output_path or os.getcwdu()
if not os.path.isdir(output_path):
os.makedirs(output_path)
revent_file_name = self.device.path.basename(revent_file)
host_path = os.path.join(output_path, revent_file_name)
if os.path.exists(host_path):
self.logger.info("Revent file '{}' already exists, overwrite? [y/n]".format(revent_file_name))
if raw_input("") == "y":
os.remove(host_path)
else:
self.logger.warning("Did not pull and overwrite '{}'".format(revent_file_name))
return
self.logger.info("Pulling '{}' from device".format(self.device.path.basename(revent_file)))
self.device.pull(revent_file, output_path)
class ReplayCommand(RecordCommand):
name = 'replay'
description = '''Replay a revent recording
Revent allows you to record raw inputs such as screen swipes or button presses.
See ``wa show record`` to see how to make an revent recording.
'''
def initialize(self, context):
self.context = context
self.parser.add_argument('revent', help='The name of the file to replay')
self.parser.add_argument('-p', '--package', help='Package to launch before recording')
self.parser.add_argument('-C', '--clear', help='Clear app cache before launching it',
action="store_true")
# pylint: disable=W0201
def run(self, args):
self.logger.info("Pushing file to device")
self.device.push(args.revent, self.device.working_directory)
revent_file = self.device.path.join(self.device.working_directory, os.path.split(args.revent)[1])
if args.clear:
self.device.execute("pm clear {}".format(args.package))
if args.package:
self.logger.info("Starting {}".format(args.package))
self.device.execute('monkey -p {} -c android.intent.category.LAUNCHER 1'.format(args.package))
command = "{} replay {}".format(self.target_binary, revent_file)
self.device.execute(command)
self.logger.info("Finished replay")
# Used to satisfy the API
class LightContext(object):
def __init__(self, config, device_manager):
self.resolver = ResourceResolver(config)
self.resolver.load()
self.device_manager = device_manager

View File

@ -1,123 +0,0 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import shutil
import wlauto
from wlauto import Command, settings
from wlauto.core import pluginloader
from wlauto.core.configuration import RunConfiguration
from wlauto.core.configuration.parsers import AgendaParser, ConfigParser
from wlauto.core.execution import Executor
from wlauto.core.output import init_wa_output
from wlauto.core.version import get_wa_version
from wlauto.exceptions import NotFoundError, ConfigError
from wlauto.utils.log import add_log_file
from wlauto.utils.types import toggle_set
class RunCommand(Command):
name = 'run'
description = 'Execute automated workloads on a remote device and process the resulting output.'
def initialize(self, context):
self.parser.add_argument('agenda', metavar='AGENDA',
help="""
Agenda for this workload automation run. This defines which
workloads will be executed, how many times, with which
tunables, etc. See example agendas in {} for an example of
how this file should be structured.
""".format(os.path.dirname(wlauto.__file__)))
self.parser.add_argument('-d', '--output-directory', metavar='DIR', default=None,
help="""
Specify a directory where the output will be generated. If
the directory already exists, the script will abort unless -f
option (see below) is used, in which case the contents of the
directory will be overwritten. If this option is not specified,
then {} will be used instead.
""".format(settings.default_output_directory))
self.parser.add_argument('-f', '--force', action='store_true',
help="""
Overwrite output directory if it exists. By default, the script
will abort in this situation to prevent accidental data loss.
""")
self.parser.add_argument('-i', '--id', action='append', dest='only_run_ids', metavar='ID',
help="""
Specify a workload spec ID from an agenda to run. If this is
specified, only that particular spec will be run, and other
workloads in the agenda will be ignored. This option may be
used to specify multiple IDs.
""")
self.parser.add_argument('--disable', action='append', dest='instruments_to_disable',
default=[],
metavar='INSTRUMENT', help="""
Specify an instrument to disable from the command line. This
equivalent to adding "~{metavar}" to the instrumentation list in
the agenda. This can be used to temporarily disable a troublesome
instrument for a particular run without introducing permanent
change to the config (which one might then forget to revert).
This option may be specified multiple times.
""")
def execute(self, config, args):
output = self.set_up_output_directory(config, args)
add_log_file(output.logfile)
self.logger.debug('Version: {}'.format(get_wa_version()))
self.logger.debug('Command Line: {}'.format(' '.join(sys.argv)))
disabled_instruments = toggle_set(["~{}".format(i)
for i in args.instruments_to_disable])
config.jobs_config.disable_instruments(disabled_instruments)
config.jobs_config.only_run_ids(args.only_run_ids)
parser = AgendaParser()
if os.path.isfile(args.agenda):
parser.load_from_path(config, args.agenda)
shutil.copy(args.agenda, output.raw_config_dir)
else:
try:
pluginloader.get_plugin_class(args.agenda, kind='workload')
agenda = {'workloads': [{'name': args.agenda}]}
parser.load(config, agenda, 'CMDLINE_ARGS')
except NotFoundError:
msg = 'Agenda file "{}" does not exist, and there is no workload '\
'with that name.\nYou can get a list of available workloads '\
'by running "wa list workloads".'
raise ConfigError(msg.format(args.agenda))
executor = Executor()
executor.execute(config, output)
def set_up_output_directory(self, config, args):
if args.output_directory:
output_directory = args.output_directory
else:
output_directory = settings.default_output_directory
self.logger.debug('Using output directory: {}'.format(output_directory))
try:
return init_wa_output(output_directory, config, args.force)
except RuntimeError as e:
if 'path exists' in str(e):
msg = 'Output directory "{}" exists.\nPlease specify another '\
'location, or use -f option to overwrite.'
self.logger.critical(msg.format(output_directory))
sys.exit(1)
else:
raise e

View File

@ -1,114 +0,0 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import subprocess
from cStringIO import StringIO
from wlauto import Command
from wlauto.core.configuration import settings
from wlauto.core import pluginloader
from wlauto.utils.doc import (get_summary, get_description, get_type_name, format_column, format_body,
format_paragraph, indent, strip_inlined_text)
from wlauto.utils.misc import get_pager
from wlauto.utils.terminalsize import get_terminal_size
class ShowCommand(Command):
name = 'show'
description = """
Display documentation for the specified plugin (workload, instrument, etc.).
"""
def initialize(self, context):
self.parser.add_argument('name', metavar='EXTENSION',
help='''The name of the plugin for which information will
be shown.''')
def execute(self, state, args):
# pylint: disable=unpacking-non-sequence
plugin = pluginloader.get_plugin_class(args.name)
out = StringIO()
term_width, term_height = get_terminal_size()
format_plugin(plugin, out, term_width)
text = out.getvalue()
pager = get_pager()
if len(text.split('\n')) > term_height and pager:
try:
sp = subprocess.Popen(pager, stdin=subprocess.PIPE)
sp.communicate(text)
except OSError:
self.logger.warning('Could not use PAGER "{}"'.format(pager))
sys.stdout.write(text)
else:
sys.stdout.write(text)
def format_plugin(plugin, out, width):
format_plugin_name(plugin, out)
out.write('\n')
format_plugin_summary(plugin, out, width)
out.write('\n')
if hasattr(plugin, 'supported_platforms'):
format_supported_platforms(plugin, out, width)
out.write('\n')
if plugin.parameters:
format_plugin_parameters(plugin, out, width)
out.write('\n')
format_plugin_description(plugin, out, width)
def format_plugin_name(plugin, out):
out.write('\n{}\n'.format(plugin.name))
def format_plugin_summary(plugin, out, width):
out.write('{}\n'.format(format_body(strip_inlined_text(get_summary(plugin)), width)))
def format_supported_platforms(plugin, out, width):
text = 'supported on: {}'.format(', '.join(plugin.supported_platforms))
out.write('{}\n'.format(format_body(text, width)))
def format_plugin_description(plugin, out, width):
# skip the initial paragraph of multi-paragraph description, as already
# listed above.
description = get_description(plugin).split('\n\n', 1)[-1]
out.write('{}\n'.format(format_body(strip_inlined_text(description), width)))
def format_plugin_parameters(plugin, out, width, shift=4):
out.write('parameters:\n\n')
param_texts = []
for param in plugin.parameters:
description = format_paragraph(strip_inlined_text(param.description or ''), width - shift)
param_text = '{}'.format(param.name)
if param.mandatory:
param_text += " (MANDATORY)"
param_text += '\n{}\n'.format(description)
param_text += indent('type: {}\n'.format(get_type_name(param.kind)))
if param.allowed_values:
param_text += indent('allowed values: {}\n'.format(', '.join(map(str, param.allowed_values))))
elif param.constraint:
param_text += indent('constraint: {}\n'.format(get_type_name(param.constraint)))
if param.default is not None:
param_text += indent('default: {}\n'.format(param.default))
param_texts.append(indent(param_text, shift))
out.write(format_column('\n'.join(param_texts), width))

View File

@ -1,25 +0,0 @@
package ${package_name};
import android.app.Activity;
import android.os.Bundle;
import android.util.Log;
import android.view.KeyEvent;
// Import the uiautomator libraries
import com.android.uiautomator.core.UiObject;
import com.android.uiautomator.core.UiObjectNotFoundException;
import com.android.uiautomator.core.UiScrollable;
import com.android.uiautomator.core.UiSelector;
import com.android.uiautomator.testrunner.UiAutomatorTestCase;
import com.arm.wlauto.uiauto.BaseUiAutomation;
public class UiAutomation extends BaseUiAutomation {
public static String TAG = "${name}";
public void runUiAutomation() throws Exception {
// UI Automation code goes here
}
}

View File

@ -1,27 +0,0 @@
from wlauto import AndroidBenchmark, Parameter
class ${class_name}(AndroidBenchmark):
name = '${name}'
# NOTE: Please do not leave these comments in the code.
#
# Replace with the package for the app in the APK file.
package = 'com.foo.bar'
# Replace with the full path to the activity to run.
activity = '.RunBuzz'
description = "This is an placeholder description"
parameters = [
# Workload parameters go here e.g.
Parameter('example_parameter', kind=int, allowed_values=[1, 2, 3], default=1, override=True, mandatory=False,
description='This is an example parameter')
]
def run(self, context):
pass
def update_result(self, context):
super(${class_name}, self).update_result(context)
# process results and add them using
# context.result.add_metric

View File

@ -1,24 +0,0 @@
from wlauto import AndroidUiAutoBenchmark, Parameter
class ${class_name}(AndroidUiAutoBenchmark):
name = '${name}'
# NOTE: Please do not leave these comments in the code.
#
# Replace with the package for the app in the APK file.
package = 'com.foo.bar'
# Replace with the full path to the activity to run.
activity = '.RunBuzz'
description = "This is an placeholder description"
parameters = [
# Workload parameters go here e.g.
Parameter('example_parameter', kind=int, allowed_values=[1, 2, 3], default=1, override=True, mandatory=False,
description='This is an example parameter')
]
def update_result(self, context):
super(${class_name}, self).update_result(context)
# process results and add them using
# context.result.add_metric

View File

@ -1,28 +0,0 @@
from wlauto import Workload, Parameter
class ${class_name}(Workload):
name = '${name}'
description = "This is an placeholder description"
parameters = [
# Workload parameters go here e.g.
Parameter('example_parameter', kind=int, allowed_values=[1, 2, 3], default=1, override=True, mandatory=False,
description='This is an example parameter')
]
def setup(self, context):
pass
def run(self, context):
pass
def update_result(self, context):
pass
def teardown(self, context):
pass
def validate(self):
pass

View File

@ -1,102 +0,0 @@
import os
import sys
import warnings
from multiprocessing import Process
try:
from setuptools.command.install import install as orig_install
from setuptools import setup
except ImportError:
from distutils.command.install import install as orig_install
from distutils.core import setup
try:
import pwd
except ImportError:
pwd = None
warnings.filterwarnings('ignore', "Unknown distribution option: 'install_requires'")
try:
os.remove('MANIFEST')
except OSError:
pass
packages = []
data_files = {}
source_dir = os.path.dirname(__file__)
for root, dirs, files in os.walk('$package_name'):
rel_dir = os.path.relpath(root, source_dir)
data = []
if '__init__.py' in files:
for f in files:
if os.path.splitext(f)[1] not in ['.py', '.pyc', '.pyo']:
data.append(f)
package_name = rel_dir.replace(os.sep, '.')
package_dir = root
packages.append(package_name)
data_files[package_name] = data
else:
# use previous package name
filepaths = [os.path.join(root, f) for f in files]
data_files[package_name].extend([os.path.relpath(f, package_dir) for f in filepaths])
params = dict(
name='$package_name',
version='0.0.1',
packages=packages,
package_data=data_files,
url='N/A',
maintainer='$user',
maintainer_email='$user@example.com',
install_requires=[
'wlauto',
],
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'License :: Other/Proprietary License',
'Operating System :: Unix',
'Programming Language :: Python :: 2.7',
],
)
def update_wa_packages():
sudo_user = os.getenv('SUDO_USER')
if sudo_user:
user_entry = pwd.getpwnam(sudo_user)
os.setgid(user_entry.pw_gid)
os.setuid(user_entry.pw_uid)
env_root = os.getenv('WA_USER_DIRECTORY', os.path.join(os.path.expanduser('~'), '.workload_automation'))
if not os.path.isdir(env_root):
os.makedirs(env_root)
wa_packages_file = os.path.join(env_root, 'packages')
if os.path.isfile(wa_packages_file):
with open(wa_packages_file, 'r') as wfh:
package_list = wfh.read().split()
if params['name'] not in package_list:
package_list.append(params['name'])
else: # no existing package file
package_list = [params['name']]
with open(wa_packages_file, 'w') as wfh:
wfh.write('\n'.join(package_list))
class install(orig_install):
def run(self):
orig_install.run(self)
# Must be done in a separate process because it will drop privileges when
# run under sudo, and won't be able to reacquire them.
p = Process(target=update_wa_packages)
p.start()
p.join()
params['cmdclass'] = {'install': install}
setup(**params)

View File

@ -1,35 +0,0 @@
from wlauto import UiAutomatorWorkload, Parameter
class ${class_name}(UiAutomatorWorkload):
name = '${name}'
description = "This is an placeholder description"
parameters = [
# Workload parameters go here e.g.
Parameter('example_parameter', kind=int, allowed_values=[1, 2, 3], default=1, override=True, mandatory=False,
description='This is an example parameter')
]
def setup(self, context):
super(${class_name}, self).setup(context)
# Perform any necessary setup before starting the UI automation
# e.g. copy files to the device, start apps, reset logs, etc.
def update_result(self, context):
pass
# Process workload execution artifacts to extract metrics
# and add them to the run result using
# context.result.add_metric()
def teardown(self, context):
super(${class_name}, self).teardown(context)
# Perform any necessary cleanup
def validate(self):
pass
# Validate inter-parameter assumptions etc

View File

@ -1,16 +0,0 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

View File

@ -1,16 +0,0 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

View File

@ -1,40 +0,0 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wlauto.common.resources import FileResource
class ReventFile(FileResource):
name = 'revent'
def __init__(self, owner, stage):
super(ReventFile, self).__init__(owner)
self.stage = stage
class JarFile(FileResource):
name = 'jar'
class ApkFile(FileResource):
name = 'apk'
def __init__(self, owner, version):
super(ApkFile, self).__init__(owner)
self.version = version

View File

@ -1,506 +0,0 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import time
from wlauto.core.plugin import Parameter
from wlauto.core.workload import Workload
from wlauto.core.resource import NO_ONE
from wlauto.common.resources import PluginAsset, Executable
from wlauto.exceptions import WorkloadError, ResourceError, ConfigError
from wlauto.utils.android import ApkInfo, ANDROID_NORMAL_PERMISSIONS
from wlauto.utils.types import boolean
import wlauto.common.android.resources
DELAY = 5
class UiAutomatorWorkload(Workload):
"""
Base class for all workloads that rely on a UI Automator JAR file.
This class should be subclassed by workloads that rely on Android UiAutomator
to work. This class handles transferring the UI Automator JAR file to the device
and invoking it to run the workload. By default, it will look for the JAR file in
the same directory as the .py file for the workload (this can be changed by overriding
the ``uiauto_file`` property in the subclassing workload).
To initiate UI Automation, the fully-qualified name of the Java class and the
corresponding method name are needed. By default, the package part of the class name
is derived from the class file, and the class and method names are ``UiAutomation``
and ``runUiAutomation`` respectively. If you have generated the boilerplate for the
UiAutomator code using the ``create_workloads`` utility, then everything should be named
correctly. If you're creating the Java project manually, you need to make sure the names
match what is expected, or you could override the ``uiauto_package``, ``uiauto_class`` and
``uiauto_method`` class attributes with values that match your Java code.
You can also pass parameters to the JAR file. To do this add the parameters to
``self.uiauto_params`` dict inside your class's ``__init__`` or ``setup`` methods.
"""
supported_platforms = ['android']
uiauto_package = ''
uiauto_class = 'UiAutomation'
uiauto_method = 'runUiAutomation'
# Can be overridden by subclasses to adjust to the run time of specific
# benchmarks.
run_timeout = 4 * 60 # seconds
def __init__(self, device, _call_super=True, **kwargs): # pylint: disable=W0613
if _call_super:
super(UiAutomatorWorkload, self).__init__(device, **kwargs)
self.uiauto_file = None
self.device_uiauto_file = None
self.command = None
self.uiauto_params = {}
def init_resources(self, context):
self.uiauto_file = context.resolver.get(wlauto.common.android.resources.JarFile(self))
if not self.uiauto_file:
raise ResourceError('No UI automation JAR file found for workload {}.'.format(self.name))
self.device_uiauto_file = self.device.path.join(self.device.working_directory,
os.path.basename(self.uiauto_file))
if not self.uiauto_package:
self.uiauto_package = os.path.splitext(os.path.basename(self.uiauto_file))[0]
def setup(self, context):
method_string = '{}.{}#{}'.format(self.uiauto_package, self.uiauto_class, self.uiauto_method)
params_dict = self.uiauto_params
params_dict['workdir'] = self.device.working_directory
params = ''
for k, v in self.uiauto_params.iteritems():
params += ' -e {} {}'.format(k, v)
self.command = 'uiautomator runtest {}{} -c {}'.format(self.device_uiauto_file, params, method_string)
self.device.push(self.uiauto_file, self.device_uiauto_file)
self.device.killall('uiautomator')
def run(self, context):
result = self.device.execute(self.command, self.run_timeout)
if 'FAILURE' in result:
raise WorkloadError(result)
else:
self.logger.debug(result)
time.sleep(DELAY)
def update_result(self, context):
pass
def teardown(self, context):
self.device.remove(self.device_uiauto_file)
def validate(self):
if not self.uiauto_file:
raise WorkloadError('No UI automation JAR file found for workload {}.'.format(self.name))
if not self.uiauto_package:
raise WorkloadError('No UI automation package specified for workload {}.'.format(self.name))
class ApkWorkload(Workload):
"""
A workload based on an APK file.
Defines the following attributes:
:package: The package name of the app. This is usually a Java-style name of the form
``com.companyname.appname``.
:activity: This is the initial activity of the app. This will be used to launch the
app during the setup.
:view: The class of the main view pane of the app. This needs to be defined in order
to collect SurfaceFlinger-derived statistics (such as FPS) for the app, but
may otherwise be left as ``None``.
:install_timeout: Timeout for the installation of the APK. This may vary wildly based on
the size and nature of a specific APK, and so should be defined on
per-workload basis.
.. note:: To a lesser extent, this will also vary based on the
device and the nature of the adb connection (USB vs Ethernet),
so, as with all timeouts, some leeway must be included in
the specified value.
.. note:: Both package and activity for a workload may be obtained from the APK using
the ``aapt`` tool that comes with the ADT (Android Development Tools) bundle.
"""
package = None
activity = None
view = None
supported_platforms = ['android']
parameters = [
Parameter('install_timeout', kind=int, default=300,
description='Timeout for the installation of the apk.'),
Parameter('check_apk', kind=boolean, default=True,
description='''
Discover the APK for this workload on the host, and check that
the version matches the one on device (if already installed).
'''),
Parameter('force_install', kind=boolean, default=False,
description='''
Always re-install the APK, even if a matching version is
already installed on the device.
'''),
Parameter('uninstall_apk', kind=boolean, default=False,
description='If ``True``, will uninstall workload\'s APK as part of teardown.'),
]
def __init__(self, device, _call_super=True, **kwargs):
if _call_super:
super(ApkWorkload, self).__init__(device, **kwargs)
self.apk_file = None
self.apk_version = None
self.logcat_log = None
def init_resources(self, context):
self.apk_file = context.resolver.get(wlauto.common.android.resources.ApkFile(self),
version=getattr(self, 'version', None),
strict=self.check_apk)
def validate(self):
if self.check_apk:
if not self.apk_file:
raise WorkloadError('No APK file found for workload {}.'.format(self.name))
else:
if self.force_install:
raise ConfigError('force_install cannot be "True" when check_apk is set to "False".')
def setup(self, context):
self.initialize_package(context)
self.start_activity()
self.device.execute('am kill-all') # kill all *background* activities
self.device.clear_logcat()
def initialize_package(self, context):
installed_version = self.device.get_package_version(self.package)
if self.check_apk:
self.initialize_with_host_apk(context, installed_version)
else:
if not installed_version:
message = '''{} not found on the device and check_apk is set to "False",
so the host version was not checked.'''
raise WorkloadError(message.format(self.package))
message = 'Version {} installed on device; skipping host APK check.'
self.logger.debug(message.format(installed_version))
self.reset(context)
self.apk_version = installed_version
def initialize_with_host_apk(self, context, installed_version):
host_version = ApkInfo(self.apk_file).version_name
if installed_version != host_version:
if installed_version:
message = '{} host version: {}, device version: {}; re-installing...'
self.logger.debug(message.format(os.path.basename(self.apk_file),
host_version, installed_version))
else:
message = '{} host version: {}, not found on device; installing...'
self.logger.debug(message.format(os.path.basename(self.apk_file),
host_version))
self.force_install = True # pylint: disable=attribute-defined-outside-init
else:
message = '{} version {} found on both device and host.'
self.logger.debug(message.format(os.path.basename(self.apk_file),
host_version))
if self.force_install:
if installed_version:
self.device.uninstall(self.package)
self.install_apk(context)
else:
self.reset(context)
self.apk_version = host_version
def start_activity(self):
output = self.device.execute('am start -W -n {}/{}'.format(self.package, self.activity))
if 'Error:' in output:
self.device.execute('am force-stop {}'.format(self.package))  # this will dismiss any error dialogs
raise WorkloadError(output)
self.logger.debug(output)
def reset(self, context): # pylint: disable=W0613
self.device.execute('am force-stop {}'.format(self.package))
self.device.execute('pm clear {}'.format(self.package))
# As of android API level 23, apps can request permissions at runtime,
# this will grant all of them so requests do not pop up when running the app
if self.device.os_version['sdk'] >= 23:
self._grant_requested_permissions()
def install_apk(self, context):
output = self.device.install(self.apk_file, self.install_timeout)
if 'Failure' in output:
if 'ALREADY_EXISTS' in output:
self.logger.warn('Using already installed APK (did not uninstall properly?)')
else:
raise WorkloadError(output)
else:
self.logger.debug(output)
self.do_post_install(context)
def _grant_requested_permissions(self):
dumpsys_output = self.device.execute(command="dumpsys package {}".format(self.package))
permissions = []
lines = iter(dumpsys_output.splitlines())
for line in lines:
if "requested permissions:" in line:
break
for line in lines:
if "android.permission." in line:
permissions.append(line.split(":")[0].strip())
else:
break
for permission in permissions:
# "Normal" Permisions are automatically granted and cannot be changed
permission_name = permission.rsplit('.', 1)[1]
if permission_name not in ANDROID_NORMAL_PERMISSIONS:
self.device.execute("pm grant {} {}".format(self.package, permission))
def do_post_install(self, context):
""" May be overwritten by dervied classes."""
pass
def run(self, context):
pass
def update_result(self, context):
self.logcat_log = os.path.join(context.output_directory, 'logcat.log')
context.device_manager.dump_logcat(self.logcat_log)
context.add_iteration_artifact(name='logcat',
path='logcat.log',
kind='log',
description='Logcat dump for the run.')
def teardown(self, context):
self.device.execute('am force-stop {}'.format(self.package))
if self.uninstall_apk:
self.device.uninstall(self.package)
AndroidBenchmark = ApkWorkload # backward compatibility
class ReventWorkload(Workload):
default_setup_timeout = 5 * 60 # in seconds
default_run_timeout = 10 * 60 # in seconds
@property
def on_device_setup_revent(self):
return self.device.get_workpath('{}.setup.revent'.format(self.device.model))
@property
def on_device_run_revent(self):
return self.device.get_workpath('{}.run.revent'.format(self.device.model))
def __init__(self, device, _call_super=True, **kwargs):
if _call_super:
super(ReventWorkload, self).__init__(device, **kwargs)
self.on_device_revent_binary = None
self.setup_timeout = kwargs.get('setup_timeout', self.default_setup_timeout)
self.run_timeout = kwargs.get('run_timeout', self.default_run_timeout)
self.revent_setup_file = None
self.revent_run_file = None
def initialize(self, context):
self.revent_setup_file = context.resolver.get(wlauto.common.android.resources.ReventFile(self, 'setup'))
self.revent_run_file = context.resolver.get(wlauto.common.android.resources.ReventFile(self, 'run'))
def setup(self, context):
self._check_revent_files(context)
self.device.killall('revent')
command = '{} replay {}'.format(self.on_device_revent_binary, self.on_device_setup_revent)
self.device.execute(command, timeout=self.setup_timeout)
def run(self, context):
command = '{} replay {}'.format(self.on_device_revent_binary, self.on_device_run_revent)
self.logger.debug('Replaying {}'.format(os.path.basename(self.on_device_run_revent)))
self.device.execute(command, timeout=self.run_timeout)
self.logger.debug('Replay completed.')
def update_result(self, context):
pass
def teardown(self, context):
self.device.remove(self.on_device_setup_revent)
self.device.remove(self.on_device_run_revent)
def _check_revent_files(self, context):
# check the revent binary
revent_binary = context.resolver.get(Executable(NO_ONE, self.device.abi, 'revent'))
if not os.path.isfile(revent_binary):
message = '{} does not exist. '.format(revent_binary)
message += 'Please build revent for your system and place it in that location'
raise WorkloadError(message)
if not self.revent_setup_file:
# pylint: disable=too-few-format-args
message = '{0}.setup.revent file does not exist. Please provide one for your device ({0}).'.format(self.device.name)
raise WorkloadError(message)
if not self.revent_run_file:
# pylint: disable=too-few-format-args
message = '{0}.run.revent file does not exist. Please provide one for your device ({0}).'.format(self.device.name)
raise WorkloadError(message)
self.on_device_revent_binary = self.device.install_executable(revent_binary)
self.device.push(self.revent_run_file, self.on_device_run_revent)
self.device.push(self.revent_setup_file, self.on_device_setup_revent)
class AndroidUiAutoBenchmark(UiAutomatorWorkload, AndroidBenchmark):
supported_platforms = ['android']
def __init__(self, device, **kwargs):
UiAutomatorWorkload.__init__(self, device, **kwargs)
AndroidBenchmark.__init__(self, device, _call_super=False, **kwargs)
def init_resources(self, context):
UiAutomatorWorkload.init_resources(self, context)
AndroidBenchmark.init_resources(self, context)
def setup(self, context):
UiAutomatorWorkload.setup(self, context)
AndroidBenchmark.setup(self, context)
def update_result(self, context):
UiAutomatorWorkload.update_result(self, context)
AndroidBenchmark.update_result(self, context)
def teardown(self, context):
UiAutomatorWorkload.teardown(self, context)
AndroidBenchmark.teardown(self, context)
class GameWorkload(ApkWorkload, ReventWorkload):
"""
GameWorkload is the base class for all the workload that use revent files to
run.
For more in depth details on how to record revent files, please see
:ref:`revent_files_creation`. To subclass this class, please refer to
:ref:`GameWorkload`.
Additionally, this class defines the following attributes:
:asset_file: A tarball containing additional assets for the workload. These are the assets
that are not part of the APK but would need to be downloaded by the workload
(usually, on first run of the app). Since the presence of a network connection
cannot be assumed on some devices, this provides an alternative means of obtaining
the assets.
:saved_state_file: A tarball containing the saved state for a workload. This tarball gets
deployed in the same way as the asset file. The only difference is that
deploying it is usually much slower, and re-deploying the tarball alone
should be enough to reset the workload to a known state (without having
to reinstall the app or re-deploy the other assets).
:loading_time: Time it takes for the workload to load after the initial activity has been
started.
"""
# May be optionally overridden by subclasses
asset_file = None
saved_state_file = None
view = 'SurfaceView'
loading_time = 10
supported_platforms = ['android']
parameters = [
Parameter('install_timeout', default=500, override=True),
Parameter('assets_push_timeout', kind=int, default=500,
description='Timeout used during deployment of the assets package (if there is one).'),
Parameter('clear_data_on_reset', kind=bool, default=True,
description="""
If set to ``False``, this will prevent WA from clearing package
data for this workload prior to running it.
"""),
]
def __init__(self, device, **kwargs): # pylint: disable=W0613
ApkWorkload.__init__(self, device, **kwargs)
ReventWorkload.__init__(self, device, _call_super=False, **kwargs)
self.logcat_process = None
self.module_dir = os.path.dirname(sys.modules[self.__module__].__file__)
self.revent_dir = os.path.join(self.module_dir, 'revent_files')
def apk_init_resources(self, context):
ApkWorkload.init_resources(self, context)
def init_resources(self, context):
self.apk_init_resources(context)
ReventWorkload.init_resources(self, context)
def setup(self, context):
ApkWorkload.setup(self, context)
self.logger.debug('Waiting for the game to load...')
time.sleep(self.loading_time)
ReventWorkload.setup(self, context)
def do_post_install(self, context):
ApkWorkload.do_post_install(self, context)
self._deploy_assets(context, self.assets_push_timeout)
def reset(self, context):
# If saved state exists, restore it; if not, do full
# uninstall/install cycle.
self.device.execute('am force-stop {}'.format(self.package))
if self.saved_state_file:
self._deploy_resource_tarball(context, self.saved_state_file)
else:
if self.clear_data_on_reset:
self.device.execute('pm clear {}'.format(self.package))
self._deploy_assets(context)
def run(self, context):
ReventWorkload.run(self, context)
def apk_teardown(self, context):
if not self.saved_state_file:
ApkWorkload.teardown(self, context)
else:
self.device.execute('am force-stop {}'.format(self.package))
def teardown(self, context):
self.apk_teardown(context)
ReventWorkload.teardown(self, context)
def _deploy_assets(self, context, timeout=300):
if self.asset_file:
self._deploy_resource_tarball(context, self.asset_file, timeout)
if self.saved_state_file: # must be deployed *after* asset tarball!
self._deploy_resource_tarball(context, self.saved_state_file, timeout)
def _deploy_resource_tarball(self, context, resource_file, timeout=300):
kind = 'data'
if ':' in resource_file:
kind, resource_file = resource_file.split(':', 1)
ondevice_cache = self.device.path.join(self.device.working_directory, '.cache', self.name, resource_file)
if not self.device.file_exists(ondevice_cache):
asset_tarball = context.resolver.get(PluginAsset(self, resource_file))
if not asset_tarball:
message = 'Could not find resource {} for workload {}.'
raise WorkloadError(message.format(resource_file, self.name))
# adb push will create intermediate directories if they don't
# exist.
self.device.push(asset_tarball, ondevice_cache, timeout=timeout)
device_asset_directory = self.device.path.join(context.device_manager.external_storage_directory, 'Android', kind)
deploy_command = 'cd {} && {} tar -xzf {}'.format(device_asset_directory,
self.device.busybox,
ondevice_cache)
self.device.execute(deploy_command, timeout=timeout, as_root=True)

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -1,6 +0,0 @@
The gem5 simulator can be obtained from http://repo.gem5.org/gem5/ and the
corresponding documentation can be found at http://www.gem5.org.
The source for the m5 binaries bundled with Workload Automation (found at
wlauto/common/bin/arm64/m5 and wlauto/common/bin/armeabi/m5) can be found at
util/m5 in the gem5 source at http://repo.gem5.org/gem5/.

View File

@ -1,16 +0,0 @@
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

View File

@ -1,64 +0,0 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from wlauto.core.resource import Resource
class FileResource(Resource):
"""
Base class for all resources that are a regular file in the
file system.
"""
def delete(self, instance):
os.remove(instance)
class File(FileResource):
name = 'file'
def __init__(self, owner, path, url=None):
super(File, self).__init__(owner)
self.path = path
self.url = url
def __str__(self):
return '<{}\'s {} {}>'.format(self.owner, self.name, self.path or self.url)
class PluginAsset(File):
name = 'plugin_asset'
def __init__(self, owner, path):
super(PluginAsset, self).__init__(owner, os.path.join(owner.name, path))
class Executable(FileResource):
name = 'executable'
def __init__(self, owner, platform, filename):
super(Executable, self).__init__(owner)
self.platform = platform
self.filename = filename
def __str__(self):
return '<{}\'s {} {}>'.format(self.owner, self.platform, self.filename)

View File

@ -1,289 +0,0 @@
"""
Default config for Workload Automation. DO NOT MODIFY this file. This file
gets copied to ~/.workload_automation/config.py on the initial run of run_workloads.
Add your configuration to that file instead.
"""
# *** WARNING: ***
# Configuration listed in this file is NOT COMPLETE. This file sets the default
# configuration for WA and gives EXAMPLES of other configuration available. It
# is not supposed to be an exhaustive list.
# PLEASE REFER TO WA DOCUMENTATION FOR THE COMPLETE LIST OF AVAILABLE
# EXTENSIONS AND THEIR CONFIGURATION.
# This defines when the device will be rebooted during Workload Automation execution. #
# #
# Valid policies are: #
# never: The device will never be rebooted. #
# as_needed: The device will only be rebooted if the need arises (e.g. if it #
# becomes unresponsive). #
# initial: The device will be rebooted when the execution first starts, just before executing #
# the first workload spec. #
# each_spec: The device will be rebooted before running a new workload spec. #
# each_iteration: The device will be rebooted before each new iteration. #
# #
reboot_policy = 'as_needed'
# Defines the order in which the agenda spec will be executed. At the moment, #
# the following execution orders are supported: #
# #
# by_iteration: The first iteration of each workload spec is executed one after the other, #
# so all workloads are executed before proceeding on to the second iteration. #
# This is the default if no order is explicitly specified. #
# If multiple sections were specified, this will also split them up, so that specs #
# in the same section are further apart in the execution order. #
# by_section: Same as "by_iteration", but runs specs from the same section one after the other #
# by_spec: All iterations of the first spec are executed before moving on to the next #
# spec. This may also be specified as ``"classic"``, as this was the way #
# workloads were executed in earlier versions of WA. #
# random: Randomises the order in which specs run. #
execution_order = 'by_iteration'
# This indicates when a job will be re-run.
# Possible values:
# OK: This iteration has completed and no errors have been detected
# PARTIAL: One or more instruments have failed (the iteration may still be running).
# FAILED: The workload itself has failed.
# ABORTED: The user interrupted the workload
#
# If set to an empty list, a job will not be re-run ever.
retry_on_status = ['FAILED', 'PARTIAL']
# How many times a job will be re-run before giving up
max_retries = 3
####################################################################################################
######################################### Device Settings ##########################################
####################################################################################################
# Specify the device you want to run workload automation on. This must be a #
# string with the ID of the device (e.g. 'generic_android', as set below). #
# #
device = 'generic_android'
# Configuration options that will be passed onto the device. These are obviously device-specific, #
# so check the documentation for the particular device to find out which options and values are #
# valid. The settings listed below are common to all devices #
# #
device_config = dict(
# The name used by adb to identify the device. Use "adb devices" in bash to list
# the devices currently seen by adb.
#adb_name='10.109.173.2:5555',
# The directory on the device that WA will use to push files to
#working_directory='/sdcard/wa-working',
# This specifies the device's CPU cores. The order must match how they
# appear in cpufreq. The example below is for TC2.
# core_names = ['a7', 'a7', 'a7', 'a15', 'a15']
# Specifies cluster mapping for the device's cores.
# core_clusters = [0, 0, 0, 1, 1]
)
####################################################################################################
################################## Instrumentation Configuration ###################################
####################################################################################################
# This defines the additional instrumentation that will be enabled during workload execution, #
# which in turn determines what additional data (such as /proc/interrupts content or Streamline #
# traces) will be available in the results directory. #
# #
instrumentation = [
# Records the time it took to run the workload
'execution_time',
# Collects /proc/interrupts before and after execution and does a diff.
'interrupts',
# Collects the contents of /sys/devices/system/cpu before and after execution and does a diff.
'cpufreq',
# Gets energy usage for the workload from HWMON devices
# NOTE: the hardware needs to have the right sensors in order for this to work
#'hwmon',
# Run perf in the background during workload execution and then collect the results. perf is a
# standard Linux performance analysis tool.
#'perf',
# Collect Streamline traces during workload execution. Streamline is part of DS-5
#'streamline',
# Collects traces by interacting with Ftrace Linux kernel internal tracer
#'trace-cmd',
# Obtains the power consumption of the target device's core measured by National Instruments
# Data Acquisition(DAQ) device.
#'daq',
# Collects CCI counter data.
#'cci_pmu_logger',
# Collects FPS (Frames Per Second) and related metrics (such as jank) from
# the View of the workload (Note: only a single View per workload is
# supported at the moment, so this is mainly useful for games).
#'fps',
]
####################################################################################################
################################# Result Processors Configuration ##################################
####################################################################################################
# Specifies how results will be processed and presented. #
# #
result_processors = [
# Creates a status.txt that provides a summary status for the run
'status',
# Creates a results.txt file for each iteration that lists all collected metrics
# in "name = value (units)" format
'standard',
# Creates a results.csv that contains metrics for all iterations of all workloads
# in the .csv format.
'csv',
# Creates a summary.csv that contains summary metrics for all iterations of all
# workloads in the .csv format. Summary metrics are defined on a per-workload basis
# and are typically things like overall scores. The contents of summary.csv are
# always a subset of the contents of results.csv (if it is generated).
#'summary_csv',
# Creates a results.json that contains metrics for all iterations of all workloads
# in the JSON format
#'json',
# Write results to an sqlite3 database. By default, a new database will be
# generated for each run, however it is possible to specify a path to an
# existing DB file (see result processor configuration below), in which
# case results from multiple runs may be stored in the one file.
#'sqlite',
]
####################################################################################################
################################### Logging output Configuration ###################################
####################################################################################################
# Specify the format of logging messages. The format uses the old formatting syntax: #
# #
# http://docs.python.org/2/library/stdtypes.html#string-formatting-operations #
# #
# The attributes that can be used in formats are listed here: #
# #
# http://docs.python.org/2/library/logging.html#logrecord-attributes #
# #
logging = {
# Log file format
'file format': '%(asctime)s %(levelname)-8s %(name)s: %(message)s',
# Verbose console output format
'verbose format': '%(asctime)s %(levelname)-8s %(name)s: %(message)s',
# Regular console output format
'regular format': '%(levelname)-8s %(message)s',
# Colouring the console output
'colour_enabled': True,
}
####################################################################################################
#################################### Instruments Configuration #####################################
####################################################################################################
# Instrumentation Configuration is related to specific instruments' settings. Some of the #
# instruments require specific settings in order for them to work. These settings are #
# specified here. #
# Note that these settings only take effect if the corresponding instrument is
# enabled above.
####################################################################################################
######################################## perf configuration ########################################
# The hardware events such as instructions executed, cache-misses suffered, or branches
# mispredicted to be reported by perf. Events can be obtained from the device by tpying
# 'perf list'.
#perf_events = ['migrations', 'cs']
# The perf options which can be obtained from man page for perf-record
#perf_options = '-a -i'
####################################################################################################
####################################### hwmon configuration ########################################
# The kinds of sensors hwmon instrument will look for
#hwmon_sensors = ['energy', 'temp']
####################################################################################################
###################################### trace-cmd configuration #####################################
# trace-cmd events to be traced. The events can be found by running, on the device,
# 'trace-cmd list -e'
#trace_events = ['power*']
####################################################################################################
######################################### DAQ configuration ########################################
# The host address of the machine that runs the DAQ server with which the instrument communicates
#daq_server_host = '10.1.17.56'
# The port number on which the DAQ server listens
#daq_server_port = 56788
# The values of resistors 1 and 2 (in Ohms) across which the voltages are measured
#daq_resistor_values = [0.002, 0.002]
####################################################################################################
################################### cci_pmu_logger configuration ###################################
# The events to be counted by the PMU
# NOTE: The number of events must not exceed the number of counters available (which is 4 for CCI-400)
#cci_pmu_events = ['0x63', '0x83']
# The name of the events which will be used when reporting PMU counts
#cci_pmu_event_labels = ['event_0x63', 'event_0x83']
# The period (in jiffies) between counter reads
#cci_pmu_period = 15
####################################################################################################
################################### fps configuration ##############################################
# Data points below this FPS will be dropped as not constituting "real" gameplay. The assumption
# being that while actually running, the FPS in the game will not drop below X frames per second,
# except on loading screens, menus, etc, which should not contribute to FPS calculation.
#fps_drop_threshold=5
# If set to True, this will keep the raw dumpsys output in the results directory (this is mainly
# used for debugging). Note: frames.csv with collected frames data will always be generated
# regardless of this setting.
#fps_keep_raw=False
####################################################################################################
################################# Result Processor Configuration ###################################
####################################################################################################
# Specifies an alternative database to store results in. If the file does not
# exist, it will be created (the directory of the file must exist, however). If
# the file does exist, the results will be added to the existing data set (each
# run has a UUID, so results won't clash even if identical agendas were used).
# Note that in order for this to work, the version of the schema used to generate
# the DB file must match that of the schema used for the current run. Please
# see "What's new" secition in WA docs to check if the schema has changed in
# recent releases of WA.
#sqlite_database = '/work/results/myresults.sqlite'
# If the file specified by sqlite_database exists, setting this to True will
# cause that file to be overwritten rather than updated -- existing results in
# the file will be lost.
#sqlite_overwrite = False
# distribution: internal
####################################################################################################
#################################### Resource Getter configuration #################################
####################################################################################################
# The location on your system where /arm/scratch is mounted. Used by
# Scratch resource getter.
#scratch_mount_point = '/arm/scratch'
# end distribution

View File

@ -1,16 +0,0 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

View File

@ -1,81 +0,0 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import textwrap
from wlauto.core.plugin import Plugin
from wlauto.utils.doc import format_body
from wlauto.core.version import get_wa_version
def init_argument_parser(parser):
parser.add_argument('-c', '--config', action='append', default=[],
help='specify an additional config.py')
parser.add_argument('-v', '--verbose', action='count',
help='The scripts will produce verbose output.')
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(get_wa_version()))
return parser
class Command(Plugin):
"""
Defines a Workload Automation command. This will be executed from the
command line as ``wa <command> [args ...]``. This defines the name to be
used when invoking wa, the code that will actually be executed on
invocation and the argument parser to be used to parse the rest of the
command line arguments.
"""
kind = "command"
help = None
usage = None
description = None
epilog = None
formatter_class = None
def __init__(self, subparsers):
super(Command, self).__init__()
self.group = subparsers
parser_params = dict(help=(self.help or self.description), usage=self.usage,
description=format_body(textwrap.dedent(self.description), 80),
epilog=self.epilog)
if self.formatter_class:
parser_params['formatter_class'] = self.formatter_class
self.parser = subparsers.add_parser(self.name, **parser_params)
init_argument_parser(self.parser) # propagate top-level options
self.initialize(None)
def initialize(self, context):
"""
Perform command-specific initialisation (e.g. adding command-specific
options to the command's parser). ``context`` is always ``None``.
"""
pass
def execute(self, state, args):
"""
Execute this command.
:state: An initialized ``ConfigManager`` that contains the current state of
WA execution up to that point (processed configuration, loaded
plugins, etc).
:args: An ``argparse.Namespace`` containing command line arguments (as returned by
``argparse.ArgumentParser.parse_args()``). This would usually be the result of
invoking ``self.parser``.
"""
raise NotImplementedError()
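A minimal sketch of a concrete command, assuming it is placed where WA's plugin discovery will find it; the command name and arguments below are illustrative only, not a real WA command:
class GreetCommand(Command):

    name = 'greet'
    description = 'Print a greeting (illustrative example only).'

    def initialize(self, context):
        # Add command-specific options to the parser created in __init__.
        self.parser.add_argument('--who', default='world',
                                 help='who to greet')

    def execute(self, state, args):
        # 'state' is the ConfigManager; it is not needed for this trivial example.
        print 'Hello, {}!'.format(args.who)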

View File

@ -1,19 +0,0 @@
# Copyright 2013-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wlauto.core.configuration.configuration import (settings,
RunConfiguration,
JobGenerator,
ConfigurationPoint)
from wlauto.core.configuration.plugin_cache import PluginCache

File diff suppressed because it is too large

View File

@ -1,42 +0,0 @@
from wlauto.core.configuration.configuration import MetaConfiguration, RunConfiguration
from wlauto.core.configuration.plugin_cache import PluginCache
from wlauto.utils.serializer import yaml
from wlauto.utils.doc import strip_inlined_text
DEFAULT_INSTRUMENTS = ['execution_time',
'interrupts',
'cpufreq',
'status',
'standard',
'csv']
def _format_yaml_comment(param, short_description=False):
comment = param.description
comment = strip_inlined_text(comment)
if short_description:
comment = comment.split('\n\n')[0]
comment = comment.replace('\n', '\n# ')
comment = "# {}\n".format(comment)
return comment
def _format_instruments(output):
plugin_cache = PluginCache()
output.write("instrumentation:\n")
for plugin in DEFAULT_INSTRUMENTS:
plugin_cls = plugin_cache.loader.get_plugin_class(plugin)
output.writelines(_format_yaml_comment(plugin_cls, short_description=True))
output.write(" - {}\n".format(plugin))
output.write("\n")
def generate_default_config(path):
with open(path, 'w') as output:
for param in MetaConfiguration.config_points + RunConfiguration.config_points:
entry = {param.name: param.default}
comment = _format_yaml_comment(param)
output.writelines(comment)
yaml.dump(entry, output, default_flow_style=False)
output.write("\n")
_format_instruments(output)
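A quick usage sketch, assuming this module is importable; the output path is illustrative:
if __name__ == '__main__':
    # Write an annotated default configuration to the given path.
    generate_default_config('/tmp/wa_config.yaml')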

View File

@ -1,213 +0,0 @@
import random
from itertools import izip_longest, groupby, chain
from wlauto.core import pluginloader
from wlauto.core.configuration.configuration import (MetaConfiguration,
RunConfiguration,
JobGenerator, settings)
from wlauto.core.configuration.parsers import ConfigParser
from wlauto.core.configuration.plugin_cache import PluginCache
class CombinedConfig(object):
@staticmethod
def from_pod(pod):
instance = CombinedConfig()
instance.settings = MetaConfiguration.from_pod(pod.get('settings', {}))
instance.run_config = RunConfiguration.from_pod(pod.get('run_config', {}))
return instance
def __init__(self, settings=None, run_config=None):
self.settings = settings
self.run_config = run_config
def to_pod(self):
return {'settings': self.settings.to_pod(),
'run_config': self.run_config.to_pod()}
class Job(object):
def __init__(self, spec, iteration, context):
self.spec = spec
self.iteration = iteration
self.context = context
self.status = 'new'
self.workload = None
self.output = None
def load(self, target, loader=pluginloader):
self.workload = loader.get_workload(self.spec.workload_name,
target,
**self.spec.workload_parameters)
self.workload.init_resources(self.context)
self.workload.validate()
class ConfigManager(object):
"""
Represents run-time state of WA. Mostly used as a container for loaded
configuration and discovered plugins.
This exists outside of any command or run and is associated with the running
instance of WA itself.
"""
@property
def enabled_instruments(self):
return self.jobs_config.enabled_instruments
@property
def job_specs(self):
if not self._jobs_generated:
msg = 'Attempting to access job specs before '\
'jobs have been generated'
raise RuntimeError(msg)
return [j.spec for j in self._jobs]
@property
def jobs(self):
if not self._jobs_generated:
msg = 'Attempting to access jobs before '\
'they have been generated'
raise RuntimeError(msg)
return self._jobs
def __init__(self, settings=settings):
self.settings = settings
self.run_config = RunConfiguration()
self.plugin_cache = PluginCache()
self.jobs_config = JobGenerator(self.plugin_cache)
self.loaded_config_sources = []
self._config_parser = ConfigParser()
self._jobs = []
self._jobs_generated = False
self.agenda = None
def load_config_file(self, filepath):
self._config_parser.load_from_path(self, filepath)
self.loaded_config_sources.append(filepath)
def load_config(self, values, source, wrap_exceptions=True):
self._config_parser.load(self, values, source)
self.loaded_config_sources.append(source)
def get_plugin(self, name=None, kind=None, *args, **kwargs):
return self.plugin_cache.get_plugin(name, kind, *args, **kwargs)
def get_instruments(self, target):
instruments = []
for name in self.enabled_instruments:
instruments.append(self.get_plugin(name, kind='instrument',
target=target))
return instruments
def finalize(self):
if not self.agenda:
msg = 'Attempting to finalize config before agenda has been set'
raise RuntimeError(msg)
self.run_config.merge_device_config(self.plugin_cache)
return CombinedConfig(self.settings, self.run_config)
def generate_jobs(self, context):
job_specs = self.jobs_config.generate_job_specs(context.tm)
exec_order = self.run_config.execution_order
for spec, i in permute_iterations(job_specs, exec_order):
job = Job(spec, i, context)
job.load(context.tm.target)
self._jobs.append(job)
self._jobs_generated = True
def permute_by_job(specs):
"""
This is that "classic" implementation that executes all iterations of a
workload spec before proceeding onto the next spec.
"""
for spec in specs:
for i in range(1, spec.iterations + 1):
yield (spec, i)
def permute_by_iteration(specs):
"""
Runs the first iteration for all benchmarks first, before proceeding to the
next iteration, i.e. A1, B1, C1, A2, B2, C2... instead of A1, A2, B1, B2,
C1, C2...
If multiple sections were specified in the agenda, this will run all
sections for the first global spec first, followed by all sections for the
second spec, etc.
e.g. given sections X and Y, and global specs A and B, with 2 iterations,
this will run
X.A1, Y.A1, X.B1, Y.B1, X.A2, Y.A2, X.B2, Y.B2
"""
groups = [list(g) for k, g in groupby(specs, lambda s: s.workload_id)]
all_tuples = []
for spec in chain(*groups):
all_tuples.append([(spec, i + 1)
for i in xrange(spec.iterations)])
for t in chain(*map(list, izip_longest(*all_tuples))):
if t is not None:
yield t
def permute_by_section(specs):
"""
Runs the first iteration for all benchmarks first, before proceeding to the
next iteration, i.e. A1, B1, C1, A2, B2, C2... instead of A1, A2, B1, B2,
C1, C2...
If multiple sections were specified in the agenda, this will run all specs
for the first section followed by all specs for the second section, etc.
e.g. given sections X and Y, and global specs A and B, with 2 iterations,
this will run
X.A1, X.B1, Y.A1, Y.B1, X.A2, X.B2, Y.A2, Y.B2
"""
groups = [list(g) for k, g in groupby(specs, lambda s: s.section_id)]
all_tuples = []
for spec in chain(*groups):
all_tuples.append([(spec, i + 1)
for i in xrange(spec.iterations)])
for t in chain(*map(list, izip_longest(*all_tuples))):
if t is not None:
yield t
def permute_randomly(specs):
"""
This will generate a random permutation of specs/iteration tuples.
"""
result = []
for spec in specs:
for i in xrange(1, spec.iterations + 1):
result.append((spec, i))
random.shuffle(result)
for t in result:
yield t
permute_map = {
'by_iteration': permute_by_iteration,
'by_job': permute_by_job,
'by_section': permute_by_section,
'random': permute_randomly,
}
def permute_iterations(specs, exec_order):
if exec_order not in permute_map:
msg = 'Unknown execution order "{}"; must be in: {}'
raise ValueError(msg.format(exec_order, permute_map.keys()))
return permute_map[exec_order](specs)
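A small usage sketch, assuming a stub object carrying just the attributes the permute functions rely on (real job specs hold much more state):
class _StubSpec(object):
    # Only the attributes used by the permute functions above.
    def __init__(self, workload_id, iterations):
        self.workload_id = workload_id
        self.section_id = 'global'
        self.iterations = iterations

specs = [_StubSpec('A', 2), _StubSpec('B', 2)]
for spec, i in permute_iterations(specs, 'by_iteration'):
    print '{}{}'.format(spec.workload_id, i)  # A1, B1, A2, B2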

View File

@ -1,308 +0,0 @@
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from wlauto.exceptions import ConfigError
from wlauto.utils.serializer import read_pod, SerializerSyntaxError
from wlauto.utils.types import toggle_set, counter
from wlauto.core.configuration.configuration import JobSpec
###############
### Parsers ###
###############
class ConfigParser(object):
def load_from_path(self, state, filepath):
self.load(state, _load_file(filepath, "Config"), filepath)
def load(self, state, raw, source, wrap_exceptions=True): # pylint: disable=too-many-branches
try:
if 'run_name' in raw:
msg = '"run_name" can only be specified in the config '\
'section of an agenda'
raise ConfigError(msg)
if 'id' in raw:
raise ConfigError('"id" cannot be set globally')
merge_result_processors_instruments(raw)
# Get WA core configuration
for cfg_point in state.settings.configuration.itervalues():
value = get_aliased_param(cfg_point, raw)
if value is not None:
state.settings.set(cfg_point.name, value)
# Get run specific configuration
for cfg_point in state.run_config.configuration.itervalues():
value = get_aliased_param(cfg_point, raw)
if value is not None:
state.run_config.set(cfg_point.name, value)
# Get global job spec configuration
for cfg_point in JobSpec.configuration.itervalues():
value = get_aliased_param(cfg_point, raw)
if value is not None:
state.jobs_config.set_global_value(cfg_point.name, value)
for name, values in raw.iteritems():
# Assume that all leftover config is for a plug-in or a global
# alias; it is up to PluginCache to verify this assumption.
state.plugin_cache.add_configs(name, values, source)
except ConfigError as e:
if wrap_exceptions:
raise ConfigError('Error in "{}":\n{}'.format(source, str(e)))
else:
raise e
class AgendaParser(object):
def load_from_path(self, state, filepath):
raw = _load_file(filepath, 'Agenda')
self.load(state, raw, filepath)
def load(self, state, raw, source):
try:
if not isinstance(raw, dict):
raise ConfigError('Invalid agenda, top level entry must be a dict')
self._populate_and_validate_config(state, raw, source)
sections = self._pop_sections(raw)
global_workloads = self._pop_workloads(raw)
if raw:
msg = 'Invalid top level agenda entry(ies): "{}"'
raise ConfigError(msg.format('", "'.join(raw.keys())))
sect_ids, wkl_ids = self._collect_ids(sections, global_workloads)
self._process_global_workloads(state, global_workloads, wkl_ids)
self._process_sections(state, sections, sect_ids, wkl_ids)
state.agenda = source
except (ConfigError, SerializerSyntaxError) as e:
raise ConfigError('Error in "{}":\n\t{}'.format(source, str(e)))
def _populate_and_validate_config(self, state, raw, source):
for name in ['config', 'global']:
entry = raw.pop(name, None)
if entry is None:
continue
if not isinstance(entry, dict):
msg = 'Invalid entry "{}" - must be a dict'
raise ConfigError(msg.format(name))
if 'run_name' in entry:
state.run_config.set('run_name', entry.pop('run_name'))
state.load_config(entry, source, wrap_exceptions=False)
def _pop_sections(self, raw):
sections = raw.pop("sections", [])
if not isinstance(sections, list):
raise ConfigError('Invalid entry "sections" - must be a list')
return sections
def _pop_workloads(self, raw):
workloads = raw.pop("workloads", [])
if not isinstance(workloads, list):
raise ConfigError('Invalid entry "workloads" - must be a list')
return workloads
def _collect_ids(self, sections, global_workloads):
seen_section_ids = set()
seen_workload_ids = set()
for workload in global_workloads:
workload = _get_workload_entry(workload)
_collect_valid_id(workload.get("id"), seen_workload_ids, "workload")
for section in sections:
_collect_valid_id(section.get("id"), seen_section_ids, "section")
for workload in section["workloads"] if "workloads" in section else []:
workload = _get_workload_entry(workload)
_collect_valid_id(workload.get("id"), seen_workload_ids,
"workload")
return seen_section_ids, seen_workload_ids
def _process_global_workloads(self, state, global_workloads, seen_wkl_ids):
for workload_entry in global_workloads:
workload = _process_workload_entry(workload_entry, seen_wkl_ids,
state.jobs_config)
state.jobs_config.add_workload(workload)
def _process_sections(self, state, sections, seen_sect_ids, seen_wkl_ids):
for section in sections:
workloads = []
for workload_entry in section.pop("workloads", []):
workload = _process_workload_entry(workload_entry, seen_wkl_ids,
state.jobs_config)
workloads.append(workload)
section = _construct_valid_entry(section, seen_sect_ids,
"s", state.jobs_config)
state.jobs_config.add_section(section, workloads)
########################
### Helper functions ###
########################
# Message template used by get_aliased_param below; the exact wording here is
# an editorial reconstruction (the original constant was defined elsewhere).
DUPLICATE_ENTRY_ERROR = 'Only one of {} may be specified in a single entry'
def get_aliased_param(cfg_point, d, default=None, pop=True):
"""
Given a ConfigurationPoint and a dict, this function will search the dict for
the ConfigurationPoint's name/aliases. If more than one is found it will raise
a ConfigError. If one (and only one) is found then it will return the value
for the ConfigurationPoint. If neither the name nor any of the aliases are
present in the dict, it will return the "default" parameter of this function.
"""
aliases = [cfg_point.name] + cfg_point.aliases
alias_map = [a for a in aliases if a in d]
if len(alias_map) > 1:
raise ConfigError(DUPLICATE_ENTRY_ERROR.format(aliases))
elif alias_map:
if pop:
return d.pop(alias_map[0])
else:
return d[alias_map[0]]
else:
return default
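A small usage sketch; the stub below stands in for a real ConfigurationPoint and carries only the attributes this function reads:
class _StubPoint(object):
    # Stand-in for a ConfigurationPoint; illustrative only.
    name = 'device'
    aliases = ['device_name']

raw = {'device_name': 'juno'}
print get_aliased_param(_StubPoint, raw)  # prints 'juno'; the entry is popped
print raw                                 # prints {}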
def _load_file(filepath, error_name):
if not os.path.isfile(filepath):
raise ValueError("{} does not exist".format(filepath))
try:
raw = read_pod(filepath)
except SerializerSyntaxError as e:
raise ConfigError('Error parsing {} {}: {}'.format(error_name, filepath, e))
if not isinstance(raw, dict):
message = '{} does not contain a valid {} structure; top level must be a dict.'
raise ConfigError(message.format(filepath, error_name))
return raw
def merge_result_processors_instruments(raw):
instr_config = JobSpec.configuration['instrumentation']
instruments = toggle_set(get_aliased_param(instr_config, raw, default=[]))
result_processors = toggle_set(raw.pop('result_processors', []))
if instruments and result_processors:
conflicts = instruments.conflicts_with(result_processors)
if conflicts:
msg = '"instrumentation" and "result_processors" have '\
'conflicting entries: {}'
entries = ', '.join('"{}"'.format(c.strip("~")) for c in conflicts)
raise ConfigError(msg.format(entries))
raw['instrumentation'] = instruments.merge_with(result_processors)
def _pop_aliased(d, names, entry_id):
name_count = sum(1 for n in names if n in d)
if name_count > 1:
names_list = ', '.join(names)
msg = 'Invalid workload entry "{}": at most one of ({}) must be specified.'
raise ConfigError(msg.format(entry_id, names_list))
for name in names:
if name in d:
return d.pop(name)
return None
def _construct_valid_entry(raw, seen_ids, prefix, jobs_config):
workload_entry = {}
# Generate an automatic ID if the entry doesn't already have one
if 'id' not in raw:
while True:
new_id = '{}{}'.format(prefix, counter(name=prefix))
if new_id not in seen_ids:
break
workload_entry['id'] = new_id
seen_ids.add(new_id)
else:
workload_entry['id'] = raw.pop('id')
# Process instrumentation
merge_result_processors_instruments(raw)
# Validate all remaining fields and add them to the workload entry
for name, cfg_point in JobSpec.configuration.iteritems():
value = get_aliased_param(cfg_point, raw)
if value is not None:
value = cfg_point.kind(value)
cfg_point.validate_value(name, value)
workload_entry[name] = value
wk_id = workload_entry['id']
param_names = ['workload_params', 'workload_parameters']
if prefix == 'wk':
param_names += ['params', 'parameters']
workload_entry["workload_parameters"] = _pop_aliased(raw, param_names, wk_id)
param_names = ['runtime_parameters', 'runtime_params']
if prefix == 's':
param_names += ['params', 'parameters']
workload_entry["runtime_parameters"] = _pop_aliased(raw, param_names, wk_id)
param_names = ['boot_parameters', 'boot_params']
workload_entry["boot_parameters"] = _pop_aliased(raw, param_names, wk_id)
if "instrumentation" in workload_entry:
jobs_config.update_enabled_instruments(workload_entry["instrumentation"])
# Error if there are unknown entries left over
if raw:
msg = 'Invalid entry(ies) in "{}": "{}"'
raise ConfigError(msg.format(workload_entry['id'], ', '.join(raw.keys())))
return workload_entry
def _collect_valid_id(entry_id, seen_ids, entry_type):
if entry_id is None:
return
if entry_id in seen_ids:
raise ConfigError('Duplicate {} ID "{}".'.format(entry_type, entry_id))
# "-" is reserved for joining section and workload IDs
if "-" in entry_id:
msg = 'Invalid {} ID "{}"; IDs cannot contain a "-"'
raise ConfigError(msg.format(entry_type, entry_id))
if entry_id == "global":
msg = 'Invalid {} ID "global"; is a reserved ID'
raise ConfigError(msg.format(entry_type))
seen_ids.add(entry_id)
def _get_workload_entry(workload):
if isinstance(workload, basestring):
workload = {'name': workload}
elif not isinstance(workload, dict):
raise ConfigError('Invalid workload entry: "{}"'.format(workload))
return workload
def _process_workload_entry(workload, seen_workload_ids, jobs_config):
workload = _get_workload_entry(workload)
workload = _construct_valid_entry(workload, seen_workload_ids,
"wk", jobs_config)
return workload

View File

@ -1,210 +0,0 @@
# Copyright 2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import copy
from collections import defaultdict
from wlauto.core import pluginloader
from wlauto.exceptions import ConfigError
from wlauto.utils.types import obj_dict
from devlib.utils.misc import memoized
GENERIC_CONFIGS = ["device_config", "workload_parameters",
"boot_parameters", "runtime_parameters"]
class PluginCache(object):
"""
The plugin cache is used to store configuration that cannot be processed at
this stage, whether that's because it is not yet known if it is needed
(in the case of disabled plug-ins) or it is not known what it belongs to (in
the case of "device_config" etc.). It also maintains where configuration came
from, and the priority order of said sources.
"""
def __init__(self, loader=pluginloader):
self.loader = loader
self.sources = []
self.plugin_configs = defaultdict(lambda: defaultdict(dict))
self.global_alias_values = defaultdict(dict)
# Generate a mapping of what global aliases belong to
self._global_alias_map = defaultdict(dict)
self._list_of_global_aliases = set()
for plugin in self.loader.list_plugins():
for param in plugin.parameters:
if param.global_alias:
self._global_alias_map[plugin.name][param.global_alias] = param
self._list_of_global_aliases.add(param.global_alias)
def add_source(self, source):
if source in self.sources:
raise Exception("Source has already been added.")
self.sources.append(source)
def add_global_alias(self, alias, value, source):
if source not in self.sources:
msg = "Source '{}' has not been added to the plugin cache."
raise RuntimeError(msg.format(source))
if not self.is_global_alias(alias):
msg = "'{} is not a valid global alias'"
raise RuntimeError(msg.format(alias))
self.global_alias_values[alias][source] = value
def add_configs(self, plugin_name, values, source):
if self.is_global_alias(plugin_name):
self.add_global_alias(plugin_name, values, source)
return
for name, value in values.iteritems():
self.add_config(plugin_name, name, value, source)
def add_config(self, plugin_name, name, value, source):
if source not in self.sources:
msg = "Source '{}' has not been added to the plugin cache."
raise RuntimeError(msg.format(source))
if (not self.loader.has_plugin(plugin_name) and
plugin_name not in GENERIC_CONFIGS):
msg = 'configuration provided for unknown plugin "{}"'
raise ConfigError(msg.format(plugin_name))
if (plugin_name not in GENERIC_CONFIGS and
name not in self.get_plugin_parameters(plugin_name)):
msg = "'{}' is not a valid parameter for '{}'"
raise ConfigError(msg.format(name, plugin_name))
self.plugin_configs[plugin_name][source][name] = value
def is_global_alias(self, name):
return name in self._list_of_global_aliases
def get_plugin_config(self, plugin_name, generic_name=None):
config = obj_dict(not_in_dict=['name'])
config.name = plugin_name
# Load plugin defaults
cfg_points = self.get_plugin_parameters(plugin_name)
for cfg_point in cfg_points.itervalues():
cfg_point.set_value(config, check_mandatory=False)
# Merge global aliases
for alias, param in self._global_alias_map[plugin_name].iteritems():
if alias in self.global_alias_values:
for source in self.sources:
if source not in self.global_alias_values[alias]:
continue
val = self.global_alias_values[alias][source]
param.set_value(config, value=val)
# Merge user config
# Perform a simple merge with the order of sources representing priority
if generic_name is None:
plugin_config = self.plugin_configs[plugin_name]
for source in self.sources:
if source not in plugin_config:
continue
for name, value in plugin_config[source].iteritems():
cfg_points[name].set_value(config, value=value)
# A more complicated merge that involves priority of sources and specificity
else:
self._merge_using_priority_specificity(plugin_name, generic_name, config)
return config
def get_plugin(self, name, kind=None, *args, **kwargs):
config = self.get_plugin_config(name)
kwargs = dict(config.items() + kwargs.items())
return self.loader.get_plugin(name, kind=kind, *args, **kwargs)
@memoized
def get_plugin_parameters(self, name):
params = self.loader.get_plugin_class(name).parameters
return {param.name: param for param in params}
# pylint: disable=too-many-nested-blocks, too-many-branches
def _merge_using_priority_specificity(self, specific_name,
generic_name, final_config):
"""
WA configuration can come from various sources of increasing priority,
as well as being specified in a generic and specific manner (e.g.
``device_config`` and ``nexus10`` respectively). WA has two rules for
the priority of configuration:
- Configuration from higher priority sources overrides
configuration from lower priority sources.
- More specific configuration overrides less specific configuration.
There is a situation where these two rules come into conflict: when a
generic configuration is given in a config source of high priority and a
specific configuration is given in a config source of lower priority. In
this situation it is not possible to know the end user's intention, so WA
will raise an error.
:param generic_name: The name of the generic configuration,
                     e.g. ``device_config``
:param specific_name: The name of the specific configuration used,
                      e.g. ``nexus10``
:param final_config: An ``obj_dict`` of ``ConfigurationPoint`` values into
                     which the fully merged and validated configuration
                     will be written (keyed by config point name).
"""
generic_config = copy(self.plugin_configs[generic_name])
specific_config = copy(self.plugin_configs[specific_name])
cfg_points = self.get_plugin_parameters(specific_name)
sources = self.sources
seen_specific_config = defaultdict(list)
# set_value uses the 'name' attribute of the passed object in its error
# messages; to ensure these messages make sense, the name has to be
# changed several times during this function.
final_config.name = specific_name
# pylint: disable=too-many-nested-blocks
for source in sources:
try:
if source in generic_config:
final_config.name = generic_name
for name, cfg_point in cfg_points.iteritems():
if name in generic_config[source]:
if name in seen_specific_config:
msg = ('"{generic_name}" configuration "{config_name}" has already been '
'specified more specifically for {specific_name} in:\n\t\t{sources}')
msg = msg.format(generic_name=generic_name,
config_name=name,
specific_name=specific_name,
sources=", ".join(seen_specific_config[name]))
raise ConfigError(msg)
value = generic_config[source][name]
cfg_point.set_value(final_config, value, check_mandatory=False)
if source in specific_config:
final_config.name = specific_name
for name, cfg_point in cfg_points.iteritems():
if name in specific_config[source]:
seen_specific_config[name].append(str(source))
value = specific_config[source][name]
cfg_point.set_value(final_config, value, check_mandatory=False)
except ConfigError as e:
raise ConfigError('Error in "{}":\n\t{}'.format(source, str(e)))
# Validate final configuration
final_config.name = specific_name
for cfg_point in cfg_points.itervalues():
cfg_point.validate(final_config)
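A rough sketch of how source priority plays out in the cache, using a fake loader so it can be exercised without a full plugin environment (all names below are illustrative):
class _FakeLoader(object):
    # Minimal stand-in for the real plugin loader.
    def list_plugins(self):
        return []
    def has_plugin(self, name):
        return True

cache = PluginCache(loader=_FakeLoader())
cache.add_source('config-file')  # added first, so lower priority
cache.add_source('agenda')       # added later, so higher priority
cache.add_configs('device_config', {'core_names': ['a53']}, 'config-file')
cache.add_configs('device_config', {'core_names': ['a72']}, 'agenda')
# When this generic config is later merged for a specific device, the
# 'agenda' value wins because later sources take priority.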

View File

@ -1,89 +0,0 @@
# Copyright 2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class JobSpecSource(object):
kind = ""
def __init__(self, config, parent=None):
self.config = config
self.parent = parent
@property
def id(self):
return self.config['id']
@property
def name(self):
raise NotImplementedError()
class WorkloadEntry(JobSpecSource):
kind = "workload"
@property
def name(self):
if self.parent.id == "global":
return 'workload "{}"'.format(self.id)
else:
return 'workload "{}" from section "{}"'.format(self.id, self.parent.id)
class SectionNode(JobSpecSource):
kind = "section"
@property
def name(self):
if self.id == "global":
return "globally specified configuration"
else:
return 'section "{}"'.format(self.id)
@property
def is_leaf(self):
return not bool(self.children)
def __init__(self, config, parent=None):
super(SectionNode, self).__init__(config, parent=parent)
self.workload_entries = []
self.children = []
def add_section(self, section):
new_node = SectionNode(section, parent=self)
self.children.append(new_node)
return new_node
def add_workload(self, workload_config):
self.workload_entries.append(WorkloadEntry(workload_config, self))
def descendants(self):
for child in self.children:
for n in child.descendants():
yield n
yield child
def ancestors(self):
if self.parent is not None:
yield self.parent
for ancestor in self.parent.ancestors():
yield ancestor
def leaves(self):
if self.is_leaf:
yield self
else:
for n in self.descendants():
if n.is_leaf:
yield n
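A short usage sketch showing how the section tree is built and walked (the IDs are illustrative):
root = SectionNode({'id': 'global'})
sect_a = root.add_section({'id': 'A'})
sect_b = root.add_section({'id': 'B'})
sect_a.add_workload({'id': 'wk1'})
for leaf in root.leaves():
    print leaf.name  # section "A", then section "B"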

View File

@ -1,198 +0,0 @@
import string
from copy import copy
from wlauto.core.plugin import Plugin, Parameter
from wlauto.core.configuration.configuration import RuntimeParameter
from wlauto.exceptions import ConfigError
from wlauto.utils.types import list_of_integers, list_of, caseless_string
from devlib.platform import Platform
from devlib.target import AndroidTarget, Cpuinfo, KernelVersion, KernelConfig
__all__ = ['RuntimeParameter', 'CoreParameter', 'DeviceManager', 'TargetInfo']
UNKNOWN_RTP = 'Unknown runtime parameter "{}"'
class TargetInfo(object):
@staticmethod
def from_pod(pod):
instance = TargetInfo()
instance.target = pod['target']
instance.abi = pod['abi']
instance.cpuinfo = Cpuinfo(pod['cpuinfo'])
instance.os = pod['os']
instance.os_version = pod['os_version']
instance.is_rooted = pod['is_rooted']
instance.kernel_version = KernelVersion(pod['kernel_release'],
pod['kernel_version'])
instance.kernel_config = KernelConfig(pod['kernel_config'])
if pod["target"] == "AndroidTarget":
instance.screen_resolution = pod['screen_resolution']
instance.prop = pod['prop']
instance.android_id = pod['android_id']
return instance
def __init__(self, target=None):
if target:
self.target = target.__class__.__name__
self.cpuinfo = target.cpuinfo
self.os = target.os
self.os_version = target.os_version
self.abi = target.abi
self.is_rooted = target.is_rooted
self.kernel_version = target.kernel_version
self.kernel_config = target.config
if isinstance(target, AndroidTarget):
self.screen_resolution = target.screen_resolution
self.prop = target.getprop()
self.android_id = target.android_id
else:
self.target = None
self.cpuinfo = None
self.os = None
self.os_version = None
self.abi = None
self.is_rooted = None
self.kernel_version = None
self.kernel_config = None
# target is None in this branch, so Android-specific attributes
# cannot be detected; default them to None unconditionally.
self.screen_resolution = None
self.prop = None
self.android_id = None
def to_pod(self):
pod = {}
pod['target'] = self.target
pod['abi'] = self.abi
pod['cpuinfo'] = self.cpuinfo.sections
pod['os'] = self.os
pod['os_version'] = self.os_version
pod['is_rooted'] = self.is_rooted
pod['kernel_release'] = self.kernel_version.release
pod['kernel_version'] = self.kernel_version.version
pod['kernel_config'] = dict(self.kernel_config.iteritems())
if self.target == "AndroidTarget":
pod['screen_resolution'] = self.screen_resolution
pod['prop'] = self.prop
pod['android_id'] = self.android_id
return pod
class DeviceManager(Plugin):
kind = "manager"
name = None
target_type = None
platform_type = Platform
has_gpu = None
path_module = None
info = None
parameters = [
Parameter('core_names', kind=list_of(caseless_string),
description="""
This is a list of all cpu cores on the device with each
element being the core type, e.g. ``['a7', 'a7', 'a15']``. The
order of the cores must match the order they are listed in
``'/sys/devices/system/cpu'``. So in this case, ``'cpu0'`` must
be an A7 core, and ``'cpu2'`` an A15.
"""),
Parameter('core_clusters', kind=list_of_integers,
description="""
This is a list indicating the cluster affinity of the CPU cores,
each element corresponding to the cluster ID of the core at the
same index. E.g. ``[0, 0, 1]`` indicates that cpu0 and cpu1 are on
cluster 0, while cpu2 is on cluster 1. If this is not specified, this
will be inferred from ``core_names`` if possible (assuming all cores with
the same name are on the same cluster).
"""),
Parameter('working_directory',
description='''
Working directory to be used by WA. This must be in a location where the specified user
has write permissions. This will default to /home/<username>/wa (or to /root/wa, if
username is 'root').
'''),
Parameter('binaries_directory',
description='Location of executable binaries on this device (must be in PATH).'),
]
modules = []
runtime_parameter_managers = [
]
def __init__(self):
super(DeviceManager, self).__init__()
self.runtime_parameter_values = None
# Track sysfile writes so they can be reported by get_sysfile_values().
self._written_sysfiles = []
# Framework
def connect(self):
raise NotImplementedError("connect method must be implemented for device managers")
def initialize(self, context):
super(DeviceManager, self).initialize(context)
self.info = TargetInfo(self.target)
self.target.setup()
def start(self):
pass
def stop(self):
pass
def validate(self):
pass
# Runtime Parameters
def merge_runtime_parameters(self, params):
merged_values = {}
for source, values in params.iteritems():
for name, value in values:
for rtpm in self.runtime_parameter_managers:
if rtpm.match(name):
rtpm.update_value(name, value, source, merged_values)
break
else:
msg = 'Unknown runtime parameter "{}" in "{}"'
raise ConfigError(msg.format(name, source))
return merged_values
def static_runtime_parameter_validation(self, params):
params = copy(params)
for rtpm in self.runtime_parameter_managers:
rtpm.static_validation(params)
if params:
msg = 'Unknown runtime_parameters for "{}": "{}"'
raise ConfigError(msg.format(self.name, '", "'.join(params.iterkeys())))
def dynamic_runtime_parameter_validation(self, params):
for rtpm in self.runtime_parameter_managers:
rtpm.dynamic_validation(params)
def commit_runtime_parameters(self, params):
params = copy(params)
for rtpm in self.runtime_parameter_managers:
rtpm.commit(params)
# Runtime parameter getters/setters
def get_sysfile_values(self):
return self._written_sysfiles
def set_sysfile_values(self, params):
for sysfile, value in params.iteritems():
verify = not sysfile.endswith('!')
sysfile = sysfile.rstrip('!')
self._written_sysfiles.append((sysfile, value))
self.target.write_value(sysfile, value, verify=verify)
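A sketch of the sysfile write convention implemented above, assuming a connected manager instance; the paths and values are illustrative:
# A trailing '!' on a path suppresses post-write verification for that file.
manager.set_sysfile_values({
    '/sys/devices/system/cpu/cpu0/online': 1,
    '/proc/sys/kernel/printk!': '3 4 1 3',
})
# Every write (with the '!' stripped) is recorded and can be retrieved later:
print manager.get_sysfile_values()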

View File

@ -1,108 +0,0 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import argparse
import logging
import os
import subprocess
import warnings
from wlauto.core import pluginloader
from wlauto.core.command import init_argument_parser
from wlauto.core.configuration import settings
from wlauto.core.configuration.manager import ConfigManager
from wlauto.core.host import init_user_directory
from wlauto.exceptions import WAError, DevlibError, ConfigError
from wlauto.utils.doc import format_body
from wlauto.utils.log import init_logging
from wlauto.utils.misc import get_traceback
warnings.filterwarnings(action='ignore', category=UserWarning, module='zope')
logger = logging.getLogger('command_line')
def load_commands(subparsers):
commands = {}
for command in pluginloader.list_commands():
commands[command.name] = pluginloader.get_command(command.name,
subparsers=subparsers)
return commands
def main():
config = ConfigManager()
if not os.path.exists(settings.user_directory):
init_user_directory()
try:
description = ("Execute automated workloads on a remote device and process "
"the resulting output.\n\nUse \"wa <subcommand> -h\" to see "
"help for individual subcommands.")
parser = argparse.ArgumentParser(description=format_body(description, 80),
prog='wa',
formatter_class=argparse.RawDescriptionHelpFormatter,
)
init_argument_parser(parser)
# each command will add its own subparser
commands = load_commands(parser.add_subparsers(dest='command'))
args = parser.parse_args()
settings.set("verbosity", args.verbose)
config.load_config_file(settings.user_config_file)
for config_file in args.config:
if not os.path.exists(config_file):
raise ConfigError("Config file {} not found".format(config_file))
config.load_config_file(config_file)
init_logging(settings.verbosity)
command = commands[args.command]
sys.exit(command.execute(config, args))
except KeyboardInterrupt:
logging.info('Got CTRL-C. Aborting.')
sys.exit(3)
except (WAError, DevlibError) as e:
logging.critical(e)
sys.exit(1)
except subprocess.CalledProcessError as e:
tb = get_traceback()
logging.critical(tb)
command = e.cmd
if e.args:
command = '{} {}'.format(command, ' '.join(e.args))
message = 'Command \'{}\' returned non-zero exit status {}\nOUTPUT:\n{}\n'
logging.critical(message.format(command, e.returncode, e.output))
sys.exit(2)
except SyntaxError as e:
tb = get_traceback()
logging.critical(tb)
message = 'Syntax Error in {}, line {}, offset {}:'
logging.critical(message.format(e.filename, e.lineno, e.offset))
logging.critical('\t{}'.format(e.msg))
sys.exit(2)
except Exception as e: # pylint: disable=broad-except
tb = get_traceback()
logging.critical(tb)
logging.critical('{}({})'.format(e.__class__.__name__, e))
sys.exit(2)

View File

@ -1,875 +0,0 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=no-member
"""
This module contains the execution logic for Workload Automation. It defines the
following actors:
WorkloadSpec: Identifies the workload to be run and defines parameters under
which it should be executed.
Executor: Responsible for the overall execution process. It instantiates
and/or initialises the other actors, does any necessary validation
and kicks off the whole process.
Execution Context: Provides information about the current state of run
execution to instrumentation.
RunInfo: Information about the current run.
Runner: This executes workload specs that are passed to it. It goes through
stages of execution, emitting an appropriate signal at each step to
allow instrumentation to do its stuff.
"""
import logging
import os
import random
import subprocess
import uuid
from collections import Counter, defaultdict, OrderedDict
from contextlib import contextmanager
from copy import copy
from datetime import datetime
from itertools import izip_longest
import wlauto.core.signal as signal
from wlauto.core import instrumentation
from wlauto.core import pluginloader
from wlauto.core.configuration import settings
from wlauto.core.device_manager import TargetInfo
from wlauto.core.plugin import Artifact
from wlauto.core.resolver import ResourceResolver
from wlauto.core.result import ResultManager, IterationResult, RunResult
from wlauto.exceptions import (WAError, ConfigError, TimeoutError, InstrumentError,
DeviceError, DeviceNotRespondingError)
from wlauto.utils.misc import (ensure_directory_exists as _d,
get_traceback, format_duration)
from wlauto.utils.serializer import json
# The maximum number of reboot attempts for an iteration.
MAX_REBOOT_ATTEMPTS = 3
# If something went wrong during device initialization, wait this
# long (in seconds) before retrying. This is necessary, as retrying
# immediately may not give the device enough time to recover to be able
# to reboot.
REBOOT_DELAY = 3
class ExecutionContext(object):
def __init__(self, cm, tm, output):
self.logger = logging.getLogger('ExecContext')
self.cm = cm
self.tm = tm
self.output = output
self.logger.debug('Loading resource discoverers')
self.resolver = ResourceResolver(cm)
self.resolver.load()
class OldExecutionContext(object):
"""
Provides a context for instrumentation. Keeps track of things like
current workload and iteration.
This class also provides two status members that can be used by workloads
and instrumentation to keep track of arbitrary state. ``result``
is reset on each new iteration of a workload; run_status is maintained
throughout a Workload Automation run.
"""
# These are the artifacts generated by the core framework.
default_run_artifacts = [
Artifact('runlog', 'run.log', 'log', mandatory=True,
description='The log for the entire run.'),
]
@property
def current_iteration(self):
if self.current_job:
spec_id = self.current_job.spec.id
return self.job_iteration_counts[spec_id]
else:
return None
@property
def job_status(self):
if not self.current_job:
return None
return self.current_job.result.status
@property
def workload(self):
return getattr(self.spec, 'workload', None)
@property
def spec(self):
return getattr(self.current_job, 'spec', None)
@property
def result(self):
return getattr(self.current_job, 'result', self.run_result)
def __init__(self, device_manager, config):
self.device_manager = device_manager
self.device = self.device_manager.target
self.config = config
self.reboot_policy = config.reboot_policy
self.output_directory = None
self.current_job = None
self.resolver = None
self.last_error = None
self.run_info = None
self.run_result = None
self.run_output_directory = self.config.output_directory
self.host_working_directory = self.config.meta_directory
self.iteration_artifacts = None
self.run_artifacts = copy(self.default_run_artifacts)
self.job_iteration_counts = defaultdict(int)
self.aborted = False
self.runner = None
if config.agenda.filepath:
self.run_artifacts.append(Artifact('agenda',
os.path.join(self.host_working_directory,
os.path.basename(config.agenda.filepath)),
'meta',
mandatory=True,
description='Agenda for this run.'))
for i, filepath in enumerate(settings.config_paths, 1):
name = 'config_{}'.format(i)
path = os.path.join(self.host_working_directory,
name + os.path.splitext(filepath)[1])
self.run_artifacts.append(Artifact(name,
path,
kind='meta',
mandatory=True,
description='Config file used for the run.'))
def initialize(self):
if not os.path.isdir(self.run_output_directory):
os.makedirs(self.run_output_directory)
self.output_directory = self.run_output_directory
self.resolver = ResourceResolver(self.config)
self.run_info = RunInfo(self.config)
self.run_result = RunResult(self.run_info, self.run_output_directory)
def next_job(self, job):
"""Invoked by the runner when starting a new iteration of workload execution."""
self.current_job = job
self.job_iteration_counts[self.spec.id] += 1
if not self.aborted:
outdir_name = '_'.join(map(str, [self.spec.label, self.spec.id, self.current_iteration]))
self.output_directory = _d(os.path.join(self.run_output_directory, outdir_name))
self.iteration_artifacts = [wa for wa in self.workload.artifacts]
self.current_job.result.iteration = self.current_iteration
self.current_job.result.output_directory = self.output_directory
def end_job(self):
if self.current_job.result.status == IterationResult.ABORTED:
self.aborted = True
self.current_job = None
self.output_directory = self.run_output_directory
def add_metric(self, *args, **kwargs):
self.result.add_metric(*args, **kwargs)
def add_artifact(self, name, path, kind, *args, **kwargs):
if self.current_job is None:
self.add_run_artifact(name, path, kind, *args, **kwargs)
else:
self.add_iteration_artifact(name, path, kind, *args, **kwargs)
def add_run_artifact(self, name, path, kind, *args, **kwargs):
path = _check_artifact_path(path, self.run_output_directory)
self.run_artifacts.append(Artifact(name, path, kind, Artifact.RUN, *args, **kwargs))
def add_iteration_artifact(self, name, path, kind, *args, **kwargs):
path = _check_artifact_path(path, self.output_directory)
self.iteration_artifacts.append(Artifact(name, path, kind, Artifact.ITERATION, *args, **kwargs))
def get_artifact(self, name):
if self.iteration_artifacts:
for art in self.iteration_artifacts:
if art.name == name:
return art
for art in self.run_artifacts:
if art.name == name:
return art
return None
def _check_artifact_path(path, rootpath):
if path.startswith(rootpath):
return os.path.abspath(path)
rootpath = os.path.abspath(rootpath)
full_path = os.path.join(rootpath, path)
if not os.path.isfile(full_path):
raise ValueError('Cannot add artifact because {} does not exist.'.format(full_path))
return full_path
class FakeTargetManager(object):
# TODO: this is a FAKE
def __init__(self, name, config):
self.device_name = name
self.device_config = config
from devlib import LocalLinuxTarget
self.target = LocalLinuxTarget({'unrooted': True})
def get_target_info(self):
return TargetInfo(self.target)
def validate_runtime_parameters(self, params):
pass
def merge_runtime_parameters(self, params):
pass
def init_target_manager(config):
return FakeTargetManager(config.device, config.device_config)
class Executor(object):
"""
The ``Executor``'s job is to set up the execution context and pass it to a
``Runner`` along with a loaded run specification. Once the ``Runner`` has
done its thing, the ``Executor`` performs some final reporting before
returning.
The initial context set up involves combining configuration from various
sources, loading of required workloads, loading and installation of
instruments and result processors, etc. Static validation of the combined
configuration is also performed.
"""
# pylint: disable=R0915
def __init__(self):
self.logger = logging.getLogger('Executor')
self.error_logged = False
self.warning_logged = False
pluginloader = None
self.device_manager = None
self.device = None
self.context = None
def execute(self, config_manager, output):
"""
Execute the run specified by an agenda. Optionally, selectors may be
used to execute only a subset of the specified agenda.
Params::
:state: a ``ConfigManager`` containing processed configuration
:output: an initialized ``RunOutput`` that will be used to
store the results.
"""
signal.connect(self._error_signalled_callback, signal.ERROR_LOGGED)
signal.connect(self._warning_signalled_callback, signal.WARNING_LOGGED)
self.logger.info('Initializing run')
self.logger.debug('Finalizing run configuration.')
config = config_manager.finalize()
output.write_config(config)
self.logger.info('Connecting to target')
target_manager = init_target_manager(config.run_config)
output.write_target_info(target_manager.get_target_info())
self.logger.info('Initializing execution context')
context = ExecutionContext(config_manager, target_manager, output)
self.logger.info('Generating jobs')
config_manager.generate_jobs(context)
output.write_job_specs(config_manager.job_specs)
self.logger.info('Installing instrumentation')
for instrument in config_manager.get_instruments(target_manager.target):
instrumentation.install(instrument)
instrumentation.validate()
def old_exec(self, agenda, selectors={}):
self.config.set_agenda(agenda, selectors)
self.config.finalize()
config_outfile = os.path.join(self.config.meta_directory, 'run_config.json')
with open(config_outfile, 'w') as wfh:
json.dump(self.config, wfh)
self.logger.debug('Initialising device configuration.')
if not self.config.device:
raise ConfigError('Make sure a device is specified in the config.')
self.device_manager = pluginloader.get_manager(self.config.device,
**self.config.device_config)
self.device_manager.validate()
self.device = self.device_manager.target
self.context = ExecutionContext(self.device_manager, self.config)
self.logger.debug('Loading resource discoverers.')
self.context.initialize()
self.context.resolver.load()
self.context.add_artifact('run_config', config_outfile, 'meta')
self.logger.debug('Installing instrumentation')
for name, params in self.config.instrumentation.iteritems():
instrument = pluginloader.get_instrument(name, self.device, **params)
instrumentation.install(instrument)
instrumentation.validate()
self.logger.debug('Installing result processors')
result_manager = ResultManager()
for name, params in self.config.result_processors.iteritems():
processor = pluginloader.get_result_processor(name, **params)
result_manager.install(processor)
result_manager.validate()
self.logger.debug('Loading workload specs')
for workload_spec in self.config.workload_specs:
workload_spec.load(self.device, pluginloader)
workload_spec.workload.init_resources(self.context)
workload_spec.workload.validate()
if self.config.flashing_config:
if not self.device.flasher:
msg = 'flashing_config specified for {} device that does not support flashing.'
raise ConfigError(msg.format(self.device.name))
self.logger.debug('Flashing the device')
self.device.flasher.flash(self.device)
self.logger.info('Running workloads')
runner = self._get_runner(result_manager)
runner.init_queue(self.config.workload_specs)
runner.run()
self.execute_postamble()
def execute_postamble(self):
"""
This happens after the run has completed. The overall results of the run are
summarised to the user.
"""
result = self.context.run_result
counter = Counter()
for ir in result.iteration_results:
counter[ir.status] += 1
self.logger.info('Done.')
self.logger.info('Run duration: {}'.format(format_duration(self.context.run_info.duration)))
status_summary = 'Ran a total of {} iterations: '.format(sum(self.context.job_iteration_counts.values()))
parts = []
for status in IterationResult.values:
if status in counter:
parts.append('{} {}'.format(counter[status], status))
self.logger.info(status_summary + ', '.join(parts))
self.logger.info('Results can be found in {}'.format(self.config.output_directory))
if self.error_logged:
self.logger.warn('There were errors during execution.')
self.logger.warn('Please see {}'.format(self.config.log_file))
elif self.warning_logged:
self.logger.warn('There were warnings during execution.')
self.logger.warn('Please see {}'.format(self.config.log_file))
def _get_runner(self, result_manager):
if not self.config.execution_order or self.config.execution_order == 'by_iteration':
if self.config.reboot_policy == 'each_spec':
self.logger.info('each_spec reboot policy with the default by_iteration execution order is '
'equivalent to each_iteration policy.')
runnercls = ByIterationRunner
elif self.config.execution_order in ['classic', 'by_spec']:
runnercls = BySpecRunner
elif self.config.execution_order == 'by_section':
runnercls = BySectionRunner
elif self.config.execution_order == 'random':
runnercls = RandomRunner
else:
raise ConfigError('Unexpected execution order: {}'.format(self.config.execution_order))
return runnercls(self.device_manager, self.context, result_manager)
def _error_signalled_callback(self):
self.error_logged = True
signal.disconnect(self._error_signalled_callback, signal.ERROR_LOGGED)
def _warning_signalled_callback(self):
self.warning_logged = True
signal.disconnect(self._warning_signalled_callback, signal.WARNING_LOGGED)
class Runner(object):
"""
"""
class RunnerJob(object):
"""
Represents a single execution of a ``RunnerJobDescription``. There will be one created for each iteration
specified by ``RunnerJobDescription.number_of_iterations``.
"""
def __init__(self, spec, retry=0):
self.spec = spec
self.retry = retry
self.iteration = None
self.result = IterationResult(self.spec)
class OldRunner(object):
"""
This class is responsible for actually performing a workload automation
run. The main responsibility of this class is to emit appropriate signals
at the various stages of the run to allow things like traces and other
instrumentation to hook into the process.
This is an abstract base class that defines each step of the run, but not
the order in which those steps are executed, which is left to the concrete
derived classes.
"""
class _RunnerError(Exception):
"""Internal runner error."""
pass
@property
def config(self):
return self.context.config
@property
def current_job(self):
if self.job_queue:
return self.job_queue[0]
return None
@property
def previous_job(self):
if self.completed_jobs:
return self.completed_jobs[-1]
return None
@property
def next_job(self):
if self.job_queue:
if len(self.job_queue) > 1:
return self.job_queue[1]
return None
@property
def spec_changed(self):
if self.previous_job is None and self.current_job is not None: # Start of run
return True
if self.previous_job is not None and self.current_job is None: # End of run
return True
return self.current_job.spec.id != self.previous_job.spec.id
@property
def spec_will_change(self):
if self.current_job is None and self.next_job is not None: # Start of run
return True
if self.current_job is not None and self.next_job is None: # End of run
return True
return self.current_job.spec.id != self.next_job.spec.id
def __init__(self, device_manager, context, result_manager):
self.device_manager = device_manager
self.device = device_manager.target
self.context = context
self.result_manager = result_manager
self.logger = logging.getLogger('Runner')
self.job_queue = []
self.completed_jobs = []
self._initial_reset = True
def init_queue(self, specs):
raise NotImplementedError()
def run(self): # pylint: disable=too-many-branches
self._send(signal.RUN_START)
self._initialize_run()
try:
while self.job_queue:
try:
self._init_job()
self._run_job()
except KeyboardInterrupt:
self.current_job.result.status = IterationResult.ABORTED
raise
except Exception, e: # pylint: disable=broad-except
self.current_job.result.status = IterationResult.FAILED
self.current_job.result.add_event(e.message)
if isinstance(e, DeviceNotRespondingError):
self.logger.info('Device appears to be unresponsive.')
if self.context.reboot_policy.can_reboot and self.device.can('reset_power'):
self.logger.info('Attempting to hard-reset the device...')
try:
self.device.boot(hard=True)
self.device.connect()
except DeviceError: # hard_boot not implemented for the device.
raise e
else:
raise e
else: # not a DeviceNotRespondingError
self.logger.error(e)
finally:
self._finalize_job()
except KeyboardInterrupt:
self.logger.info('Got CTRL-C. Finalizing run... (CTRL-C again to abort).')
# Skip through the remaining jobs.
while self.job_queue:
self.context.next_job(self.current_job)
self.current_job.result.status = IterationResult.ABORTED
self._finalize_job()
except DeviceNotRespondingError:
self.logger.info('Device unresponsive and recovery not possible. Skipping the rest of the run.')
self.context.aborted = True
while self.job_queue:
self.context.next_job(self.current_job)
self.current_job.result.status = IterationResult.SKIPPED
self._finalize_job()
instrumentation.enable_all()
self._finalize_run()
self._process_results()
self.result_manager.finalize(self.context)
self._send(signal.RUN_END)
def _initialize_run(self):
self.context.runner = self
self.context.run_info.start_time = datetime.utcnow()
self._connect_to_device()
self.logger.info('Initializing device')
self.device_manager.initialize(self.context)
self.logger.info('Initializing workloads')
for workload_spec in self.context.config.workload_specs:
workload_spec.workload.initialize(self.context)
self.context.run_info.device_properties = self.device_manager.info
self.result_manager.initialize(self.context)
self._send(signal.RUN_INIT)
if instrumentation.check_failures():
raise InstrumentError('Detected failure(s) during instrumentation initialization.')
def _connect_to_device(self):
if self.context.reboot_policy.perform_initial_boot:
try:
self.device_manager.connect()
except DeviceError: # device may be offline
if self.device.can('reset_power'):
with self._signal_wrap('INITIAL_BOOT'):
self.device.boot(hard=True)
else:
raise DeviceError('Cannot connect to device for initial reboot; '
'and device does not support hard reset.')
else: # successfully connected
self.logger.info('\tBooting device')
with self._signal_wrap('INITIAL_BOOT'):
self._reboot_device()
else:
self.logger.info('Connecting to device')
self.device_manager.connect()
def _init_job(self):
self.current_job.result.status = IterationResult.RUNNING
self.context.next_job(self.current_job)
def _run_job(self): # pylint: disable=too-many-branches
spec = self.current_job.spec
if not spec.enabled:
self.logger.info('Skipping workload %s (iteration %s)', spec, self.context.current_iteration)
self.current_job.result.status = IterationResult.SKIPPED
return
self.logger.info('Running workload %s (iteration %s)', spec, self.context.current_iteration)
if spec.flash:
if not self.context.reboot_policy.can_reboot:
raise ConfigError('Cannot flash as reboot_policy does not permit rebooting.')
if not self.device.can('flash'):
raise DeviceError('Device does not support flashing.')
self._flash_device(spec.flash)
elif not self.completed_jobs:
# Never reboot on the very first job of a run, as we would have done
# the initial reboot if a reboot was needed.
pass
elif self.context.reboot_policy.reboot_on_each_spec and self.spec_changed:
self.logger.debug('Rebooting on spec change.')
self._reboot_device()
elif self.context.reboot_policy.reboot_on_each_iteration:
self.logger.debug('Rebooting on iteration.')
self._reboot_device()
instrumentation.disable_all()
instrumentation.enable(spec.instrumentation)
self.device_manager.start()
if self.spec_changed:
self._send(signal.WORKLOAD_SPEC_START)
self._send(signal.ITERATION_START)
try:
setup_ok = False
with self._handle_errors('Setting up device parameters'):
self.device_manager.set_runtime_parameters(spec.runtime_parameters)
setup_ok = True
if setup_ok:
with self._handle_errors('running {}'.format(spec.workload.name)):
self.current_job.result.status = IterationResult.RUNNING
self._run_workload_iteration(spec.workload)
else:
self.logger.info('\tSkipping the rest of the iterations for this spec.')
spec.enabled = False
except KeyboardInterrupt:
self._send(signal.ITERATION_END)
self._send(signal.WORKLOAD_SPEC_END)
raise
else:
self._send(signal.ITERATION_END)
if self.spec_will_change or not spec.enabled:
self._send(signal.WORKLOAD_SPEC_END)
finally:
self.device_manager.stop()
def _finalize_job(self):
self.context.run_result.iteration_results.append(self.current_job.result)
job = self.job_queue.pop(0)
job.iteration = self.context.current_iteration
if job.result.status in self.config.retry_on_status:
if job.retry >= self.config.max_retries:
self.logger.error('Exceeded maximum number of retries. Abandoning job.')
else:
self.logger.info('Job status was {}. Retrying...'.format(job.result.status))
retry_job = RunnerJob(job.spec, job.retry + 1)
self.job_queue.insert(0, retry_job)
self.completed_jobs.append(job)
self.context.end_job()
def _finalize_run(self):
self.logger.info('Finalizing workloads')
for workload_spec in self.context.config.workload_specs:
workload_spec.workload.finalize(self.context)
self.logger.info('Finalizing.')
self._send(signal.RUN_FIN)
with self._handle_errors('Disconnecting from the device'):
self.device.disconnect()
info = self.context.run_info
info.end_time = datetime.utcnow()
info.duration = info.end_time - info.start_time
def _process_results(self):
self.logger.info('Processing overall results')
with self._signal_wrap('OVERALL_RESULTS_PROCESSING'):
if instrumentation.check_failures():
self.context.run_result.non_iteration_errors = True
self.result_manager.process_run_result(self.context.run_result, self.context)
def _run_workload_iteration(self, workload):
self.logger.info('\tSetting up')
with self._signal_wrap('WORKLOAD_SETUP'):
try:
workload.setup(self.context)
except:
self.logger.info('\tSkipping the rest of the iterations for this spec.')
self.current_job.spec.enabled = False
raise
try:
self.logger.info('\tExecuting')
with self._handle_errors('Running workload'):
with self._signal_wrap('WORKLOAD_EXECUTION'):
workload.run(self.context)
self.logger.info('\tProcessing result')
self._send(signal.BEFORE_WORKLOAD_RESULT_UPDATE)
try:
if self.current_job.result.status != IterationResult.FAILED:
with self._handle_errors('Processing workload result',
on_error_status=IterationResult.PARTIAL):
workload.update_result(self.context)
self._send(signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE)
if self.current_job.result.status == IterationResult.RUNNING:
self.current_job.result.status = IterationResult.OK
finally:
self._send(signal.AFTER_WORKLOAD_RESULT_UPDATE)
finally:
self.logger.info('\tTearing down')
with self._handle_errors('Tearing down workload',
on_error_status=IterationResult.NONCRITICAL):
with self._signal_wrap('WORKLOAD_TEARDOWN'):
workload.teardown(self.context)
self.result_manager.add_result(self.current_job.result, self.context)
def _flash_device(self, flashing_params):
with self._signal_wrap('FLASHING'):
self.device.flash(**flashing_params)
self.device.connect()
def _reboot_device(self):
with self._signal_wrap('BOOT'):
for reboot_attempts in xrange(MAX_REBOOT_ATTEMPTS):
if reboot_attempts:
self.logger.info('\tRetrying...')
with self._handle_errors('Rebooting device'):
self.device.boot(**self.current_job.spec.boot_parameters)
break
else:
raise DeviceError('Could not reboot device; max reboot attempts exceeded.')
self.device.connect()
def _send(self, s):
signal.send(s, self, self.context)
def _take_screenshot(self, filename):
if self.context.output_directory:
filepath = os.path.join(self.context.output_directory, filename)
else:
filepath = os.path.join(settings.output_directory, filename)
self.device.capture_screen(filepath)
@contextmanager
def _handle_errors(self, action, on_error_status=IterationResult.FAILED):
try:
if action is not None:
self.logger.debug(action)
yield
except (KeyboardInterrupt, DeviceNotRespondingError):
raise
except (WAError, TimeoutError), we:
self.device.check_responsive()
if self.current_job:
self.current_job.result.status = on_error_status
self.current_job.result.add_event(str(we))
try:
self._take_screenshot('error.png')
except Exception, e: # pylint: disable=W0703
# We're already in error state, so the fact that taking a
# screenshot failed is not surprising...
pass
if action:
action = action[0].lower() + action[1:]
self.logger.error('Error while {}:\n\t{}'.format(action, we))
except Exception, e: # pylint: disable=W0703
error_text = '{}("{}")'.format(e.__class__.__name__, e)
if self.current_job:
self.current_job.result.status = on_error_status
self.current_job.result.add_event(error_text)
self.logger.error('Error while {}'.format(action))
self.logger.error(error_text)
if isinstance(e, subprocess.CalledProcessError):
self.logger.error('Got:')
self.logger.error(e.output)
tb = get_traceback()
self.logger.error(tb)
@contextmanager
def _signal_wrap(self, signal_name):
"""Wraps the suite in before/after signals, ensuring
that after signal is always sent."""
before_signal = getattr(signal, 'BEFORE_' + signal_name)
success_signal = getattr(signal, 'SUCCESSFUL_' + signal_name)
after_signal = getattr(signal, 'AFTER_' + signal_name)
try:
self._send(before_signal)
yield
self._send(success_signal)
finally:
self._send(after_signal)
class BySpecRunner(Runner):
"""
This is the "classic" implementation that executes all iterations of a workload
spec before proceeding onto the next spec, e.g. A1, A2, B1, B2 for specs A and B
with two iterations each.
"""
def init_queue(self, specs):
jobs = [[RunnerJob(s) for _ in xrange(s.number_of_iterations)] for s in specs] # pylint: disable=unused-variable
self.job_queue = [j for spec_jobs in jobs for j in spec_jobs]
class BySectionRunner(Runner):
"""
Runs the first iteration for all benchmarks first, before proceeding to the next iteration,
i.e. A1, B1, C1, A2, B2, C2... instead of A1, A2, B1, B2, C1, C2...
If multiple sections were specified in the agenda, this will run all specs for the first section
followed by all specs for the second section, etc.
e.g. given sections X and Y, and global specs A and B, with 2 iterations, this will run
X.A1, X.B1, Y.A1, Y.B1, X.A2, X.B2, Y.A2, Y.B2
"""
def init_queue(self, specs):
jobs = [[RunnerJob(s) for _ in xrange(s.number_of_iterations)] for s in specs]
self.job_queue = [j for spec_jobs in izip_longest(*jobs) for j in spec_jobs if j]
class ByIterationRunner(Runner):
"""
Runs the first iteration for all benchmarks first, before proceeding to the next iteration,
i.e. A1, B1, C1, A2, B2, C2... instead of A1, A2, B1, B2, C1, C2...
If multiple sections were specified in the agenda, this will run all sections for the first global
spec first, followed by all sections for the second spec, etc.
e.g. given sections X and Y, and global specs A and B, with 2 iterations, this will run
X.A1, Y.A1, X.B1, Y.B1, X.A2, Y.A2, X.B2, Y.B2
"""
def init_queue(self, specs):
sections = OrderedDict()
for s in specs:
if s.section_id not in sections:
sections[s.section_id] = []
sections[s.section_id].append(s)
specs = [s for section_specs in izip_longest(*sections.values()) for s in section_specs if s]
jobs = [[RunnerJob(s) for _ in xrange(s.number_of_iterations)] for s in specs]
self.job_queue = [j for spec_jobs in izip_longest(*jobs) for j in spec_jobs if j]
class RandomRunner(Runner):
"""
This will run specs in a random order.
"""
def init_queue(self, specs):
jobs = [[RunnerJob(s) for _ in xrange(s.number_of_iterations)] for s in specs] # pylint: disable=unused-variable
all_jobs = [j for spec_jobs in jobs for j in spec_jobs]
random.shuffle(all_jobs)
self.job_queue = all_jobs


@ -1,32 +0,0 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Separate module to avoid circular dependencies
from wlauto.core.configuration import settings
from wlauto.core.plugin import Plugin
from wlauto.utils.misc import load_class
from wlauto.core import pluginloader
def get_plugin_type(ext):
"""Given an instance of ``wlauto.core.Plugin``, return a string representing
the type of the plugin (e.g. ``'workload'`` for a Workload subclass instance)."""
if not isinstance(ext, Plugin):
raise ValueError('{} is not an instance of Plugin'.format(ext))
for name, cls in pluginloader.kind_map.iteritems():
if isinstance(ext, cls):
return name
raise ValueError('Unknown plugin type: {}'.format(ext.__class__.__name__))
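# Usage sketch (assuming 'wl' is a loaded Workload instance):
#
#     get_plugin_type(wl)   # -> 'workload'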


@ -1,33 +0,0 @@
import os
import shutil
from wlauto.core.configuration import settings
def init_user_directory(overwrite_existing=False): # pylint: disable=R0914
"""
Initialise a fresh user directory.
"""
if os.path.exists(settings.user_directory):
if not overwrite_existing:
raise RuntimeError('Environment {} already exists.'.format(settings.user_directory))
shutil.rmtree(settings.user_directory)
os.makedirs(settings.user_directory)
os.makedirs(settings.dependencies_directory)
os.makedirs(settings.plugins_directory)
# TODO: generate default config.yaml here
if os.getenv('USER') == 'root':
# If running with sudo on POSIX, change the ownership to the real user.
real_user = os.getenv('SUDO_USER')
if real_user:
import pwd # done here as module won't import on win32
user_entry = pwd.getpwnam(real_user)
uid, gid = user_entry.pw_uid, user_entry.pw_gid
os.chown(settings.user_directory, uid, gid)
# why, oh why isn't there a recursive=True option for os.chown?
for root, dirs, files in os.walk(settings.user_directory):
for d in dirs:
os.chown(os.path.join(root, d), uid, gid)
for f in files:
os.chown(os.path.join(root, f), uid, gid)


@ -1,399 +0,0 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Adding a New Instrument
=======================
Any new instrument should be a subclass of Instrument and it must have a name.
When a new instrument is added to Workload Automation, the methods of the new
instrument will be found automatically and hooked up to the supported signals.
Once a signal is broadcast, the corresponding registered method is invoked.
Each method in Instrument must take two arguments, which are self and context.
Supported signals can be found in [... link to signals ...] To make
implementations easier and more uniform, the basic steps to add a new
instrument are similar to the steps to add a new workload.
Hence, implementing the following methods is sufficient to add a new instrument:
- setup: This method is invoked after the workload is set up. All the
necessary setup should go inside this method: operations like
pushing files to the target device, installing them, clearing logs,
etc.
- start: It is invoked just before the workload starts execution. This is
where the instrument's measurements start being registered/taken.
- stop: It is invoked just after the workload execution stops. The
measurements should stop being taken/registered.
- update_result: It is invoked after the workload has updated its result.
update_result is where the taken measurements are added to the result so they
can be processed by Workload Automation.
- teardown is invoked after the workload is torn down. It is a good place
to clean up any logs generated by the instrument.
For example, to add an instrument which will trace device errors, we subclass
Instrument and override the ``name`` class attribute. ::
BINARY_FILE = os.path.join(os.path.dirname(__file__), 'trace')
class TraceErrorsInstrument(Instrument):
name = 'trace-errors'
def __init__(self, device):
super(TraceErrorsInstrument, self).__init__(device)
self.trace_on_device = os.path.join(self.device.working_directory, 'trace')
We then declare and implement the aforementioned methods. For the setup method,
we want to push the file to the target device and then change the file mode to
755 ::
def setup(self, context):
self.device.push(BINARY_FILE, self.device.working_directory)
self.device.execute('chmod 755 {}'.format(self.trace_on_device))
Then we implement the start method, which will simply run the file to start
tracing. ::
def start(self, context):
self.device.execute('{} start'.format(self.trace_on_device))
Lastly, we need to stop tracing once the workload stops and this happens in the
stop method::
def stop(self, context):
self.device.execute('{} stop'.format(self.trace_on_device))
The generated result can be updated inside update_result, or, if it is a trace,
we just pull the file to the host machine. context has a result variable which
has an add_metric method. It can be used to add the instrument's metrics
to the final result for the workload. The method can be passed 4 params, which
are the metric key, value, unit and lower_is_better, which is a boolean. ::
def update_result(self, context):
# pull the trace file from the device
result = os.path.join(self.device.working_directory, 'trace.txt')
self.device.pull(result, context.working_directory)
# parse the file if it needs to be parsed, or add the result to
# context.result
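The metric-adding call itself might look like the following (a sketch; the
metric name and values are purely illustrative). ::
context.result.add_metric('trace_errors', 7, None, lower_is_better=True)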
At the end, we might want to delete any files generated by the instrumentation
and the code to clear these files goes in the teardown method. ::
def teardown(self, context):
self.device.remove(os.path.join(self.device.working_directory, 'trace.txt'))
"""
import logging
import inspect
from collections import OrderedDict
import wlauto.core.signal as signal
from wlauto.core.plugin import Plugin
from wlauto.exceptions import WAError, DeviceNotRespondingError, TimeoutError
from wlauto.utils.misc import get_traceback, isiterable
from wlauto.utils.types import identifier
logger = logging.getLogger('instrumentation')
# Maps method names onto the signals they should be registered to.
# Note: the begin/end signals are paired -- if a begin_ signal is sent,
# then the corresponding end_ signal is guaranteed to also be sent.
# Note: using OrderedDict to preserve logical ordering for the table generated
# in the documentation
SIGNAL_MAP = OrderedDict([
# Below are "aliases" for some of the more common signals to allow
# instrumentation to have similar structure to workloads
('initialize', signal.RUN_INIT),
('setup', signal.SUCCESSFUL_WORKLOAD_SETUP),
('start', signal.BEFORE_WORKLOAD_EXECUTION),
('stop', signal.AFTER_WORKLOAD_EXECUTION),
('process_workload_result', signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE),
('update_result', signal.AFTER_WORKLOAD_RESULT_UPDATE),
('teardown', signal.AFTER_WORKLOAD_TEARDOWN),
('finalize', signal.RUN_FIN),
('on_run_start', signal.RUN_START),
('on_run_end', signal.RUN_END),
('on_workload_spec_start', signal.WORKLOAD_SPEC_START),
('on_workload_spec_end', signal.WORKLOAD_SPEC_END),
('on_iteration_start', signal.ITERATION_START),
('on_iteration_end', signal.ITERATION_END),
('before_initial_boot', signal.BEFORE_INITIAL_BOOT),
('on_successful_initial_boot', signal.SUCCESSFUL_INITIAL_BOOT),
('after_initial_boot', signal.AFTER_INITIAL_BOOT),
('before_first_iteration_boot', signal.BEFORE_FIRST_ITERATION_BOOT),
('on_successful_first_iteration_boot', signal.SUCCESSFUL_FIRST_ITERATION_BOOT),
('after_first_iteration_boot', signal.AFTER_FIRST_ITERATION_BOOT),
('before_boot', signal.BEFORE_BOOT),
('on_successful_boot', signal.SUCCESSFUL_BOOT),
('after_boot', signal.AFTER_BOOT),
('on_spec_init', signal.SPEC_INIT),
('on_run_init', signal.RUN_INIT),
('on_iteration_init', signal.ITERATION_INIT),
('before_workload_setup', signal.BEFORE_WORKLOAD_SETUP),
('on_successful_workload_setup', signal.SUCCESSFUL_WORKLOAD_SETUP),
('after_workload_setup', signal.AFTER_WORKLOAD_SETUP),
('before_workload_execution', signal.BEFORE_WORKLOAD_EXECUTION),
('on_successful_workload_execution', signal.SUCCESSFUL_WORKLOAD_EXECUTION),
('after_workload_execution', signal.AFTER_WORKLOAD_EXECUTION),
('before_workload_result_update', signal.BEFORE_WORKLOAD_RESULT_UPDATE),
('on_successful_workload_result_update', signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE),
('after_workload_result_update', signal.AFTER_WORKLOAD_RESULT_UPDATE),
('before_workload_teardown', signal.BEFORE_WORKLOAD_TEARDOWN),
('on_successful_workload_teardown', signal.SUCCESSFUL_WORKLOAD_TEARDOWN),
('after_workload_teardown', signal.AFTER_WORKLOAD_TEARDOWN),
('before_overall_results_processing', signal.BEFORE_OVERALL_RESULTS_PROCESSING),
('on_successful_overall_results_processing', signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING),
('after_overall_results_processing', signal.AFTER_OVERALL_RESULTS_PROCESSING),
('on_error', signal.ERROR_LOGGED),
('on_warning', signal.WARNING_LOGGED),
])
PRIORITY_MAP = OrderedDict([
('very_fast_', 20),
('fast_', 10),
('normal_', 0),
('slow_', -10),
('very_slow_', -20),
])
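# A sketch of how the two maps above combine (the instrument below is
# hypothetical):
#
#     class MyInstrument(Instrument):
#         name = 'my-instrument'
#
#         def very_fast_start(self, context):
#             # hooked up to BEFORE_WORKLOAD_EXECUTION (the 'start' alias)
#             # at priority 20, i.e. ahead of other instruments' plain
#             # 'start' callbacks.
#             pass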
installed = []
def is_installed(instrument):
if isinstance(instrument, Instrument):
if instrument in installed:
return True
if instrument.name in [i.name for i in installed]:
return True
elif isinstance(instrument, type):
if instrument in [i.__class__ for i in installed]:
return True
else: # assume string
if identifier(instrument) in [identifier(i.name) for i in installed]:
return True
return False
def is_enabled(instrument):
if isinstance(instrument, Instrument) or isinstance(instrument, type):
name = instrument.name
else: # assume string
name = instrument
try:
installed_instrument = get_instrument(name)
return installed_instrument.is_enabled
except ValueError:
return False
failures_detected = False
def reset_failures():
global failures_detected # pylint: disable=W0603
failures_detected = False
def check_failures():
result = failures_detected
reset_failures()
return result
class ManagedCallback(object):
"""
This wraps instruments' callbacks to ensure that errors do not interfere
with run execution.
"""
def __init__(self, instrument, callback):
self.instrument = instrument
self.callback = callback
def __call__(self, context):
if self.instrument.is_enabled:
try:
self.callback(context)
except (KeyboardInterrupt, DeviceNotRespondingError, TimeoutError): # pylint: disable=W0703
raise
except Exception as e: # pylint: disable=W0703
logger.error('Error in instrument {}'.format(self.instrument.name))
global failures_detected # pylint: disable=W0603
failures_detected = True
if isinstance(e, WAError):
logger.error(e)
else:
tb = get_traceback()
logger.error(tb)
logger.error('{}({})'.format(e.__class__.__name__, e))
if not context.current_iteration:
# Error occurred outside of an iteration (most likely
# during initial setup or teardown). Since this would affect
# the rest of the run, mark the instrument as broken so that
# it doesn't get re-enabled for subsequent iterations.
self.instrument.is_broken = True
disable(self.instrument)
# Need this to keep track of callbacks, because the dispatcher only keeps
# weak references, so if the callbacks aren't referenced elsewhere, they will
# be deallocated before they've had a chance to be invoked.
_callbacks = []
def install(instrument):
"""
This will look for methods (or any callable members) with specific names
in the instrument and hook them up to the corresponding signals.
:param instrument: Instrument instance to install.
"""
logger.debug('Installing instrument %s.', instrument)
if is_installed(instrument):
raise ValueError('Instrument {} is already installed.'.format(instrument.name))
for attr_name in dir(instrument):
priority = 0
stripped_attr_name = attr_name
for key, value in PRIORITY_MAP.iteritems():
if attr_name.startswith(key):
stripped_attr_name = attr_name[len(key):]
priority = value
break
if stripped_attr_name in SIGNAL_MAP:
attr = getattr(instrument, attr_name)
if not callable(attr):
raise ValueError('Attribute {} not callable in {}.'.format(attr_name, instrument))
argspec = inspect.getargspec(attr)
arg_num = len(argspec.args)
# Instrument callbacks will be passed exactly two arguments: self
# (the instrument instance to which the callback is bound) and
# context. However, we also allow callbacks to capture the context
# in variable arguments (declared as "*args" in the definition).
if arg_num > 2 or (arg_num < 2 and argspec.varargs is None):
message = '{} must take exactly 2 positional arguments; {} given.'
raise ValueError(message.format(attr_name, arg_num))
logger.debug('\tConnecting %s to %s', attr.__name__, SIGNAL_MAP[stripped_attr_name])
mc = ManagedCallback(instrument, attr)
_callbacks.append(mc)
signal.connect(mc, SIGNAL_MAP[stripped_attr_name], priority=priority)
installed.append(instrument)
def uninstall(instrument):
instrument = get_instrument(instrument)
installed.remove(instrument)
def validate():
for instrument in installed:
instrument.validate()
def get_instrument(inst):
if isinstance(inst, Instrument):
return inst
for installed_inst in installed:
if identifier(installed_inst.name) == identifier(inst):
return installed_inst
raise ValueError('Instrument {} is not installed'.format(inst))
def disable_all():
for instrument in installed:
_disable_instrument(instrument)
def enable_all():
for instrument in installed:
_enable_instrument(instrument)
def enable(to_enable):
if isiterable(to_enable):
for inst in to_enable:
_enable_instrument(inst)
else:
_enable_instrument(to_enable)
def disable(to_disable):
if isiterable(to_disable):
for inst in to_disable:
_disable_instrument(inst)
else:
_disable_instrument(to_disable)
def _enable_instrument(inst):
inst = get_instrument(inst)
if not inst.is_broken:
logger.debug('Enabling instrument {}'.format(inst.name))
inst.is_enabled = True
else:
logger.debug('Not enabling broken instrument {}'.format(inst.name))
def _disable_instrument(inst):
inst = get_instrument(inst)
if inst.is_enabled:
logger.debug('Disabling instrument {}'.format(inst.name))
inst.is_enabled = False
def get_enabled():
return [i for i in installed if i.is_enabled]
def get_disabled():
return [i for i in installed if not i.is_enabled]
class Instrument(Plugin):
"""
Base class for instrumentation implementations.
"""
kind = "instrument"
def __init__(self, target, **kwargs):
super(Instrument, self).__init__(**kwargs)
self.target = target
self.is_enabled = True
self.is_broken = False
def initialize(self, context):
pass
def finalize(self, context):
pass
def __str__(self):
return self.name
def __repr__(self):
return 'Instrument({})'.format(self.name)


@ -1,188 +0,0 @@
import logging
import os
import shutil
import string
import sys
import uuid
from copy import copy
from wlauto.core.configuration.configuration import JobSpec
from wlauto.core.configuration.manager import ConfigManager
from wlauto.core.device_manager import TargetInfo
from wlauto.utils.misc import touch
from wlauto.utils.serializer import write_pod, read_pod
logger = logging.getLogger('output')
class RunInfo(object):
"""
Information about the current run, such as its unique ID, run
time, etc.
"""
@staticmethod
def from_pod(pod):
uid = pod.pop('uuid')
if uid is not None:
uid = uuid.UUID(uid)
instance = RunInfo(**pod)
instance.uuid = uid
return instance
def __init__(self, run_name=None, project=None, project_stage=None,
start_time=None, end_time=None, duration=None):
self.uuid = uuid.uuid4()
self.run_name = run_name
self.project = project
self.project_stage = project_stage
self.start_time = start_time
self.end_time = end_time
self.duration = duration
def to_pod(self):
d = copy(self.__dict__)
d['uuid'] = str(self.uuid)
return d
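# Round-trip sketch (illustrative values): RunInfo serializes to a plain
# dict ("pod") with the uuid rendered as a string, and can be restored
# from one.
#
#     info = RunInfo(run_name='test_run')
#     pod = info.to_pod()
#     restored = RunInfo.from_pod(pod)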
class RunState(object):
"""
Represents the state of a WA run.
"""
@staticmethod
def from_pod(pod):
return RunState()
def __init__(self):
pass
def to_pod(self):
return {}
class RunOutput(object):
@property
def logfile(self):
return os.path.join(self.basepath, 'run.log')
@property
def metadir(self):
return os.path.join(self.basepath, '__meta')
@property
def infofile(self):
return os.path.join(self.metadir, 'run_info.json')
@property
def statefile(self):
return os.path.join(self.basepath, '.run_state.json')
@property
def configfile(self):
return os.path.join(self.metadir, 'config.json')
@property
def targetfile(self):
return os.path.join(self.metadir, 'target_info.json')
@property
def jobsfile(self):
return os.path.join(self.metadir, 'jobs.json')
@property
def raw_config_dir(self):
return os.path.join(self.metadir, 'raw_config')
def __init__(self, path):
self.basepath = path
self.info = None
self.state = None
if (not os.path.isfile(self.statefile) or
not os.path.isfile(self.infofile)):
msg = '"{}" does not exist or is not a valid WA output directory.'
raise ValueError(msg.format(self.basepath))
self.reload()
def reload(self):
self.info = RunInfo.from_pod(read_pod(self.infofile))
self.state = RunState.from_pod(read_pod(self.statefile))
def write_info(self):
write_pod(self.info.to_pod(), self.infofile)
def write_state(self):
write_pod(self.state.to_pod(), self.statefile)
def write_config(self, config):
write_pod(config.to_pod(), self.configfile)
def read_config(self):
if not os.path.isfile(self.configfile):
return None
return ConfigManager.from_pod(read_pod(self.configfile))
def write_target_info(self, ti):
write_pod(ti.to_pod(), self.targetfile)
def read_target_info(self):
if not os.path.isfile(self.targetfile):
return None
return TargetInfo.from_pod(read_pod(self.targetfile))
def write_job_specs(self, job_specs):
js_pod = {'jobs': [js.to_pod() for js in job_specs]}
write_pod(js_pod, self.jobsfile)
def read_job_specs(self):
if not os.path.isfile(self.jobsfile):
return None
pod = read_pod(self.jobsfile)
return [JobSpec.from_pod(jp) for jp in pod['jobs']]
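# Usage sketch (assuming 'wa_output' is an existing WA output directory):
#
#     ro = RunOutput('wa_output')
#     ro.info.run_name       # run metadata read from __meta/run_info.json
#     ro.read_job_specs()    # job specs read from __meta/jobs.json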
def init_wa_output(path, wa_state, force=False):
if os.path.exists(path):
if force:
logger.info('Removing existing output directory.')
shutil.rmtree(os.path.abspath(path))
else:
raise RuntimeError('path exists: {}'.format(path))
logger.info('Creating output directory.')
os.makedirs(path)
meta_dir = os.path.join(path, '__meta')
os.makedirs(meta_dir)
_save_raw_config(meta_dir, wa_state)
touch(os.path.join(path, 'run.log'))
info = RunInfo(
run_name=wa_state.run_config.run_name,
project=wa_state.run_config.project,
project_stage=wa_state.run_config.project_stage,
)
write_pod(info.to_pod(), os.path.join(meta_dir, 'run_info.json'))
with open(os.path.join(path, '.run_state.json'), 'w') as wfh:
wfh.write('{}')
return RunOutput(path)
def _save_raw_config(meta_dir, state):
raw_config_dir = os.path.join(meta_dir, 'raw_config')
os.makedirs(raw_config_dir)
for i, source in enumerate(state.loaded_config_sources):
if not os.path.isfile(source):
continue
basename = os.path.basename(source)
dest_path = os.path.join(raw_config_dir, 'cfg{}-{}'.format(i, basename))
shutil.copy(source, dest_path)


@ -1,793 +0,0 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=E1101
import os
import sys
import inspect
import imp
import string
import logging
from collections import OrderedDict, defaultdict
from itertools import chain
from copy import copy
from wlauto.exceptions import NotFoundError, LoaderError, ValidationError, ConfigError, HostError
from wlauto.utils.misc import (ensure_directory_exists as _d,
walk_modules, load_class, merge_dicts_simple, get_article)
from wlauto.core.configuration import settings
from wlauto.utils.types import identifier, boolean
from wlauto.core.configuration.configuration import ConfigurationPoint as Parameter
MODNAME_TRANS = string.maketrans(':/\\.', '____')
class AttributeCollection(object):
"""
Accumulator for plugin attribute objects (such as Parameters or Artifacts). This will
replace any class member list accumulating such attributes through the magic of
metaprogramming\ [*]_.
.. [*] which is totally safe and not going to backfire in any way...
"""
@property
def values(self):
return self._attrs.values()
def __init__(self, attrcls):
self._attrcls = attrcls
self._attrs = OrderedDict()
def add(self, p):
p = self._to_attrcls(p)
if p.name in self._attrs:
if p.override:
newp = copy(self._attrs[p.name])
for a, v in p.__dict__.iteritems():
if v is not None:
setattr(newp, a, v)
if not hasattr(newp, "_overridden"):
newp._overridden = p._owner
self._attrs[p.name] = newp
else:
# Duplicate attribute condition is checked elsewhere.
pass
else:
self._attrs[p.name] = p
append = add
def __str__(self):
return 'AC({})'.format(map(str, self._attrs.values()))
__repr__ = __str__
def _to_attrcls(self, p):
old_owner = getattr(p, "_owner", None)
if isinstance(p, basestring):
p = self._attrcls(p)
elif isinstance(p, tuple) or isinstance(p, list):
p = self._attrcls(*p)
elif isinstance(p, dict):
p = self._attrcls(**p)
elif not isinstance(p, self._attrcls):
raise ValueError('Invalid parameter value: {}'.format(p))
if (p.name in self._attrs and not p.override and
p.name != 'modules'): # TODO: HACK due to "diamond dependency" in workloads...
raise ValueError('Attribute {} has already been defined.'.format(p.name))
p._owner = old_owner
return p
def __iadd__(self, other):
for p in other:
self.add(p)
return self
def __iter__(self):
return iter(self.values)
def __contains__(self, p):
return p in self._attrs
def __getitem__(self, i):
return self._attrs[i]
def __len__(self):
return len(self._attrs)
class AliasCollection(AttributeCollection):
def __init__(self):
super(AliasCollection, self).__init__(Alias)
def _to_attrcls(self, p):
if isinstance(p, tuple) or isinstance(p, list):
# must be in the form (name, {param: value, ...})
p = self._attrcls(p[0], **p[1])
elif not isinstance(p, self._attrcls):
raise ValueError('Invalid parameter value: {}'.format(p))
if p.name in self._attrs:
raise ValueError('Attribute {} has already been defined.'.format(p.name))
return p
class ListCollection(list):
def __init__(self, attrcls): # pylint: disable=unused-argument
super(ListCollection, self).__init__()
class Artifact(object):
"""
This is an artifact generated during execution/post-processing of a workload.
Unlike metrics, this represents an actual artifact, such as a file, that has been generated.
This may be a "result", such as a trace, or it could be "metadata", such as logs.
These are distinguished using the ``kind`` attribute, which also helps WA decide
how it should be handled. Currently supported kinds are:
:log: A log file. Not part of "results" as such but contains information about the
run/workload execution that may be useful for diagnostics/meta analysis.
:meta: A file containing metadata. This is not part of "results", but contains
information that may be necessary to reproduce the results (contrast with
``log`` artifacts which are *not* necessary).
:data: This file contains new data, not available otherwise and should be considered
part of the "results" generated by WA. Most traces would fall into this category.
:export: Exported version of results or some other artifact. This signifies that
this artifact does not contain any new data that is not available
elsewhere and that it may be safely discarded without losing information.
:raw: Signifies that this is a raw dump/log that is normally processed to extract
useful information and is then discarded. In a sense, it is the opposite of
``export``, but in general may also be discarded.
.. note:: whether a file is marked as ``log``/``data`` or ``raw`` depends on
how important it is to preserve this file, e.g. when archiving, vs
how much space it takes up. Unlike ``export`` artifacts which are
(almost) always ignored by other exporters as that would never result
in data loss, ``raw`` files *may* be processed by exporters if they
decided that the risk of losing potentially (though unlikely) useful
data is greater than the time/space cost of handling the artifact (e.g.
a database uploader may choose to ignore ``raw`` artifacts, where as a
network filer archiver may choose to archive them).
.. note: The kind parameter is intended to represent the logical function of a particular
artifact, not its intended means of processing -- this is left entirely up to the
result processors.
"""
RUN = 'run'
ITERATION = 'iteration'
valid_kinds = ['log', 'meta', 'data', 'export', 'raw']
def __init__(self, name, path, kind, level=RUN, mandatory=False, description=None):
""""
:param name: Name that uniquely identifies this artifact.
:param path: The *relative* path of the artifact. Depending on the ``level``
must be either relative to the run or iteration output directory.
Note: this path *must* be delimited using ``/`` irrespective of the
operating system.
:param kind: The type of the artifact this is (e.g. log file, result, etc.) this
will be used as a hint to result processors. This must be one of ``'log'``,
``'meta'``, ``'data'``, ``'export'``, ``'raw'``.
:param level: The level at which the artifact will be generated. Must be either
``'iteration'`` or ``'run'``.
:param mandatory: Boolean value indicating whether this artifact must be present
at the end of result processing for its level.
:param description: A free-form description of what this artifact is.
"""
if kind not in self.valid_kinds:
raise ValueError('Invalid Artifact kind: {}; must be in {}'.format(kind, self.valid_kinds))
self.name = name
self.path = path.replace('/', os.sep) if path is not None else path
self.kind = kind
self.level = level
self.mandatory = mandatory
self.description = description
def exists(self, context):
"""Returns ``True`` if artifact exists within the specified context, and
``False`` otherwise."""
fullpath = os.path.join(context.output_directory, self.path)
return os.path.exists(fullpath)
def to_dict(self):
return copy(self.__dict__)
class Alias(object):
"""
This represents a configuration alias for a plugin, mapping an alternative name to
a set of parameter values, effectively providing an alternative set of default values.
"""
def __init__(self, name, **kwargs):
self.name = name
self.params = kwargs
self.plugin_name = None # gets set by the MetaClass
def validate(self, ext):
ext_params = set(p.name for p in ext.parameters)
for param in self.params:
if param not in ext_params:
# Raising config error because aliases might have come through
# the config.
msg = 'Parameter {} (defined in alias {}) is invalid for {}'
raise ConfigError(msg.format(param, self.name, ext.name))
class PluginMeta(type):
"""
This basically adds some magic to plugins to make implementing new plugins, such as
such as workloads, less complicated.
It ensures that certain class attributes (specified by the ``to_propagate``
attribute of the metaclass) get propagated down the inheritance hierarchy. The assumption
is that the values of the attributes specified in the class are iterable; if that is not met,
Bad Things (tm) will happen.
This also provides virtual method implementation, similar to those in C-derived OO languages,
and alias specifications.
"""
to_propagate = [
('parameters', Parameter, AttributeCollection),
('artifacts', Artifact, AttributeCollection),
('core_modules', str, ListCollection),
]
virtual_methods = ['validate', 'initialize', 'finalize']
global_virtuals = ['initialize', 'finalize']
def __new__(mcs, clsname, bases, attrs):
mcs._propagate_attributes(bases, attrs, clsname)
cls = type.__new__(mcs, clsname, bases, attrs)
mcs._setup_aliases(cls)
mcs._implement_virtual(cls, bases)
return cls
@classmethod
def _propagate_attributes(mcs, bases, attrs, clsname):
"""
For attributes specified by to_propagate, their values will be a union of
that specified for cls and its bases (cls values overriding those of bases
in case of conflicts).
"""
for prop_attr, attr_cls, attr_collector_cls in mcs.to_propagate:
should_propagate = False
propagated = attr_collector_cls(attr_cls)
for base in bases:
if hasattr(base, prop_attr):
propagated += getattr(base, prop_attr) or []
should_propagate = True
if prop_attr in attrs:
pattrs = attrs[prop_attr] or []
for pa in pattrs:
if not isinstance(pa, basestring):
pa._owner = clsname
propagated += pattrs
should_propagate = True
if should_propagate:
for p in propagated:
override = bool(getattr(p, "override", None))
overridden = bool(getattr(p, "_overridden", None))
if override != overridden:
msg = "Overriding non existing parameter '{}' inside '{}'"
raise ValueError(msg.format(p.name, p._owner))
attrs[prop_attr] = propagated
@classmethod
def _setup_aliases(mcs, cls):
if hasattr(cls, 'aliases'):
aliases, cls.aliases = cls.aliases, AliasCollection()
for alias in aliases:
if isinstance(alias, basestring):
alias = Alias(alias)
alias.validate(cls)
alias.plugin_name = cls.name
cls.aliases.add(alias)
@classmethod
def _implement_virtual(mcs, cls, bases):
"""
This implements automatic method propagation to the bases, so
that you don't have to do something like
super(cls, self).vmname()
This also ensures that the methods that have been identified as
"globally virtual" are executed exactly once per WA execution, even if
invoked through instances of different subclasses.
"""
methods = {}
called_globals = set()
for vmname in mcs.virtual_methods:
clsmethod = getattr(cls, vmname, None)
if clsmethod:
basemethods = [getattr(b, vmname) for b in bases if hasattr(b, vmname)]
methods[vmname] = [bm for bm in basemethods if bm != clsmethod]
methods[vmname].append(clsmethod)
def generate_method_wrapper(vname): # pylint: disable=unused-argument
# this creates a closure with the method name so that it
# does not need to be passed to the wrapper as an argument,
# leaving the wrapper to accept exactly the same set of
# arguments as the method it is wrapping.
name__ = vmname # pylint: disable=cell-var-from-loop
def wrapper(self, *args, **kwargs):
for dm in methods[name__]:
if name__ in mcs.global_virtuals:
if dm not in called_globals:
dm(self, *args, **kwargs)
called_globals.add(dm)
else:
dm(self, *args, **kwargs)
return wrapper
setattr(cls, vmname, generate_method_wrapper(vmname))
class Plugin(object):
"""
Base class for all WA plugins. A plugin is basically a plug-in.
It extends the functionality of WA in some way. Plugins are discovered
and loaded dynamically by the plugin loader upon invocation of WA scripts.
Adding a plugin is a matter of placing a class that implements an appropriate
interface somewhere it would be discovered by the loader. That "somewhere" is
typically one of the plugin subdirectories under ``~/.workload_automation/``.
"""
__metaclass__ = PluginMeta
kind = None
name = None
parameters = [
Parameter('modules', kind=list,
description="""
Lists the modules to be loaded by this plugin. A module is a plug-in that
further extends the functionality of a plugin.
"""),
]
artifacts = []
aliases = []
core_modules = []
@classmethod
def get_default_config(cls):
return {p.name: p.default for p in cls.parameters}
@property
def dependencies_directory(self):
return _d(os.path.join(settings.dependencies_directory, self.name))
@property
def _classname(self):
return self.__class__.__name__
def __init__(self, **kwargs):
self.logger = logging.getLogger(self._classname)
self._modules = []
self.capabilities = getattr(self.__class__, 'capabilities', [])
for param in self.parameters:
param.set_value(self, kwargs.get(param.name))
for key in kwargs:
if key not in self.parameters:
message = 'Unexpected parameter "{}" for {}'
raise ConfigError(message.format(key, self.name))
def get_config(self):
"""
Returns current configuration (i.e. parameter values) of this plugin.
"""
config = {}
for param in self.parameters:
config[param.name] = getattr(self, param.name, None)
return config
def validate(self):
"""
Perform basic validation to ensure that this plugin is capable of running.
This is intended as an early check to ensure the plugin has not been mis-configured,
rather than a comprehensive check (that may, e.g., require access to the execution
context).
This method may also be used to enforce (i.e. set as well as check) inter-parameter
constraints for the plugin (e.g. if valid values for parameter A depend on the value
of parameter B -- something that is not possible to enforce using ``Parameter``\ 's
``constraint`` attribute).
"""
if self.name is None:
raise ValidationError('Name not set for {}'.format(self._classname))
for param in self.parameters:
param.validate(self)
def initialize(self, context):
pass
def finalize(self, context):
pass
def check_artifacts(self, context, level):
"""
Make sure that all mandatory artifacts have been generated.
"""
for artifact in self.artifacts:
if artifact.level != level or not artifact.mandatory:
continue
fullpath = os.path.join(context.output_directory, artifact.path)
if not os.path.exists(fullpath):
message = 'Mandatory "{}" has not been generated for {}.'
raise ValidationError(message.format(artifact.path, self.name))
def __getattr__(self, name):
if name == '_modules':
raise ValueError('_modules accessed too early!')
for module in self._modules:
if hasattr(module, name):
return getattr(module, name)
raise AttributeError(name)
def load_modules(self, loader):
"""
Load the modules specified by the "modules" Parameter using the provided loader. A loader
can be any object that has an attribute called "get_module" that implements the following
signature::
get_module(name, owner, **kwargs)
and returns an instance of :class:`wlauto.core.plugin.Module`. If the module with the
specified name is not found, the loader must raise an appropriate exception.
"""
modules = list(reversed(self.core_modules)) + list(reversed(self.modules or []))
if not modules:
return
for module_spec in modules:
if not module_spec:
continue
module = self._load_module(loader, module_spec)
self._install_module(module)
def has(self, capability):
"""Check if this plugin has the specified capability. The alternative method ``can`` is
identical to this. Which to use is up to the caller depending on what makes semantic sense
in the context of the capability, e.g. ``can('hard_reset')`` vs ``has('active_cooling')``."""
return capability in self.capabilities
can = has
def _load_module(self, loader, module_spec):
if isinstance(module_spec, basestring):
name = module_spec
params = {}
elif isinstance(module_spec, dict):
if len(module_spec) != 1:
message = 'Invalid module spec: {}; dict must have exactly one key -- the module name.'
raise ValueError(message.format(module_spec))
name, params = module_spec.items()[0]
else:
message = 'Invalid module spec: {}; must be a string or a one-key dict.'
raise ValueError(message.format(module_spec))
if not isinstance(params, dict):
message = 'Invalid module spec: {}; dict value must also be a dict.'
raise ValueError(message.format(module_spec))
module = loader.get_module(name, owner=self, **params)
module.initialize(None)
return module
def _install_module(self, module):
for capability in module.capabilities:
if capability not in self.capabilities:
self.capabilities.append(capability)
self._modules.append(module)
class PluginLoaderItem(object):
def __init__(self, ext_tuple):
self.name = ext_tuple.name
self.default_package = ext_tuple.default_package
self.default_path = ext_tuple.default_path
self.cls = load_class(ext_tuple.cls)
class PluginLoader(object):
"""
Discovers, enumerates and loads available devices, configs, etc.
The loader will attempt to discover things on construction by looking
in a predetermined set of locations defined by default_paths. Optionally,
additional locations may be specified through the paths parameter, which must
be a list of additional Python module paths (i.e. dot-delimited).
"""
def __init__(self, packages=None, paths=None, ignore_paths=None, keep_going=False):
"""
params::
:packages: List of packages to load plugins from.
:paths: List of paths to be searched for Python modules containing
WA plugins.
:ignore_paths: List of paths to ignore when searching for WA plugins (these would
typically be subdirectories of one or more locations listed in
the ``paths`` parameter).
:keep_going: Specifies whether to keep going if an error occurs while loading
plugins.
"""
self.logger = logging.getLogger('pluginloader')
self.keep_going = keep_going
self.packages = packages or []
self.paths = paths or []
self.ignore_paths = ignore_paths or []
self.plugins = {}
self.kind_map = defaultdict(dict)
self.aliases = {}
self.global_param_aliases = {}
self._discover_from_packages(self.packages)
self._discover_from_paths(self.paths, self.ignore_paths)
def update(self, packages=None, paths=None, ignore_paths=None):
""" Load plugins from the specified paths/packages
without clearing or reloading existing plugins. """
msg = 'Updating from: packages={} paths={}'
self.logger.debug(msg.format(packages, paths))
if packages:
self.packages.extend(packages)
self._discover_from_packages(packages)
if paths:
self.paths.extend(paths)
self.ignore_paths.extend(ignore_paths or [])
self._discover_from_paths(paths, ignore_paths or [])
def clear(self):
""" Clear all discovered items. """
self.plugins = {}
self.kind_map.clear()
def reload(self):
""" Clear all discovered items and re-run the discovery. """
self.logger.debug('Reloading')
self.clear()
self._discover_from_packages(self.packages)
self._discover_from_paths(self.paths, self.ignore_paths)
def get_plugin_class(self, name, kind=None):
"""
Return the class for the specified plugin if found or raises ``ValueError``.
"""
name, _ = self.resolve_alias(name)
if kind is None:
try:
return self.plugins[name]
except KeyError:
raise NotFoundError('plugin {} not found.'.format(name))
if kind not in self.kind_map:
raise ValueError('Unknown plugin type: {}'.format(kind))
store = self.kind_map[kind]
if name not in store:
msg = 'plugin {} is not {} {}.'
raise NotFoundError(msg.format(name, get_article(kind), kind))
return store[name]
def get_plugin(self, name=None, kind=None, *args, **kwargs):
"""
Return plugin of the specified kind with the specified name. Any
additional parameters will be passed to the plugin's __init__.
"""
name, base_kwargs = self.resolve_alias(name)
kwargs = OrderedDict(chain(base_kwargs.iteritems(), kwargs.iteritems()))
cls = self.get_plugin_class(name, kind)
plugin = cls(*args, **kwargs)
return plugin
def get_default_config(self, name):
"""
Returns the default configuration for the specified plugin name. The
name may be an alias, in which case, the returned config will be
augmented with appropriate alias overrides.
"""
real_name, alias_config = self.resolve_alias(name)
base_default_config = self.get_plugin_class(real_name).get_default_config()
return merge_dicts_simple(base_default_config, alias_config)
def list_plugins(self, kind=None):
"""
List discovered plugin classes. Optionally, only list plugins of a
particular type.
"""
if kind is None:
return self.plugins.values()
if kind not in self.kind_map:
raise ValueError('Unknown plugin type: {}'.format(kind))
return self.kind_map[kind].values()
def has_plugin(self, name, kind=None):
"""
Returns ``True`` if a plugin with the specified ``name`` has been
discovered by the loader. If ``kind`` was specified, only returns ``True``
if the plugin has been found, *and* it is of the specified kind.
"""
try:
self.get_plugin_class(name, kind)
return True
except NotFoundError:
return False
def resolve_alias(self, alias_name):
"""
Try to resolve the specified name as a plugin alias. Returns a
two-tuple, the first value of which is the actual plugin name, and the
second is a dict of parameter values for this alias. If the name passed
is already a plugin name, then the result is ``(alias_name, {})``.
"""
alias_name = identifier(alias_name.lower())
if alias_name in self.plugins:
return (alias_name, {})
if alias_name in self.aliases:
alias = self.aliases[alias_name]
return (alias.plugin_name, alias.params)
raise NotFoundError('Could not find plugin or alias "{}"'.format(alias_name))
# Internal methods.
def __getattr__(self, name):
"""
This resolves methods for specific plugin types based on corresponding
generic plugin methods. So it's possible to say things like ::
loader.get_device('foo')
instead of ::
loader.get_plugin('foo', kind='device')
"""
if name.startswith('get_'):
name = name.replace('get_', '', 1)
if name in self.kind_map:
def __wrapper(pname, *args, **kwargs):
return self.get_plugin(pname, name, *args, **kwargs)
return __wrapper
if name.startswith('list_'):
name = name.replace('list_', '', 1).rstrip('s')
if name in self.kind_map:
def __wrapper(*args, **kwargs): # pylint: disable=E0102
return self.list_plugins(name, *args, **kwargs)
return __wrapper
if name.startswith('has_'):
name = name.replace('has_', '', 1)
if name in self.kind_map:
def __wrapper(pname, *args, **kwargs): # pylint: disable=E0102
return self.has_plugin(pname, name, *args, **kwargs)
return __wrapper
raise AttributeError(name)
def _discover_from_packages(self, packages):
self.logger.debug('Discovering plugins in packages')
try:
for package in packages:
for module in walk_modules(package):
self._discover_in_module(module)
except HostError as e:
message = 'Problem loading plugins from {}: {}'
raise LoaderError(message.format(e.module, str(e.orig_exc)))
def _discover_from_paths(self, paths, ignore_paths):
paths = paths or []
ignore_paths = ignore_paths or []
self.logger.debug('Discovering plugins in paths')
for path in paths:
self.logger.debug('Checking path %s', path)
if os.path.isfile(path):
self._discover_from_file(path)
for root, _, files in os.walk(path, followlinks=True):
should_skip = False
for igpath in ignore_paths:
if root.startswith(igpath):
should_skip = True
break
if should_skip:
continue
for fname in files:
if os.path.splitext(fname)[1].lower() != '.py':
continue
filepath = os.path.join(root, fname)
self._discover_from_file(filepath)
def _discover_from_file(self, filepath):
try:
modname = os.path.splitext(filepath[1:])[0].translate(MODNAME_TRANS)
module = imp.load_source(modname, filepath)
self._discover_in_module(module)
except (SystemExit, ImportError), e:
if self.keep_going:
self.logger.warning('Failed to load {}'.format(filepath))
self.logger.warning('Got: {}'.format(e))
else:
raise LoaderError('Failed to load {}'.format(filepath), sys.exc_info())
except Exception as e:
message = 'Problem loading plugins from {}: {}'
raise LoaderError(message.format(filepath, e))
def _discover_in_module(self, module): # NOQA pylint: disable=too-many-branches
self.logger.debug('Checking module %s', module.__name__)
#log.indent()
try:
for obj in vars(module).itervalues():
if inspect.isclass(obj):
if not issubclass(obj, Plugin):
continue
if not obj.kind:
message = 'Skipping plugin {} as it does not define a kind'
self.logger.debug(message.format(obj.__name__))
continue
if not obj.name:
message = 'Skipping {} {} as it does not define a name'
self.logger.debug(message.format(obj.kind, obj.__name__))
continue
try:
self._add_found_plugin(obj)
except LoaderError as e:
if self.keep_going:
self.logger.warning(e)
else:
raise e
finally:
# log.dedent()
pass
def _add_found_plugin(self, obj):
"""
:obj: Found plugin class
:ext: matching plugin item.
"""
self.logger.debug('Adding %s %s', obj.kind, obj.name)
key = identifier(obj.name.lower())
if key in self.plugins or key in self.aliases:
raise LoaderError('{} "{}" already exists.'.format(obj.kind, obj.name))
# Plugins are tracked both in a common plugins dict and in a
# per-kind dict (as retrieving plugins by kind is a common use case).
self.plugins[key] = obj
self.kind_map[obj.kind][key] = obj
for alias in obj.aliases:
alias_id = identifier(alias.name.lower())
if alias_id in self.plugins or alias_id in self.aliases:
raise LoaderError('{} "{}" already exists.'.format(obj.kind, alias.name))
self.aliases[alias_id] = alias

View File

@ -1,89 +0,0 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
class __LoaderWrapper(object):
@property
def kinds(self):
if not self._loader:
self.reset()
return self._loader.kind_map.keys()
@property
def kind_map(self):
if not self._loader:
self.reset()
return self._loader.kind_map
def __init__(self):
self._loader = None
def reset(self):
# These imports cannot be done at top level, because of
# sys.modules manipulation below
from wlauto.core.plugin import PluginLoader
from wlauto.core.configuration import settings
self._loader = PluginLoader(settings.plugin_packages,
[settings.plugins_directory], [])
def update(self, packages=None, paths=None, ignore_paths=None):
if not self._loader:
self.reset()
self._loader.update(packages, paths, ignore_paths)
def reload(self):
if not self._loader:
self.reset()
self._loader.reload()
def list_plugins(self, kind=None):
if not self._loader:
self.reset()
return self._loader.list_plugins(kind)
def has_plugin(self, name, kind=None):
if not self._loader:
self.reset()
return self._loader.has_plugin(name, kind)
def get_plugin_class(self, name, kind=None):
if not self._loader:
self.reset()
return self._loader.get_plugin_class(name, kind)
def get_plugin(self, name=None, kind=None, *args, **kwargs):
if not self._loader:
self.reset()
return self._loader.get_plugin(name=name, kind=kind, *args, **kwargs)
def get_default_config(self, name):
if not self._loader:
self.reset()
return self._loader.get_default_config(name)
def resolve_alias(self, name):
if not self._loader:
self.reset()
return self._loader.resolve_alias(name)
def __getattr__(self, name):
if not self._loader:
self.reset()
return getattr(self._loader, name)
sys.modules[__name__] = __LoaderWrapper()
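# Because the module object is replaced with a __LoaderWrapper instance above,
# the module itself behaves as a lazily-initialised loader. A minimal sketch
# (the plugin name here is illustrative):
#
#   from wlauto.core import pluginloader
#   cls = pluginloader.get_plugin_class('trace-cmd', kind='instrument')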

View File

@ -1,111 +0,0 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Defines infrastructure for resource resolution. This is used to find
various dependencies/assets/etc that WA objects rely on in a flexible way.
"""
import logging
from collections import defaultdict
# Note: this is the modified louie library in wlauto/external.
# prioritylist does not exist in vanilla louie.
from wlauto.utils.types import prioritylist # pylint: disable=E0611,F0401
from wlauto.exceptions import ResourceError
from wlauto.core import pluginloader
class ResourceResolver(object):
"""
Discovers and registers getters, and then handles requests for
resources using registered getters.
"""
def __init__(self, config):
self.logger = logging.getLogger(self.__class__.__name__)
self.getters = defaultdict(prioritylist)
self.config = config
def load(self):
"""
Discover resource getters via the plugin loader, and instantiate and
register each of them with this resolver.
"""
for rescls in pluginloader.list_resource_getters():
getter = self.config.get_plugin(name=rescls.name, kind="resource_getter", resolver=self)
getter.register()
def get(self, resource, strict=True, *args, **kwargs):
"""
Uses registered getters to attempt to discover a resource of the specified
kind and matching the specified criteria. Returns the path to the resource that
has been discovered. If a resource has not been discovered, this will raise
a ``ResourceError`` or, if ``strict`` has been set to ``False``, will return
``None``.
"""
self.logger.debug('Resolving {}'.format(resource))
for getter in self.getters[resource.name]:
self.logger.debug('Trying {}'.format(getter))
result = getter.get(resource, *args, **kwargs)
if result is not None:
self.logger.debug('Resource {} found using {}:'.format(resource, getter))
self.logger.debug('\t{}'.format(result))
return result
if strict:
raise ResourceError('{} could not be found'.format(resource))
self.logger.debug('Resource {} not found.'.format(resource))
return None
def register(self, getter, kind, priority=0):
"""
Register the specified resource getter as being able to discover a resource
of the specified kind with the specified priority.
This method would typically be invoked by a getter inside its __init__.
The idea being that getters register themselves for resources they know
they can discover.
*priorities*
getters that are registered with the highest priority will be invoked first. If
multiple getters are registered under the same priority, they will be invoked
in the order they were registered (i.e. in the order they were discovered). This is
essentially non-deterministic.
Generally getters that are more likely to find a resource, or would find a
"better" version of the resource should register with higher (positive) priorities.
Fall-back getters that should only be invoked if a resource is not found by usual
means should register with lower (negative) priorities.
"""
self.logger.debug('Registering {} for {} resources'.format(getter.name, kind))
self.getters[kind].add(getter, priority)
def unregister(self, getter, kind):
"""
Unregister a getter that has been registered earlier.
"""
self.logger.debug('Unregistering {}'.format(getter.name))
try:
self.getters[kind].remove(getter)
except ValueError:
raise ValueError('Resource getter {} is not installed.'.format(getter.name))
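# A minimal sketch of the intended flow (ApkFile here stands for a
# hypothetical Resource subclass, and `config` for an object exposing
# get_plugin()):
#
#   resolver = ResourceResolver(config)
#   resolver.load()                                     # discover and register getters
#   path = resolver.get(ApkFile(owner), strict=False)   # None if not found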

View File

@ -1,185 +0,0 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wlauto.core.configuration import settings
from wlauto.core.plugin import Plugin
class GetterPriority(object):
"""
Enumerates standard ResourceGetter priorities. In general, getters should register
under one of these, rather than specifying other priority values.
:cached: The cached version of the resource. Look here first. This priority also implies
that the resource at this location is a "cache" and is not the only version of the
resource, so it may be cleared without losing access to the resource.
:preferred: Take this resource in favour of the environment resource.
:environment: Found somewhere under ~/.workload_automation/ or equivalent, or
from environment variables, external configuration files, etc.
These will override resources supplied with the package.
:external_package: Resource provided by another package.
:package: Resource provided with the package.
:remote: Resource will be downloaded from a remote location (such as an HTTP server
or a samba share). Try this only if no other getter was successful.
"""
cached = 20
preferred = 10
remote = 5
environment = 0
external_package = -5
package = -10
class Resource(object):
"""
Represents a resource that needs to be resolved. This can be pretty much
anything: a file, environment variable, a Python object, etc. The only thing
a resource *has* to have is an owner (which would normally be the
Workload/Instrument/Device/etc object that needs the resource). In addition,
a resource may have any number of identifying attributes, but all of them are
resource type specific.
"""
name = None
def __init__(self, owner):
self.owner = owner
def delete(self, instance):
"""
Delete an instance of this resource type. This must be implemented by the concrete
subclasses based on what the resource looks like, e.g. deleting a file or a directory
tree, or removing an entry from a database.
:note: Implementation should *not* contain any logic for deciding whether or not
a resource should be deleted, only the actual deletion. The assumption is
that if this method is invoked, then the decision has already been made.
"""
raise NotImplementedError()
def __str__(self):
return '<{}\'s {}>'.format(self.owner, self.name)
class ResourceGetter(Plugin):
"""
Base class for implementing resource getters. Defines the getter interface. Getters
are responsible for discovering resources (such as particular kinds of files) they
know about based on the parameters that are passed to them. Each getter also has a
dict of attributes that describe its operation, and that may be used to determine
which getters get invoked. There is no pre-defined set of attributes and getters
may define their own.
Class attributes:
:name: Name that uniquely identifies this getter. Must be set by any concrete subclass.
:resource_type: Identifies resource type(s) that this getter can handle. This must
be either a string (for a single type) or a list of strings for
multiple resource types. This must be set by any concrete subclass.
:priority: Priority with which this getter will be invoked. This should be one of
the standard priorities specified in ``GetterPriority`` enumeration. If not
set, this will default to ``GetterPriority.environment``.
"""
kind = "resource_getter"
name = None
resource_type = None
priority = GetterPriority.environment
def __init__(self, resolver=None, **kwargs):
super(ResourceGetter, self).__init__(**kwargs)
self.resolver = resolver
def register(self):
"""
Registers with a resource resolver. The default implementation registers ``self``
for each resource type specified by ``resource_type``; concrete getters may
override this to customise how they register.
"""
if self.resource_type is None:
raise ValueError('No resource type specified for {}'.format(self.name))
elif isinstance(self.resource_type, list):
for rt in self.resource_type:
self.resolver.register(self, rt, self.priority)
else:
self.resolver.register(self, self.resource_type, self.priority)
def unregister(self):
"""Unregister from a resource resolver."""
if self.resource_type is None:
raise ValueError('No resource type specified for {}'.format(self.name))
elif isinstance(self.resource_type, list):
for rt in self.resource_type:
self.resolver.unregister(self, rt)
else:
self.resolver.unregister(self, self.resource_type)
def get(self, resource, **kwargs):
"""
This will get invoked by the resolver when attempting to resolve a resource, passing
in the resource to be resolved as the first parameter. Any additional parameters would
be specific to a particular resource type.
This method will only be invoked for resource types that the getter has registered for.
:param resource: an instance of :class:`wlauto.core.resource.Resource`.
:returns: Implementations of this method must return either the discovered resource or
``None`` if the resource could not be discovered.
"""
raise NotImplementedError()
def delete(self, resource, *args, **kwargs):
"""
Delete the resource if it is discovered. All arguments are passed to a call
to ``self.get()``. If that call returns a resource, it is deleted.
:returns: ``True`` if the specified resource has been discovered and deleted,
and ``False`` otherwise.
"""
discovered = self.get(resource, *args, **kwargs)
if discovered:
resource.delete(discovered)
return True
else:
return False
def __str__(self):
return '<ResourceGetter {}>'.format(self.name)
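# A minimal concrete getter sketch (hypothetical; not part of this module,
# and the dependencies_directory layout is an assumption):
#
# import os
#
# class EnvironmentApkGetter(ResourceGetter):
#
#     name = 'environment_apk'
#     resource_type = 'apk'
#     priority = GetterPriority.environment
#
#     def get(self, resource, **kwargs):
#         # Look for <owner_name>.apk in the owner's dependencies directory.
#         candidate = os.path.join(resource.owner.dependencies_directory,
#                                  '{}.apk'.format(resource.owner.name))
#         return candidate if os.path.isfile(candidate) else None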
class __NullOwner(object):
"""Represents an owner for a resource not owned by anyone."""
name = 'noone'
dependencies_directory = settings.dependencies_directory
def __getattr__(self, name):
return None
def __str__(self):
return 'no-one'
__repr__ = __str__
NO_ONE = __NullOwner()

View File

@ -1,319 +0,0 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=no-member
"""
This module defines the classes used to handle result
processing inside Workload Automation. There will be a
:class:`wlauto.core.workload.WorkloadResult` object generated for
every workload iteration executed. This object will have a list of
:class:`wlauto.core.workload.WorkloadMetric` objects. This list will be
populated by the workload itself and may also be updated by instrumentation
(e.g. to add power measurements). Once the result object has been fully
populated, it will be passed into the ``process_iteration_result`` method of
:class:`ResultProcessor`. Once the entire run has completed, a list containing
result objects from all iterations will be passed into ``process_results``
method of :class:`ResultProcessor`.
Which result processors will be active is defined by the ``result_processors``
list in the ``~/.workload_automation/config.py``. Only the result_processors
whose names appear in this list will be used.
A :class:`ResultsManager` keeps track of active results processors.
"""
import logging
import traceback
from copy import copy
from contextlib import contextmanager
from datetime import datetime
from wlauto.core.plugin import Plugin
from wlauto.core.configuration.configuration import ITERATION_STATUS
from wlauto.exceptions import WAError
from wlauto.utils.types import numeric
from wlauto.utils.misc import enum_metaclass, merge_dicts_simple
class ResultManager(object):
"""
Keeps track of result processors and passes the results on to the individual processors.
"""
def __init__(self):
self.logger = logging.getLogger('ResultsManager')
self.processors = []
self._bad = []
def install(self, processor):
self.logger.debug('Installing results processor %s', processor.name)
self.processors.append(processor)
def uninstall(self, processor):
if processor in self.processors:
self.logger.debug('Uninstalling results processor %s', processor.name)
self.processors.remove(processor)
else:
self.logger.warning('Attempting to uninstall results processor %s, which is not installed.',
processor.name)
def initialize(self, context):
# Errors aren't handled at this stage, because this gets executed
# before workload execution starts and we just want to propagate them
# and terminate (so that error can be corrected and WA restarted).
for processor in self.processors:
processor.initialize(context)
def add_result(self, result, context):
with self._manage_processors(context):
for processor in self.processors:
with self._handle_errors(processor):
processor.process_iteration_result(result, context)
for processor in self.processors:
with self._handle_errors(processor):
processor.export_iteration_result(result, context)
def process_run_result(self, result, context):
with self._manage_processors(context):
for processor in self.processors:
with self._handle_errors(processor):
processor.process_run_result(result, context)
for processor in self.processors:
with self._handle_errors(processor):
processor.export_run_result(result, context)
def finalize(self, context):
with self._manage_processors(context):
for processor in self.processors:
with self._handle_errors(processor):
processor.finalize(context)
def validate(self):
for processor in self.processors:
processor.validate()
@contextmanager
def _manage_processors(self, context, finalize_bad=True):
yield
for processor in self._bad:
if finalize_bad:
processor.finalize(context)
self.uninstall(processor)
self._bad = []
@contextmanager
def _handle_errors(self, processor):
try:
yield
except KeyboardInterrupt as e:
raise e
except WAError as we:
self.logger.error('"{}" result processor has encountered an error'.format(processor.name))
self.logger.error('{}("{}")'.format(we.__class__.__name__, we.message))
self._bad.append(processor)
except Exception as e:  # pylint: disable=W0703
self.logger.error('"{}" result processor has encountered an error'.format(processor.name))
self.logger.error('{}("{}")'.format(e.__class__.__name__, e))
self.logger.error(traceback.format_exc())
self._bad.append(processor)
class ResultProcessor(Plugin):
"""
Base class for result processors. Defines an interface that should be implemented
by the subclasses. A result processor can be used to do any kind of post-processing
of the results, from writing them out to a file, to uploading them to a database,
performing calculations, generating plots, etc.
"""
kind = "result_processor"
def initialize(self, context):
pass
def process_iteration_result(self, result, context):
pass
def export_iteration_result(self, result, context):
pass
def process_run_result(self, result, context):
pass
def export_run_result(self, result, context):
pass
def finalize(self, context):
pass
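# A minimal sketch of a concrete result processor (hypothetical example):
#
# class PrintingProcessor(ResultProcessor):
#
#     name = 'print'
#
#     def process_iteration_result(self, result, context):
#         for metric in result.metrics:
#             print '{}: {} {}'.format(metric.name, metric.value,
#                                      metric.units or '')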
class RunResult(object):
"""
Contains overall results for a run.
"""
__metaclass__ = enum_metaclass('values', return_name=True)
values = [
'OK',
'OKISH',
'PARTIAL',
'FAILED',
'UNKNOWN',
]
@property
def status(self):
if not self.iteration_results or all([s.status == IterationResult.FAILED for s in self.iteration_results]):
return self.FAILED
elif any([s.status == IterationResult.FAILED for s in self.iteration_results]):
return self.PARTIAL
elif any([s.status == IterationResult.ABORTED for s in self.iteration_results]):
return self.PARTIAL
elif (any([s.status == IterationResult.PARTIAL for s in self.iteration_results]) or
self.non_iteration_errors):
return self.OKISH
elif all([s.status == IterationResult.OK for s in self.iteration_results]):
return self.OK
else:
return self.UNKNOWN # should never happen
def __init__(self, run_info, output_directory=None):
self.info = run_info
self.iteration_results = []
self.artifacts = []
self.events = []
self.non_iteration_errors = False
self.output_directory = output_directory
class RunEvent(object):
"""
An event that occurred during a run.
"""
def __init__(self, message):
self.timestamp = datetime.utcnow()
self.message = message
def to_dict(self):
return copy(self.__dict__)
def __str__(self):
return '{} {}'.format(self.timestamp, self.message)
__repr__ = __str__
class IterationResult(object):
"""
Contains the result of running a single iteration of a workload. It is the
responsibility of a workload to instantiate an IterationResult, populate it,
and return it from its get_result() method.
Status explanations:
:NOT_STARTED: This iteration has not yet started.
:RUNNING: This iteration is currently running and no errors have been detected.
:OK: This iteration has completed and no errors have been detected.
:PARTIAL: One or more instruments have failed (the iteration may still be running).
:FAILED: The workload itself has failed.
:ABORTED: The user interrupted the workload.
:SKIPPED: The iteration was skipped due to a previous failure.
"""
__metaclass__ = enum_metaclass('values', return_name=True)
values = ITERATION_STATUS
def __init__(self, spec):
self.spec = spec
self.id = spec.id
self.workload = spec.workload
self.classifiers = copy(spec.classifiers)
self.iteration = None
self.status = self.NOT_STARTED
self.output_directory = None
self.events = []
self.metrics = []
self.artifacts = []
def add_metric(self, name, value, units=None, lower_is_better=False, classifiers=None):
self.metrics.append(Metric(name, value, units, lower_is_better,
merge_dicts_simple(self.classifiers, classifiers)))
def has_metric(self, name):
for metric in self.metrics:
if metric.name == name:
return True
return False
def add_event(self, message):
self.events.append(RunEvent(message))
def to_dict(self):
d = copy(self.__dict__)
d['events'] = [e.to_dict() for e in self.events]
return d
def __iter__(self):
return iter(self.metrics)
def __getitem__(self, name):
for metric in self.metrics:
if metric.name == name:
return metric
raise KeyError('Metric {} not found.'.format(name))
class Metric(object):
"""
This is a single metric collected from executing a workload.
:param name: the name of the metric. Uniquely identifies the metric
within the results.
:param value: The numerical value of the metric for this execution of
a workload. This can be either an int or a float.
:param units: Units for the collected value. Can be None if the value
has no units (e.g. it's a count or a standardised score).
:param lower_is_better: Boolean flag indicating whether lower values are
better than higher ones. Defaults to False.
:param classifiers: A set of key-value pairs to further classify this metric
beyond current iteration (e.g. this can be used to identify
sub-tests).
"""
def __init__(self, name, value, units=None, lower_is_better=False, classifiers=None):
self.name = name
self.value = numeric(value)
self.units = units
self.lower_is_better = lower_is_better
self.classifiers = classifiers or {}
def to_dict(self):
return self.__dict__
def __str__(self):
result = '{}: {}'.format(self.name, self.value)
if self.units:
result += ' ' + self.units
result += ' ({})'.format('-' if self.lower_is_better else '+')
return '<{}>'.format(result)
__repr__ = __str__
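# Example (a sketch): how a workload or instrument would typically record a
# metric on an iteration result, and how it can then be retrieved:
#
#   result.add_metric('execution_time', 12.3, 'seconds', lower_is_better=True)
#   result['execution_time'].value    # -> 12.3
#   str(result['execution_time'])     # -> '<execution_time: 12.3 seconds (-)>'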

View File

@ -1,272 +0,0 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module wraps the louie signalling mechanism. It relies on a modified version of louie
that adds prioritization to handler invocation.
"""
import logging
from contextlib import contextmanager
from louie import dispatcher
from wlauto.utils.types import prioritylist
logger = logging.getLogger('dispatcher')
class Signal(object):
"""
This class implements the signals to be used for notifying callbacks
registered to respond to different states and stages of the execution of workload
automation.
"""
def __init__(self, name, description='no description', invert_priority=False):
"""
Instantiates a Signal.
:param name: name is the identifier of the Signal object. Signal instances with
the same name refer to the same execution stage/state.
:param invert_priority: boolean parameter that determines whether multiple
callbacks for the same signal should be ordered with
ascending or descending priorities. Typically this flag
should be set to True if the Signal is triggered AFTER
a state/stage has been reached. That way callbacks with high
priorities will be called right after the event has occurred.
"""
self.name = name
self.description = description
self.invert_priority = invert_priority
def __str__(self):
return self.name
__repr__ = __str__
def __hash__(self):
return id(self.name)
# These are paired signals -- if the before_ signal is sent, the after_ signal
# is guaranteed to also be sent. In particular, the after_ signals will be sent
# even if there is an error, so you cannot assume in the handler that the
# device has booted successfully. In most cases, you should instead use the
# non-paired signals below.
BEFORE_FLASHING = Signal('before-flashing-signal', invert_priority=True)
SUCCESSFUL_FLASHING = Signal('successful-flashing-signal')
AFTER_FLASHING = Signal('after-flashing-signal')
BEFORE_BOOT = Signal('before-boot-signal', invert_priority=True)
SUCCESSFUL_BOOT = Signal('successful-boot-signal')
AFTER_BOOT = Signal('after-boot-signal')
BEFORE_INITIAL_BOOT = Signal('before-initial-boot-signal', invert_priority=True)
SUCCESSFUL_INITIAL_BOOT = Signal('successful-initial-boot-signal')
AFTER_INITIAL_BOOT = Signal('after-initial-boot-signal')
BEFORE_FIRST_ITERATION_BOOT = Signal('before-first-iteration-boot-signal', invert_priority=True)
SUCCESSFUL_FIRST_ITERATION_BOOT = Signal('successful-first-iteration-boot-signal')
AFTER_FIRST_ITERATION_BOOT = Signal('after-first-iteration-boot-signal')
BEFORE_WORKLOAD_SETUP = Signal('before-workload-setup-signal', invert_priority=True)
SUCCESSFUL_WORKLOAD_SETUP = Signal('successful-workload-setup-signal')
AFTER_WORKLOAD_SETUP = Signal('after-workload-setup-signal')
BEFORE_WORKLOAD_EXECUTION = Signal('before-workload-execution-signal', invert_priority=True)
SUCCESSFUL_WORKLOAD_EXECUTION = Signal('successful-workload-execution-signal')
AFTER_WORKLOAD_EXECUTION = Signal('after-workload-execution-signal')
BEFORE_WORKLOAD_RESULT_UPDATE = Signal('before-iteration-result-update-signal', invert_priority=True)
SUCCESSFUL_WORKLOAD_RESULT_UPDATE = Signal('successful-iteration-result-update-signal')
AFTER_WORKLOAD_RESULT_UPDATE = Signal('after-iteration-result-update-signal')
BEFORE_WORKLOAD_TEARDOWN = Signal('before-workload-teardown-signal', invert_priority=True)
SUCCESSFUL_WORKLOAD_TEARDOWN = Signal('successful-workload-teardown-signal')
AFTER_WORKLOAD_TEARDOWN = Signal('after-workload-teardown-signal')
BEFORE_OVERALL_RESULTS_PROCESSING = Signal('before-overall-results-process-signal', invert_priority=True)
SUCCESSFUL_OVERALL_RESULTS_PROCESSING = Signal('successful-overall-results-process-signal')
AFTER_OVERALL_RESULTS_PROCESSING = Signal('after-overall-results-process-signal')
# These are the not-paired signals; they are emitted independently. E.g. the
# fact that RUN_START was emitted does not mean run end will be.
RUN_START = Signal('start-signal', invert_priority=True)
RUN_END = Signal('end-signal')
WORKLOAD_SPEC_START = Signal('workload-spec-start-signal', invert_priority=True)
WORKLOAD_SPEC_END = Signal('workload-spec-end-signal')
ITERATION_START = Signal('iteration-start-signal', invert_priority=True)
ITERATION_END = Signal('iteration-end-signal')
RUN_INIT = Signal('run-init-signal')
SPEC_INIT = Signal('spec-init-signal')
ITERATION_INIT = Signal('iteration-init-signal')
RUN_FIN = Signal('run-fin-signal')
# These signals are used by the LoggerFilter to tell about logging events
ERROR_LOGGED = Signal('error_logged')
WARNING_LOGGED = Signal('warning_logged')
class CallbackPriority(object):
EXTREMELY_HIGH = 30
VERY_HIGH = 20
HIGH = 10
NORMAL = 0
LOW = -10
VERY_LOW = -20
EXTREMELY_LOW = -30
def __init__(self):
raise ValueError('Cannot instantiate')
class _prioritylist_wrapper(prioritylist):
"""
This adds a NOP append() method so that when louie invokes it to add the
handler to receivers, nothing will happen; the handler is actually added inside
the connect() below according to priority, before louie's connect() gets invoked.
"""
def append(self, *args, **kwargs):
pass
def connect(handler, signal, sender=dispatcher.Any, priority=0):
"""
Connects a callback to a signal, so that the callback will be automatically invoked
when that signal is sent.
Parameters:
:handler: This can be any callable that takes the right arguments for
the signal. For most signals this means a single argument that
will be an ``ExecutionContext`` instance. But please see documentation
for individual signals in the :ref:`signals reference <instrumentation_method_map>`.
:signal: The signal to which the handler will be subscribed. Please see
:ref:`signals reference <instrumentation_method_map>` for the list of standard WA
signals.
.. note:: There is nothing that prevents instrumentation from sending their
own signals that are not part of the standard set. However the signal
must always be a :class:`wlauto.core.signal.Signal` instance.
:sender: The handler will be invoked only for the signals emitted by this sender. By
default, this is set to :class:`louie.dispatcher.Any`, so the handler will
be invoked for signals from any sender.
:priority: An integer (positive or negative) that specifies the priority of the handler.
Handlers with higher priority will be called before handlers with lower
priority. The call order of handlers with the same priority is not specified.
Defaults to 0.
.. note:: Priorities for some signals are inverted (so highest priority
handlers get executed last). Please see :ref:`signals reference <instrumentation_method_map>`
for details.
"""
if getattr(signal, 'invert_priority', False):
priority = -priority
senderkey = id(sender)
if senderkey in dispatcher.connections:
signals = dispatcher.connections[senderkey]
else:
dispatcher.connections[senderkey] = signals = {}
if signal in signals:
receivers = signals[signal]
else:
receivers = signals[signal] = _prioritylist_wrapper()
receivers.add(handler, priority)
dispatcher.connect(handler, signal, sender)
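# Example (a sketch): subscribing a handler to RUN_START with a high priority.
# The permissive *args/**kwargs signature is an assumption to keep the sketch
# generic; real handlers take the arguments documented for their signal.
#
#   def on_run_start(*args, **kwargs):
#       pass  # e.g. start an external logger
#
#   connect(on_run_start, RUN_START, priority=CallbackPriority.HIGH)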
def disconnect(handler, signal, sender=dispatcher.Any):
"""
Disconnect a previously connected handler from the specified signal; optionally,
only for the specified sender.
Parameters:
:handler: The callback to be disconnected.
:signal: The signal the handler is to be disconnected from. This will
be a :class:`wlauto.core.signal.Signal` instance.
:sender: If specified, the handler will only be disconnected from the signal
sent by this sender.
"""
dispatcher.disconnect(handler, signal, sender)
def send(signal, sender=dispatcher.Anonymous, *args, **kwargs):
"""
Sends a signal, causing connected handlers to be invoked.
Parameters:
:signal: Signal to be sent. This must be an instance of :class:`wlauto.core.signal.Signal`
or its subclasses.
:sender: The sender of the signal (typically, this would be ``self``). Some handlers may only
be subscribed to signals from a particular sender.
The rest of the parameters will be passed on as arguments to the handler.
"""
return dispatcher.send(signal, sender, *args, **kwargs)
# This will normally be set to log_error() by init_logging(); see wa.framework/log.py.
# Done this way to prevent a circular import dependency.
log_error_func = logger.error
def safe_send(signal, sender=dispatcher.Anonymous,
propagate=[KeyboardInterrupt], *args, **kwargs):
"""
Same as ``send``, except this will catch and log all exceptions raised
by handlers, except those specified in ``propagate`` argument (defaults
to just ``[KeyboardInterrupt]``).
"""
try:
send(signal, sender, *args, **kwargs)
except Exception as e:
if any(isinstance(e, p) for p in propagate):
raise e
log_error_func(e)
@contextmanager
def wrap(signal_name, sender=dispatcher.Anonymous, safe=False, *args, **kwargs):
"""Wraps the suite in before/after signals, ensuring
that the after signal is always sent."""
signal_name = signal_name.upper().replace('-', '_')
send_func = safe_send if safe else send
try:
before_signal = globals()['BEFORE_' + signal_name]
success_signal = globals()['SUCCESSFUL_' + signal_name]
after_signal = globals()['AFTER_' + signal_name]
except KeyError:
raise ValueError('Invalid wrapped signal name: {}'.format(signal_name))
try:
send_func(before_signal, sender, *args, **kwargs)
yield
send_func(success_signal, sender, *args, **kwargs)
finally:
send_func(after_signal, sender, *args, **kwargs)
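# Example (a sketch): emitting the paired WORKLOAD_SETUP signals around a
# setup routine. AFTER_WORKLOAD_SETUP is sent even if setup() raises;
# SUCCESSFUL_WORKLOAD_SETUP only if it does not.
#
#   with wrap('workload-setup', sender=self):
#       workload.setup(context)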

View File

@ -1,26 +0,0 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import namedtuple
VersionTuple = namedtuple('Version', ['major', 'minor', 'revision'])
version = VersionTuple(2, 4, 0)
def get_wa_version():
version_string = '{}.{}.{}'.format(version.major, version.minor, version.revision)
return version_string

View File

@ -1,104 +0,0 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A workload is the unit of execution. It represents a set of activities that are performed
and measured together, as well as the necessary setup and teardown procedures. A single
execution of a workload produces one :class:`wlauto.core.result.WorkloadResult` that is populated with zero or more
:class:`wlauto.core.result.WorkloadMetric`\ s and/or
:class:`wlauto.core.result.Artifact`\s by the workload and active instrumentation.
"""
from wlauto.core.plugin import Plugin
from wlauto.exceptions import WorkloadError
class Workload(Plugin):
"""
This is the base class for the workloads executed by the framework.
Each of the methods throwing NotImplementedError *must* be implemented
by the derived classes.
"""
kind = "workload"
supported_devices = []
supported_platforms = []
summary_metrics = []
def __init__(self, device, **kwargs):
"""
Creates a new Workload.
:param device: the Device on which the workload will be executed.
"""
super(Workload, self).__init__(**kwargs)
if self.supported_devices and device.name not in self.supported_devices:
raise WorkloadError('Workload {} does not support device {}'.format(self.name, device.name))
if self.supported_platforms and device.os not in self.supported_platforms:
raise WorkloadError('Workload {} does not support platform {}'.format(self.name, device.os))
self.device = device
def init_resources(self, context):
"""
This method may be used to perform early resource discovery and initialization. This is invoked
during the initial loading stage and before the device is ready, so cannot be used for any
device-dependent initialization. This method is invoked before the workload instance is
validated.
"""
pass
def initialize(self, context):
"""
This method should be used to perform once-per-run initialization of a workload instance, i.e.,
unlike ``setup()`` it will not be invoked on each iteration.
"""
pass
def setup(self, context):
"""
Perform the setup necessary to run the workload, such as copying the necessary files
to the device, configuring the environments, etc.
This is also the place to perform any on-device checks prior to attempting to execute
the workload.
"""
pass
def run(self, context):
"""Execute the workload. This is the method that performs the actual "work" of the workload."""
pass
def update_result(self, context):
"""
Update the result within the specified execution context with the metrics
from this workload iteration.
"""
pass
def teardown(self, context):
""" Perform any final clean up for the Workload. """
pass
def finalize(self, context):
pass
def __str__(self):
return '<Workload {}>'.format(self.name)
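# A minimal concrete workload sketch (hypothetical; illustrates the lifecycle
# methods above, and assumes context.result is the current IterationResult):
#
# import time
#
# class Idle(Workload):
#
#     name = 'idle'
#
#     def setup(self, context):
#         self.device.execute('true')  # sanity-check the device connection
#
#     def run(self, context):
#         time.sleep(30)  # "run" by doing nothing for 30 seconds
#
#     def update_result(self, context):
#         context.result.add_metric('duration', 30, 'seconds')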

View File

@ -1,162 +0,0 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wlauto.utils.misc import get_traceback
from devlib.exception import DevlibError, HostError, TargetError, TimeoutError
class WAError(Exception):
"""Base class for all Workload Automation exceptions."""
pass
class NotFoundError(WAError):
"""Raised when the specified item is not found."""
pass
class ValidationError(WAError):
"""Raised on failure to validate an plugin."""
pass
class DeviceError(WAError):
"""General Device error."""
pass
class DeviceNotRespondingError(WAError):
"""The device is not responding."""
def __init__(self, device):
super(DeviceNotRespondingError, self).__init__('Device {} is not responding.'.format(device))
class WorkloadError(WAError):
"""General Workload error."""
pass
class HostError(WAError):
"""Problem with the host on which WA is running."""
pass
class ModuleError(WAError):
"""
Problem with a module.
.. note:: Modules for specific plugin types should raise exceptions
appropriate to that plugin. E.g. a ``Device`` module should raise
``DeviceError``. This is intended for situations where a module is
unsure (and/or doesn't care) what its owner is.
"""
pass
class InstrumentError(WAError):
"""General Instrument error."""
pass
class ResultProcessorError(WAError):
"""General ResultProcessor error."""
pass
class ResourceError(WAError):
"""General Resolver error."""
pass
class CommandError(WAError):
"""Raised by commands when they have encountered an error condition
during execution."""
pass
class ToolError(WAError):
"""Raised by tools when they have encountered an error condition
during execution."""
pass
class LoaderError(WAError):
"""Raised when there is an error loading a plugin or
an external resource. Apart from the usual message, the __init__
takes an exc_info parameter which should be the result of
sys.exc_info() for the original exception (if any) that
caused the error."""
def __init__(self, message, exc_info=None):
super(LoaderError, self).__init__(message)
self.exc_info = exc_info
def __str__(self):
if self.exc_info:
orig = self.exc_info[1]
orig_name = type(orig).__name__
if isinstance(orig, WAError):
reason = 'because of:\n{}: {}'.format(orig_name, orig)
else:
reason = 'because of:\n{}\n{}: {}'.format(get_traceback(self.exc_info), orig_name, orig)
return '\n'.join([self.message, reason])
else:
return self.message
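# Example (a sketch, mirroring how the plugin loader uses this class):
# preserving the original traceback when re-raising a failed plugin import.
#
#   try:
#       module = imp.load_source(modname, filepath)
#   except ImportError:
#       raise LoaderError('Failed to load {}'.format(filepath), sys.exc_info())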
class ConfigError(WAError):
"""Raised when configuration provided is invalid. This error suggests that
the user should modify their config and try again."""
pass
class WorkerThreadError(WAError):
"""
This should get raised in the main thread if a non-WAError-derived exception occurs on
a worker/background thread. If a WAError-derived exception is raised in the worker, then
that exception should be re-raised on the main thread directly -- the main point of this is
to preserve the backtrace in the output, as backtraces do not get output for WAErrors.
"""
def __init__(self, thread, exc_info):
self.thread = thread
self.exc_info = exc_info
orig = self.exc_info[1]
orig_name = type(orig).__name__
message = 'Exception of type {} occurred on thread {}:\n'.format(orig_name, thread)
message += '{}\n{}: {}'.format(get_traceback(self.exc_info), orig_name, orig)
super(WorkerThreadError, self).__init__(message)
class SerializerSyntaxError(Exception):
"""
Error loading a serialized structure from/to a file handle.
"""
def __init__(self, message, line=None, column=None):
super(SerializerSyntaxError, self).__init__(message)
self.line = line
self.column = column
def __str__(self):
linestring = ' on line {}'.format(self.line) if self.line else ''
colstring = ' in column {}'.format(self.column) if self.column else ''
message = 'Syntax Error{}: {}'
return message.format(''.join([linestring, colstring]), self.message)

View File

@ -1,74 +0,0 @@
This directory contains external libraries and standalone utilities which have
been written/modified to work with Workload Automation (and thus need to be
included with WA rather than obtained from original sources).
bbench_server
=============
This is a small server that is used to detect when the ``bbench`` workload has completed.
``bbench`` navigates through a bunch of web pages in a browser using javascript.
It will cause the browser to send a GET request to the port that bbench_server is
listening on, indicating the end of the workload.
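For example, to have the server wait up to ten minutes for the browser's GET
request (the server listens on port 3030; the single argument is the timeout
in seconds):

    ./bbench_server 600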
daq_server
==========
Contains Daq server files that will run on a Windows machine. Please refer to
daq instrument documentation.
louie (third party)
=====
Python package that is itself a fork of (and now a replacement for) pydispatcher.
This library provides a signal dispatching mechanism. This has been modified for
WA to add prioritization to callbacks.
pmu_logger
==========
Source for the kernel driver that enables the logging of CCI counters to ftrace
on a periodic basis. This driver is required by the ``cci_pmu_logger`` instrument.
readenergy
==========
Outputs Juno internal energy/power/voltage/current measurements by reading APB
registers from memory. This is used by the ``juno_energy`` instrument.
revent
======
This is a tool that is used to both record and playback key press and screen tap
events. It is used to record UI manipulation for some workloads (such as games)
where it is not possible to use the Android UI Automator.
The tool is also included in binary form in wlauto/common/. In order to build
the tool from source, you will need to have Android NDK in your PATH.
stacktracer.py (third party)
==============
A module based on an ActiveState recipe that allows tracing thread stacks during
execution of a Python program. This is used through the ``--debug`` flag in WA
to ease debugging multi-threaded parts of the code.
terminalsize.py (third party)
===============
Implements a platform-agnostic way of determining terminal window size. Taken
from a public Github gist.
uiauto
======
Contains the utilities library for UI automation.

View File

@ -1,31 +0,0 @@
#!/bin/bash
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
BUILD_COMMAND=ndk-build
if [[ $(which $BUILD_COMMAND) ]] ; then
$BUILD_COMMAND
if [[ $? -eq 0 ]]; then
echo Copying to ../../workloads/bbench/
cp libs/armeabi/bbench_server ../../workloads/bbench/bin/armeabi/bbench_server
fi
else
echo Please make sure you have Android NDK in your PATH.
exit 1
fi

View File

@ -1,9 +0,0 @@
LOCAL_PATH:= $(call my-dir)
include $(CLEAR_VARS)
LOCAL_SRC_FILES:= bbench_server.cpp
LOCAL_MODULE := bbench_server
LOCAL_MODULE_TAGS := optional
LOCAL_STATIC_LIBRARIES := libc
LOCAL_SHARED_LIBRARIES :=
include $(BUILD_EXECUTABLE)

View File

@ -1,151 +0,0 @@
/* Copyright 2012-2015 ARM Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**************************************************************************/
/* Simple HTTP server program that will return on accepting connection */
/**************************************************************************/
/* Tested on Android ICS browser and FireFox browser */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netdb.h>
#include <arpa/inet.h>
#include <sys/wait.h>
#define SERVERPORT "3030"
void ExitOnError(int condition, const char *msg)
{
if(condition) { printf("Server: %s\n", msg); exit(1);}
}
void *GetInetAddr(struct sockaddr *sa)
{
if (sa->sa_family == AF_INET)
{
return &(((struct sockaddr_in*)sa)->sin_addr);
}
else
{
return &(((struct sockaddr_in6*)sa)->sin6_addr);
}
}
int main(int argc, char *argv[])
{
socklen_t addr_size;
struct addrinfo hints, *res;
int server_fd, client_fd;
int retval;
int timeout_in_seconds;
// Get the timeout value in seconds
if(argc < 2)
{
printf("Usage %s <timeout in seconds>\n", argv[0]);
exit(1);
}
else
{
timeout_in_seconds = atoi(argv[1]);
printf("Server: Waiting for connection on port %s with timeout of %d seconds\n", SERVERPORT, timeout_in_seconds);
}
/**************************************************************************/
/* Listen to a socket */
/**************************************************************************/
memset(&hints, 0, sizeof hints);
hints.ai_family = AF_UNSPEC; // use IPv4 or IPv6, whichever
hints.ai_socktype = SOCK_STREAM;
hints.ai_flags = AI_PASSIVE; // fill in my IP for me
getaddrinfo(NULL, SERVERPORT, &hints, &res);
server_fd = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
ExitOnError(server_fd < 0, "Socket creation failed");
retval = bind(server_fd, res->ai_addr, res->ai_addrlen);
ExitOnError(retval < 0, "Bind failed");
retval = listen(server_fd, 10);
ExitOnError(retval < 0, "Listen failed");
/**************************************************************************/
/* Wait for connection to arrive or time out */
/**************************************************************************/
fd_set readfds;
FD_ZERO(&readfds);
FD_SET(server_fd, &readfds);
// Timeout parameter
timeval tv;
tv.tv_sec = timeout_in_seconds;
tv.tv_usec = 0;
int ret = select(server_fd+1, &readfds, NULL, NULL, &tv);
ExitOnError(ret <= 0, "No connection established, timed out");
ExitOnError(FD_ISSET(server_fd, &readfds) == 0, "Error occurred in select");
/**************************************************************************/
/* Accept connection and print the information */
/**************************************************************************/
{
struct sockaddr_storage client_addr;
char client_addr_string[INET6_ADDRSTRLEN];
addr_size = sizeof client_addr;
client_fd = accept(server_fd, (struct sockaddr *)&client_addr, &addr_size);
ExitOnError(client_fd < 0, "Accept failed");
inet_ntop(client_addr.ss_family,
GetInetAddr((struct sockaddr *)&client_addr),
client_addr_string,
sizeof client_addr_string);
printf("Server: Received connection from %s\n", client_addr_string);
}
/**************************************************************************/
/* Send an acceptable HTTP response */
/**************************************************************************/
{
char response[] = "HTTP/1.1 200 OK\r\n"
"Content-Type: text/html\r\n"
"Connection: close\r\n"
"\r\n"
"<html>"
"<head>Local Server: Connection Accepted</head>"
"<body></body>"
"</html>";
int bytes_sent;
bytes_sent = send(client_fd, response, strlen(response), 0);
ExitOnError(bytes_sent < 0, "Sending Response failed");
}
close(client_fd);
close(server_fd);
return 0;
}

Binary file not shown.

View File

@ -1,25 +0,0 @@
#!/bin/bash
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
python setup.py sdist
rm -rf build
rm -f MANIFEST
if [[ -d dist ]]; then
mv dist/*.tar.gz ..
rm -rf dist
fi
find . -iname \*.pyc -delete

View File

@ -1,17 +0,0 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__version__ = '1.0.5'

View File

@ -1,380 +0,0 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=E1101,E1103,wrong-import-position
import os
import sys
from twisted.internet import reactor
from twisted.internet.protocol import Protocol, ClientFactory, ReconnectingClientFactory
from twisted.internet.error import ConnectionLost, ConnectionDone
from twisted.protocols.basic import LineReceiver
if __name__ == '__main__': # for debugging
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from daqpower import log
from daqpower.common import DaqServerRequest, DaqServerResponse, Status
from daqpower.config import get_config_parser
__all__ = ['execute_command', 'run_send_command', 'Status']
class Command(object):
def __init__(self, name, **params):
self.name = name
self.params = params
class CommandResult(object):
def __init__(self):
self.status = None
self.message = None
self.data = None
def __str__(self):
return '{} {}'.format(self.status, self.message)
class CommandExecutorProtocol(Protocol):
def __init__(self, command, timeout=10, retries=1):
self.command = command
self.sent_request = None
self.waiting_for_response = False
self.keep_going = None
self.ports_to_pull = None
self.factory = None
self.timeoutCallback = None
self.timeout = timeout
self.retries = retries
self.retry_count = 0
def connectionMade(self):
if self.command.name == 'get_data':
self.sendRequest('list_port_files')
else:
self.sendRequest(self.command.name, **self.command.params)
def connectionLost(self, reason=ConnectionDone):
if isinstance(reason, ConnectionLost):
self.errorOut('connection lost: {}'.format(reason))
elif self.waiting_for_response:
self.errorOut('Server closed connection without sending a response.')
else:
log.debug('connection terminated.')
def sendRequest(self, command, **params):
self.sent_request = DaqServerRequest(command, params)
request_string = self.sent_request.serialize()
log.debug('sending request: {}'.format(request_string))
self.transport.write(''.join([request_string, '\r\n']))
self.timeoutCallback = reactor.callLater(self.timeout, self.requestTimedOut)
self.waiting_for_response = True
def dataReceived(self, data):
self.keep_going = False
if self.waiting_for_response:
self.waiting_for_response = False
self.timeoutCallback.cancel()
try:
response = DaqServerResponse.deserialize(data)
except Exception as e:  # pylint: disable=W0703
self.errorOut('Invalid response: {} ({})'.format(data, e))
else:
if response.status != Status.ERROR:
self.processResponse(response) # may set self.keep_going
if not self.keep_going:
self.commandCompleted(response.status, response.message, response.data)
else:
self.errorOut(response.message)
else:
self.errorOut('unexpected data received: {}\n'.format(data))
def processResponse(self, response):
if self.sent_request.command in ['list_ports', 'list_port_files']:
self.processPortsResponse(response)
elif self.sent_request.command == 'list_devices':
self.processDevicesResponse(response)
elif self.sent_request.command == 'pull':
self.processPullResponse(response)
def processPortsResponse(self, response):
if 'ports' not in response.data:
self.errorOut('Response did not contain ports data: {} ({}).'.format(response, response.data))
ports = response.data['ports']
response.data = ports
if self.command.name == 'get_data':
if ports:
self.ports_to_pull = ports
self.sendPullRequest(self.ports_to_pull.pop())
else:
response.status = Status.OKISH
response.message = 'No ports were returned.'
def processDevicesResponse(self, response):
if response.status == Status.OK:
if 'devices' not in response.data:
self.errorOut('Response did not contain devices data: {} ({}).'.format(response, response.data))
devices = response.data['devices']
response.data = devices
def sendPullRequest(self, port_id):
self.sendRequest('pull', port_id=port_id)
self.keep_going = True
def processPullResponse(self, response):
if 'port_number' not in response.data:
self.errorOut('Response does not contain port number: {} ({}).'.format(response, response.data))
port_number = response.data.pop('port_number')
filename = self.sent_request.params['port_id'] + '.csv'
self.factory.initiateFileTransfer(filename, port_number)
if self.ports_to_pull:
self.sendPullRequest(self.ports_to_pull.pop())
def commandCompleted(self, status, message=None, data=None):
self.factory.result.status = status
self.factory.result.message = message
self.factory.result.data = data
self.transport.loseConnection()
def requestTimedOut(self):
self.retry_count += 1
if self.retry_count > self.retries:
self.errorOut("Request timed out; server failed to respond.")
else:
log.debug('Retrying...')
self.connectionMade()
def errorOut(self, message):
self.factory.errorOut(message)
class CommandExecutorFactory(ClientFactory):
protocol = CommandExecutorProtocol
wait_delay = 1
def __init__(self, config, command, timeout=10, retries=1):
self.config = config
self.command = command
self.timeout = timeout
self.retries = retries
self.result = CommandResult()
self.done = False
self.transfers_in_progress = {}
if command.name == 'get_data':
if 'output_directory' not in command.params:
self.errorOut('output_directory not specified for get_data command.')
self.output_directory = command.params['output_directory']
if not os.path.isdir(self.output_directory):
log.debug('Creating output directory {}'.format(self.output_directory))
os.makedirs(self.output_directory)
def buildProtocol(self, addr):
protocol = CommandExecutorProtocol(self.command, self.timeout, self.retries)
protocol.factory = self
return protocol
def initiateFileTransfer(self, filename, port):
log.debug('Downloading {} from port {}'.format(filename, port))
filepath = os.path.join(self.output_directory, filename)
session = FileReceiverFactory(filepath, self)
connector = reactor.connectTCP(self.config.host, port, session)
self.transfers_in_progress[session] = connector
def transferComplete(self, session):
connector = self.transfers_in_progress[session]
log.debug('Transfer on port {} complete.'.format(connector.port))
del self.transfers_in_progress[session]
def clientConnectionLost(self, connector, reason):
if self.transfers_in_progress:
log.debug('Waiting for the transfer(s) to complete.')
self.waitForTransfersToCompleteAndExit()
def clientConnectionFailed(self, connector, reason):
self.result.status = Status.ERROR
self.result.message = 'Could not connect to server.'
self.waitForTransfersToCompleteAndExit()
def waitForTransfersToCompleteAndExit(self):
if self.transfers_in_progress:
reactor.callLater(self.wait_delay, self.waitForTransfersToCompleteAndExit)
else:
log.debug('Stopping the reactor.')
reactor.stop()
def errorOut(self, message):
self.result.status = Status.ERROR
self.result.message = message
reactor.crash()
def __str__(self):
return '<CommandExecutorFactory {}>'.format(self.command.name)
__repr__ = __str__
class FileReceiver(LineReceiver): # pylint: disable=W0223
def __init__(self, path):
self.path = path
self.fh = None
self.factory = None
def connectionMade(self):
if os.path.isfile(self.path):
log.warning('overwriting existing file.')
os.remove(self.path)
self.fh = open(self.path, 'w')
def connectionLost(self, reason=ConnectionDone):
if self.fh:
self.fh.close()
def lineReceived(self, line):
line = line.rstrip('\r\n') + '\n'
self.fh.write(line)
class FileReceiverFactory(ReconnectingClientFactory):
def __init__(self, path, owner):
self.path = path
self.owner = owner
def buildProtocol(self, addr):
protocol = FileReceiver(self.path)
protocol.factory = self
self.resetDelay()
return protocol
def clientConnectionLost(self, connector, reason):
if isinstance(reason, ConnectionLost):
log.error('Connection lost: {}'.format(reason))
ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
else:
self.owner.transferComplete(self)
def clientConnectionFailed(self, connector, reason):
if isinstance(reason, ConnectionLost):
log.error('Connection failed: {}'.format(reason))
ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
def __str__(self):
return '<FileReceiver {}>'.format(self.path)
__repr__ = __str__
def execute_command(server_config, command, **kwargs):
before_fds = _get_open_fds() # see the comment in the finally clause below
if isinstance(command, basestring):
command = Command(command, **kwargs)
timeout = 300 if command.name in ['stop', 'pull'] else 10
factory = CommandExecutorFactory(server_config, command, timeout)
# reactors aren't designed to be re-startable. In order to be
# able to call execute_command multiple times, we need to force
# re-installation of the reactor; hence this hackery.
# TODO: look into implementing restartable reactors. According to the
# Twisted FAQ, there is no good reason why there isn't one:
# http://twistedmatrix.com/trac/wiki/FrequentlyAskedQuestions#WhycanttheTwistedsreactorberestarted
from twisted.internet import default
del sys.modules['twisted.internet.reactor']
default.install()
global reactor # pylint: disable=W0603
reactor = sys.modules['twisted.internet.reactor']
try:
reactor.connectTCP(server_config.host, server_config.port, factory)
reactor.run()
return factory.result
finally:
# re-startable reactor hack part 2.
# twisted hijacks SIGINT and doesn't bother to un-hijack it when the reactor
# stops. So we have to do it for it *rolls eyes*.
import signal
signal.signal(signal.SIGINT, signal.default_int_handler)
# OK, the reactor is also leaking file descriptors. Tracking down all
# of them is non-trivial, so instead we're just comparing the before
# and after lists of open FDs for the current process, and closing all
# new ones, as execute_command should never leave anything open after
# it exits (even when downloading data files from the server).
# TODO: This is way too hacky even compared to the rest of this function.
# Additionally, the current implementation ties this to UNIX,
# so in the long run, we need to do this properly and get the FDs
# from the reactor.
after_fds = _get_open_fds()
for fd in after_fds - before_fds:
try:
os.close(int(fd[1:]))
except OSError:
pass
# Below is the alternative code that gets FDs from the reactor, however
# at the moment it doesn't seem to get everything, which is why code
# above is used instead.
#for fd in reactor._selectables:
# os.close(fd)
#reactor._poller.close()
def _get_open_fds():
if os.name == 'posix':
import subprocess
pid = os.getpid()
procs = subprocess.check_output(["lsof", '-w', '-Ff', "-p", str(pid)])
return set(procs.split())
else:
# TODO: Implement the Windows equivalent.
return set()  # must be a set so the FD arithmetic in execute_command() works
def run_send_command():
"""Main entry point when running as a script -- should not be invoked form another module."""
parser = get_config_parser()
parser.add_argument('command')
parser.add_argument('-o', '--output-directory', metavar='DIR', default='.',
help='Directory used to output data files (defaults to the current directory).')
parser.add_argument('--verbose', help='Produce verbose output.', action='store_true', default=False)
args = parser.parse_args()
if not args.device_config.labels:
args.device_config.labels = ['PORT_{}'.format(i) for i in xrange(len(args.device_config.resistor_values))]
if args.verbose:
log.start_logging('DEBUG')
else:
log.start_logging('INFO', fmt='%(levelname)-8s %(message)s')
if args.command == 'configure':
args.device_config.validate()
command = Command(args.command, config=args.device_config)
elif args.command == 'get_data':
command = Command(args.command, output_directory=args.output_directory)
else:
command = Command(args.command)
result = execute_command(args.server_config, command)
print result
if result.data:
print result.data
if __name__ == '__main__':
run_send_command()
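# A minimal programmatic-usage sketch (illustrative, not part of the
# original CLI). The host and resistor value below are assumptions.
#
#     from daqpower.config import ServerConfiguration, DeviceConfiguration
#
#     server_config = ServerConfiguration(host='192.168.0.2', port=45677)
#     device_config = DeviceConfiguration(device_id='Dev1', v_range=2.5,
#                                         dv_range=0.2, sampling_rate=10000,
#                                         resistor_values=[0.005], channel_map=None,
#                                         labels=['PORT_0'])
#     device_config.validate()
#     # The 'configure' string is wrapped in a Command and sent to the server;
#     # the returned CommandResult carries .status, .message and .data.
#     result = execute_command(server_config, 'configure', config=device_config)
#     print result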

View File

@ -1,103 +0,0 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=E1101
import json
class Serializer(json.JSONEncoder):
def default(self, o): # pylint: disable=E0202
if isinstance(o, Serializable):
return o.serialize()
if isinstance(o, EnumEntry):
return o.name
return json.JSONEncoder.default(self, o)
class Serializable(object):
@classmethod
def deserialize(cls, text):
return cls(**json.loads(text))
def serialize(self, d=None):
if d is None:
d = self.__dict__
return json.dumps(d, cls=Serializer)
class DaqServerRequest(Serializable):
def __init__(self, command, params=None): # pylint: disable=W0231
self.command = command
self.params = params or {}
class DaqServerResponse(Serializable):
def __init__(self, status, message=None, data=None): # pylint: disable=W0231
self.status = status
self.message = message.strip().replace('\r\n', ' ') if message else ''
self.data = data or {}
def __str__(self):
return '{} {}'.format(self.status, self.message or '')
class EnumEntry(object):
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
def __cmp__(self, other):
return cmp(self.name, str(other))
class Enum(object):
"""
Assuming MyEnum = Enum('A', 'B'),
MyEnum.A and MyEnum.B are valid values.
a = MyEnum.A
(a == MyEnum.A) == True
(a in MyEnum) == True
MyEnum('A') == MyEnum.A
str(MyEnum.A) == 'A'
"""
def __init__(self, *args):
for a in args:
setattr(self, a, EnumEntry(a))
def __call__(self, value):
if value not in self.__dict__:
raise ValueError('Not an enum value: {}'.format(value))
return self.__dict__[value]
def __iter__(self):
for e in self.__dict__:
yield self.__dict__[e]
Status = Enum('OK', 'OKISH', 'ERROR')
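# Illustrative example of the wire format these classes produce (the key
# order in the emitted JSON may vary):
#
#     request = DaqServerRequest('pull', params={'port_id': 'PORT_0'})
#     request.serialize()
#     # -> something like '{"params": {"port_id": "PORT_0"}, "command": "pull"}'
#
#     response = DaqServerResponse(Status.OK, data={'port_number': 45678})
#     str(response)  # -> 'OK '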

View File

@ -1,153 +0,0 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from daqpower.common import Serializable
class ConfigurationError(Exception):
"""Raised when configuration passed into DaqServer is invaid."""
pass
class DeviceConfiguration(Serializable):
"""Encapulates configuration for the DAQ, typically, passed from
the client."""
valid_settings = ['device_id', 'v_range', 'dv_range', 'sampling_rate', 'resistor_values', 'labels']
default_device_id = 'Dev1'
default_v_range = 2.5
default_dv_range = 0.2
default_sampling_rate = 10000
# Channel map used in DAQ 6363 and similar.
default_channel_map = (0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23)
@property
def number_of_ports(self):
return len(self.resistor_values)
def __init__(self, **kwargs): # pylint: disable=W0231
try:
self.device_id = kwargs.pop('device_id') or self.default_device_id
self.v_range = float(kwargs.pop('v_range') or self.default_v_range)
self.dv_range = float(kwargs.pop('dv_range') or self.default_dv_range)
self.sampling_rate = int(kwargs.pop('sampling_rate') or self.default_sampling_rate)
self.resistor_values = kwargs.pop('resistor_values') or []
self.channel_map = kwargs.pop('channel_map') or self.default_channel_map
self.labels = (kwargs.pop('labels') or
['PORT_{}.csv'.format(i) for i in xrange(len(self.resistor_values))])
except KeyError, e:
raise ConfigurationError('Missing config: {}'.format(e.message))
if kwargs:
raise ConfigurationError('Unexpected config: {}'.format(kwargs))
def validate(self):
if not self.number_of_ports:
raise ConfigurationError('No resistor values were specified.')
if len(self.resistor_values) != len(self.labels):
message = 'The number of resistors ({}) does not match the number of labels ({})'
raise ConfigurationError(message.format(len(self.resistor_values), len(self.labels)))
def __str__(self):
return self.serialize()
__repr__ = __str__
class ServerConfiguration(object):
"""Client-side server configuration."""
valid_settings = ['host', 'port']
default_host = '127.0.0.1'
default_port = 45677
def __init__(self, **kwargs):
self.host = kwargs.pop('host', None) or self.default_host
self.port = kwargs.pop('port', None) or self.default_port
if kwargs:
raise ConfigurationError('Unexpected config: {}'.format(kwargs))
def validate(self):
if not self.host:
raise ConfigurationError('Server host not specified.')
if not self.port:
raise ConfigurationError('Server port not specified.')
elif not isinstance(self.port, int):
raise ConfigurationError('Server port must be an integer.')
class UpdateDeviceConfig(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
setting = option_string.strip('-').replace('-', '_')
if setting not in DeviceConfiguration.valid_settings:
raise ConfigurationError('Unknown option: {}'.format(option_string))
setattr(namespace._device_config, setting, values) # pylint: disable=protected-access
class UpdateServerConfig(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
setting = option_string.strip('-').replace('-', '_')
if setting not in namespace.server_config.valid_settings:
raise ConfigurationError('Unknown option: {}'.format(option_string))
setattr(namespace.server_config, setting, values)
class ConfigNamespace(object):
class _N(object):
def __init__(self):
self.device_id = None
self.v_range = None
self.dv_range = None
self.sampling_rate = None
self.resistor_values = None
self.labels = None
self.channel_map = None
@property
def device_config(self):
return DeviceConfiguration(**self._device_config.__dict__)
def __init__(self):
self._device_config = self._N()
self.server_config = ServerConfiguration()
class ConfigArgumentParser(argparse.ArgumentParser):
def parse_args(self, *args, **kwargs):
kwargs['namespace'] = ConfigNamespace()
return super(ConfigArgumentParser, self).parse_args(*args, **kwargs)
def get_config_parser(server=True, device=True):
parser = ConfigArgumentParser()
if device:
parser.add_argument('--device-id', action=UpdateDeviceConfig)
parser.add_argument('--v-range', action=UpdateDeviceConfig, type=float)
parser.add_argument('--dv-range', action=UpdateDeviceConfig, type=float)
parser.add_argument('--sampling-rate', action=UpdateDeviceConfig, type=int)
parser.add_argument('--resistor-values', action=UpdateDeviceConfig, type=float, nargs='*')
parser.add_argument('--labels', action=UpdateDeviceConfig, nargs='*')
if server:
parser.add_argument('--host', action=UpdateServerConfig)
parser.add_argument('--port', action=UpdateServerConfig, type=int)
return parser
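# Illustrative example: parsing client-side options into the configuration
# objects above (all values here are assumptions):
#
#     parser = get_config_parser()
#     args = parser.parse_args(['--host', '192.168.0.2',
#                               '--resistor-values', '0.005', '0.005',
#                               '--labels', 'A15', 'A7'])
#     args.device_config.validate()  # DeviceConfiguration built from the namespace
#     args.server_config.port       # -> 45677 (the default)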

View File

@ -1,347 +0,0 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Creates a new DAQ device class. This class assumes that there is a
DAQ connected and mapped as Dev1. It assumes a specific wiring scheme on the DAQ (it is not
meant to be a generic DAQ interface). The following diagram shows the wiring for one DaqDevice
port::
Port 0
========
| A0+ <--- Vr -------------------------|
| |
| A0- <--- GND -------------------// |
| |
| A1+ <--- V+ ------------|-------V+ |
| r | |
| A1- <--- Vr --/\/\/\----| |
| | |
| | |
| |--------------------------|
========
:number_of_ports: The number of ports connected on the DAQ. Each port requires two DAQ channels:
one for the source voltage, and one for the voltage drop over the
resistor r (V+ - Vr), which allows us to derive the current.
:resistor_value: The resistance of r. Typically a few milliohms.
:downsample: The number of samples combined to create one power point. If set to one,
each sample corresponds to one reported power point.
:sampling_rate: The rate at which DAQ takes a sample from each channel.
"""
# pylint: disable=F0401,E1101,W0621,no-name-in-module,wrong-import-position,wrong-import-order
import os
import sys
import csv
import time
import threading
from Queue import Queue, Empty
import numpy
from PyDAQmx import Task, DAQError
try:
from PyDAQmx.DAQmxFunctions import DAQmxGetSysDevNames
CAN_ENUMERATE_DEVICES = True
except ImportError: # earlier driver version
DAQmxGetSysDevNames = None
CAN_ENUMERATE_DEVICES = False
from PyDAQmx.DAQmxTypes import int32, byref, create_string_buffer
from PyDAQmx.DAQmxConstants import (DAQmx_Val_Diff, DAQmx_Val_Volts, DAQmx_Val_GroupByScanNumber, DAQmx_Val_Auto,
DAQmx_Val_Rising, DAQmx_Val_ContSamps)
try:
from PyDAQmx.DAQmxConstants import DAQmx_Val_Acquired_Into_Buffer
callbacks_supported = True
except ImportError: # earlier driver version
DAQmx_Val_Acquired_Into_Buffer = None
callbacks_supported = False
from daqpower import log
def list_available_devices():
"""Returns the list of DAQ devices visible to the driver."""
if DAQmxGetSysDevNames:
bufsize = 2048  # Should be plenty for all but the most pathological of situations.
buf = create_string_buffer('\000' * bufsize)
DAQmxGetSysDevNames(buf, bufsize)
return buf.value.split(',')
else:
return []
class ReadSamplesBaseTask(Task):
def __init__(self, config, consumer):
Task.__init__(self)
self.config = config
self.consumer = consumer
self.sample_buffer_size = (self.config.sampling_rate + 1) * self.config.number_of_ports * 2
self.samples_read = int32()
self.remainder = []
# create voltage channels
for i in xrange(0, 2 * self.config.number_of_ports, 2):
self.CreateAIVoltageChan('{}/ai{}'.format(config.device_id, config.channel_map[i]),
'', DAQmx_Val_Diff,
-config.v_range, config.v_range,
DAQmx_Val_Volts, None)
self.CreateAIVoltageChan('{}/ai{}'.format(config.device_id, config.channel_map[i + 1]),
'', DAQmx_Val_Diff,
-config.dv_range, config.dv_range,
DAQmx_Val_Volts, None)
# configure sampling rate
self.CfgSampClkTiming('',
self.config.sampling_rate,
DAQmx_Val_Rising,
DAQmx_Val_ContSamps,
self.config.sampling_rate)
class ReadSamplesCallbackTask(ReadSamplesBaseTask):
"""
More recent versions of the driver (on Windows) support callbacks.
"""
def __init__(self, config, consumer):
ReadSamplesBaseTask.__init__(self, config, consumer)
# register callbacks
self.AutoRegisterEveryNSamplesEvent(DAQmx_Val_Acquired_Into_Buffer, self.config.sampling_rate // 2, 0)
self.AutoRegisterDoneEvent(0)
def EveryNCallback(self):
# Note to future self: do NOT try to "optimize" this by re-using the same array and just
# zeroing it out each time. The writes happen asynchronously, and if you zero it out too soon,
# you'll see a whole bunch of 0.0's in the output. If you want to go down that route, you'll need to
# cycle through several arrays and have the code that's actually doing the writing zero them out and
# mark them as available to be used by this call. But, honestly, numpy array allocation does not
# appear to be a bottleneck at the moment, so the current solution is "good enough".
samples_buffer = numpy.zeros((self.sample_buffer_size,), dtype=numpy.float64)
self.ReadAnalogF64(DAQmx_Val_Auto, 0.0, DAQmx_Val_GroupByScanNumber, samples_buffer,
self.sample_buffer_size, byref(self.samples_read), None)
self.consumer.write((samples_buffer, self.samples_read.value))
def DoneCallback(self, status): # pylint: disable=W0613,R0201
return 0 # The function should return an integer
class ReadSamplesThreadedTask(ReadSamplesBaseTask):
"""
Earlier versions of the driver (on CentOS) do not support callbacks, so we need
to create a thread to periodically poll the buffer.
"""
def __init__(self, config, consumer):
ReadSamplesBaseTask.__init__(self, config, consumer)
self.poller = DaqPoller(self)
def StartTask(self):
ReadSamplesBaseTask.StartTask(self)
self.poller.start()
def StopTask(self):
self.poller.stop()
ReadSamplesBaseTask.StopTask(self)
class DaqPoller(threading.Thread):
def __init__(self, task, wait_period=1):
super(DaqPoller, self).__init__()
self.task = task
self.wait_period = wait_period
self._stop_signal = threading.Event()
self.samples_buffer = numpy.zeros((self.task.sample_buffer_size,), dtype=numpy.float64)
def run(self):
while not self._stop_signal.is_set():
# Note to future self: see the comment inside EveryNCallback() above
samples_buffer = numpy.zeros((self.task.sample_buffer_size,), dtype=numpy.float64)
try:
self.task.ReadAnalogF64(DAQmx_Val_Auto, self.wait_period, DAQmx_Val_GroupByScanNumber, samples_buffer,
self.task.sample_buffer_size, byref(self.task.samples_read), None)
except DAQError:
pass
self.task.consumer.write((samples_buffer, self.task.samples_read.value))
def stop(self):
self._stop_signal.set()
self.join()
class AsyncWriter(threading.Thread):
def __init__(self, wait_period=1):
super(AsyncWriter, self).__init__()
self.daemon = True
self.wait_period = wait_period
self.running = threading.Event()
self._stop_signal = threading.Event()
self._queue = Queue()
def write(self, stuff):
if self._stop_signal.is_set():
raise IOError('Attempting to write to {} after it has been closed.'.format(self.__class__.__name__))
self._queue.put(stuff)
def do_write(self, stuff):
raise NotImplementedError()
def run(self):
self.running.set()
while True:
if self._stop_signal.is_set() and self._queue.empty():
break
try:
self.do_write(self._queue.get(block=True, timeout=self.wait_period))
except Empty:
pass # carry on
self.running.clear()
def stop(self):
self._stop_signal.set()
def wait(self):
while self.running.is_set():
time.sleep(self.wait_period)
class PortWriter(object):
def __init__(self, path):
self.path = path
self.fh = open(path, 'w', 0)
self.writer = csv.writer(self.fh)
self.writer.writerow(['power', 'voltage'])
def write(self, row):
self.writer.writerow(row)
def close(self):
self.fh.close()
def __del__(self):
self.close()
class SampleProcessorError(Exception):
pass
class SampleProcessor(AsyncWriter):
def __init__(self, resistor_values, output_directory, labels):
super(SampleProcessor, self).__init__()
self.resistor_values = resistor_values
self.output_directory = output_directory
self.labels = labels
self.number_of_ports = len(resistor_values)
if len(self.labels) != self.number_of_ports:
message = 'Number of labels ({}) does not match number of ports ({}).'
raise SampleProcessorError(message.format(len(self.labels), self.number_of_ports))
self.port_writers = []
def do_write(self, sample_tuple):
samples, number_of_samples = sample_tuple
for i in xrange(0, number_of_samples * self.number_of_ports * 2, self.number_of_ports * 2):
for j in xrange(self.number_of_ports):
V = float(samples[i + 2 * j])
DV = float(samples[i + 2 * j + 1])
P = V * (DV / self.resistor_values[j])
self.port_writers[j].write([P, V])
def start(self):
for label in self.labels:
port_file = self.get_port_file_path(label)
writer = PortWriter(port_file)
self.port_writers.append(writer)
super(SampleProcessor, self).start()
def stop(self):
super(SampleProcessor, self).stop()
self.wait()
for writer in self.port_writers:
writer.close()
def get_port_file_path(self, port_id):
if port_id in self.labels:
return os.path.join(self.output_directory, port_id + '.csv')
else:
raise SampleProcessorError('Invalid port ID: {}'.format(port_id))
def __del__(self):
self.stop()
class DaqRunner(object):
@property
def number_of_ports(self):
return self.config.number_of_ports
def __init__(self, config, output_directory):
self.config = config
self.processor = SampleProcessor(config.resistor_values, output_directory, config.labels)
if callbacks_supported:
self.task = ReadSamplesCallbackTask(config, self.processor)
else:
self.task = ReadSamplesThreadedTask(config, self.processor) # pylint: disable=redefined-variable-type
self.is_running = False
def start(self):
log.debug('Starting sample processor.')
self.processor.start()
log.debug('Starting DAQ Task.')
self.task.StartTask()
self.is_running = True
log.debug('Runner started.')
def stop(self):
self.is_running = False
log.debug('Stopping DAQ Task.')
self.task.StopTask()
log.debug('Stopping sample processor.')
self.processor.stop()
log.debug('Runner stopped.')
def get_port_file_path(self, port_id):
return self.processor.get_port_file_path(port_id)
if __name__ == '__main__':
from collections import namedtuple
DeviceConfig = namedtuple('DeviceConfig', ['device_id', 'channel_map', 'resistor_values',
'v_range', 'dv_range', 'sampling_rate',
'number_of_ports', 'labels'])
channel_map = (0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23)
resistor_values = [0.005]
labels = ['PORT_0']
dev_config = DeviceConfig('Dev1', channel_map, resistor_values, 2.5, 0.2, 10000, len(resistor_values), labels)
if len(sys.argv) != 3:
print 'Usage: {} OUTDIR DURATION'.format(os.path.basename(__file__))
sys.exit(1)
output_directory = sys.argv[1]
duration = float(sys.argv[2])
print "Avialable devices:", list_available_devices()
runner = DaqRunner(dev_config, output_directory)
runner.start()
time.sleep(duration)
runner.stop()

View File

@ -1,58 +0,0 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from twisted.python import log
__all__ = ['debug', 'info', 'warning', 'error', 'critical', 'start_logging']
debug = lambda x: log.msg(x, logLevel=logging.DEBUG)
info = lambda x: log.msg(x, logLevel=logging.INFO)
warning = lambda x: log.msg(x, logLevel=logging.WARNING)
error = lambda x: log.msg(x, logLevel=logging.ERROR)
critical = lambda x: log.msg(x, logLevel=logging.CRITICAL)
class CustomLoggingObserver(log.PythonLoggingObserver):
def __init__(self, loggerName="twisted"):
super(CustomLoggingObserver, self).__init__(loggerName)
if hasattr(self, '_newObserver'):  # new versions of Twisted
self.logger = self._newObserver.logger # pylint: disable=no-member
def emit(self, eventDict):
if 'logLevel' in eventDict:
level = eventDict['logLevel']
elif eventDict['isError']:
level = logging.ERROR
else:
# All of that just to override this one line from the
# default INFO level...
level = logging.DEBUG
text = log.textFromEventDict(eventDict)
if text is None:
return
self.logger.log(level, text)
logObserver = CustomLoggingObserver()
logObserver.start()
def start_logging(level, fmt='%(asctime)s %(levelname)-8s: %(message)s'):
logging.basicConfig(level=getattr(logging, level), format=fmt)

View File

@ -1,526 +0,0 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=E1101,W0613,wrong-import-position
from __future__ import division
import os
import sys
import argparse
import shutil
import socket
import time
from datetime import datetime, timedelta
from zope.interface import implements
from twisted.protocols.basic import LineReceiver
from twisted.internet.protocol import Factory, Protocol
from twisted.internet import reactor, interfaces
from twisted.internet.error import ConnectionLost, ConnectionDone
if __name__ == "__main__": # for debugging
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from daqpower import log
from daqpower.config import DeviceConfiguration
from daqpower.common import DaqServerRequest, DaqServerResponse, Status
try:
from daqpower.daq import DaqRunner, list_available_devices, CAN_ENUMERATE_DEVICES
__import_error = None
except ImportError as e:
# May be using debug mode.
__import_error = e
DaqRunner = None
list_available_devices = lambda: ['Dev1']
class ProtocolError(Exception):
pass
class DummyDaqRunner(object):
"""Dummy stub used when running in debug mode."""
num_rows = 200
@property
def number_of_ports(self):
return self.config.number_of_ports
def __init__(self, config, output_directory):
log.info('Creating runner with {} {}'.format(config, output_directory))
self.config = config
self.output_directory = output_directory
self.is_running = False
def start(self):
import csv, random # pylint: disable=multiple-imports
log.info('runner started')
for i in xrange(self.config.number_of_ports):
rows = [['power', 'voltage']] + [[random.gauss(1.0, 1.0), random.gauss(1.0, 0.1)]
for _ in xrange(self.num_rows)]
with open(self.get_port_file_path(self.config.labels[i]), 'wb') as wfh:
writer = csv.writer(wfh)
writer.writerows(rows)
self.is_running = True
def stop(self):
self.is_running = False
log.info('runner stopped')
def get_port_file_path(self, port_id):
if port_id in self.config.labels:
return os.path.join(self.output_directory, '{}.csv'.format(port_id))
else:
raise Exception('Invalid port id: {}'.format(port_id))
class DaqServer(object):
def __init__(self, base_output_directory):
self.base_output_directory = os.path.abspath(base_output_directory)
if os.path.isdir(self.base_output_directory):
log.info('Using output directory: {}'.format(self.base_output_directory))
else:
log.info('Creating new output directory: {}'.format(self.base_output_directory))
os.makedirs(self.base_output_directory)
self.runner = None
self.output_directory = None
self.labels = None
def configure(self, config_string):
message = None
if self.runner:
message = 'Configuring a new session before previous session has been terminated.'
log.warning(message)
if self.runner.is_running:
self.runner.stop()
config = DeviceConfiguration.deserialize(config_string)
config.validate()
self.output_directory = self._create_output_directory()
self.labels = config.labels
log.info('Writing port files to {}'.format(self.output_directory))
self.runner = DaqRunner(config, self.output_directory)
return message
def start(self):
if self.runner:
if not self.runner.is_running:
self.runner.start()
else:
message = 'Calling start() before stop() has been called. Data up to this point will be lost.'
log.warning(message)
self.runner.stop()
self.runner.start()
return message
else:
raise ProtocolError('Start called before a session has been configured.')
def stop(self):
if self.runner:
if self.runner.is_running:
self.runner.stop()
else:
message = 'Attempting to stop() before start() was invoked.'
log.warning(message)
self.runner.stop()
return message
else:
raise ProtocolError('Stop called before a session has been configured.')
def list_devices(self): # pylint: disable=no-self-use
return list_available_devices()
def list_ports(self):
return self.labels
def list_port_files(self):
if not self.runner:
raise ProtocolError('Attempting to list port files before session has been configured.')
ports_with_files = []
for port_id in self.labels:
path = self.get_port_file_path(port_id)
if os.path.isfile(path):
ports_with_files.append(port_id)
return ports_with_files
def get_port_file_path(self, port_id):
if not self.runner:
raise ProtocolError('Attempting to get port file path before session has been configured.')
return self.runner.get_port_file_path(port_id)
def terminate(self):
message = None
if self.runner:
if self.runner.is_running:
message = 'Terminating session before runner has been stopped.'
log.warning(message)
self.runner.stop()
self.runner = None
if self.output_directory and os.path.isdir(self.output_directory):
shutil.rmtree(self.output_directory)
self.output_directory = None
log.info('Session terminated.')
else: # Runner has not been created.
message = 'Attempting to close session before it has been configured.'
log.warning(message)
return message
def _create_output_directory(self):
basename = datetime.now().strftime('%Y-%m-%d_%H%M%S%f')
dirname = os.path.join(self.base_output_directory, basename)
os.makedirs(dirname)
return dirname
def __del__(self):
if self.runner:
self.runner.stop()
def __str__(self):
return '({})'.format(self.base_output_directory)
__repr__ = __str__
class DaqControlProtocol(LineReceiver): # pylint: disable=W0223
def __init__(self, daq_server):
self.daq_server = daq_server
self.factory = None
def lineReceived(self, line):
line = line.strip()
log.info('Received: {}'.format(line))
try:
request = DaqServerRequest.deserialize(line)
except Exception, e: # pylint: disable=W0703
# PyDAQmx exceptions use "mess" rather than the standard "message"
# to pass errors...
message = getattr(e, 'mess', e.message)
self.sendError('Received bad request ({}: {})'.format(e.__class__.__name__, message))
else:
self.processRequest(request)
def processRequest(self, request):
try:
if request.command == 'configure':
self.configure(request)
elif request.command == 'start':
self.start(request)
elif request.command == 'stop':
self.stop(request)
elif request.command == 'list_devices':
self.list_devices(request)
elif request.command == 'list_ports':
self.list_ports(request)
elif request.command == 'list_port_files':
self.list_port_files(request)
elif request.command == 'pull':
self.pull_port_data(request)
elif request.command == 'close':
self.terminate(request)
else:
self.sendError('Received unknown command: {}'.format(request.command))
except Exception, e: # pylint: disable=W0703
message = getattr(e, 'mess', e.message)
self.sendError('{}: {}'.format(e.__class__.__name__, message))
def configure(self, request):
if 'config' in request.params:
result = self.daq_server.configure(request.params['config'])
if not result:
self.sendResponse(Status.OK)
else:
self.sendResponse(Status.OKISH, message=result)
else:
self.sendError('Invalid config; config string not provided.')
def start(self, request):
result = self.daq_server.start()
if not result:
self.sendResponse(Status.OK)
else:
self.sendResponse(Status.OKISH, message=result)
def stop(self, request):
result = self.daq_server.stop()
if not result:
self.sendResponse(Status.OK)
else:
self.sendResponse(Status.OKISH, message=result)
def pull_port_data(self, request):
if 'port_id' in request.params:
port_id = request.params['port_id']
port_file = self.daq_server.get_port_file_path(port_id)
if os.path.isfile(port_file):
port = self._initiate_file_transfer(port_file)
self.sendResponse(Status.OK, data={'port_number': port})
else:
self.sendError('File for port {} does not exist.'.format(port_id))
else:
self.sendError('Invalid pull request; port id not provided.')
def list_devices(self, request):
if CAN_ENUMERATE_DEVICES:
devices = self.daq_server.list_devices()
self.sendResponse(Status.OK, data={'devices': devices})
else:
message = "Server does not support DAQ device enumration"
self.sendResponse(Status.OKISH, message=message)
def list_ports(self, request):
port_labels = self.daq_server.list_ports()
self.sendResponse(Status.OK, data={'ports': port_labels})
def list_port_files(self, request):
port_labels = self.daq_server.list_port_files()
self.sendResponse(Status.OK, data={'ports': port_labels})
def terminate(self, request):
status = Status.OK
message = ''
if self.factory.transfer_sessions:
message = 'Terminating with file transfer sessions in progress. '
log.warning(message)
for session in list(self.factory.transfer_sessions):  # copy; transferComplete() deletes entries
self.factory.transferComplete(session)
message += self.daq_server.terminate() or ''
if message:
status = Status.OKISH
self.sendResponse(status, message)
def sendError(self, message):
log.error(message)
self.sendResponse(Status.ERROR, message)
def sendResponse(self, status, message=None, data=None):
response = DaqServerResponse(status, message=message, data=data)
self.sendLine(response.serialize())
def sendLine(self, line):
log.info('Responding: {}'.format(line))
LineReceiver.sendLine(self, line.replace('\r\n', ''))
def _initiate_file_transfer(self, filepath):
sender_factory = FileSenderFactory(filepath, self.factory)
connector = reactor.listenTCP(0, sender_factory)
self.factory.transferInitiated(sender_factory, connector)
return connector.getHost().port
class DaqFactory(Factory):
protocol = DaqControlProtocol
check_alive_period = 5 * 60
max_transfer_lifetime = 30 * 60
def __init__(self, server, cleanup_period=24 * 60 * 60, cleanup_after_days=5):
self.server = server
self.cleanup_period = cleanup_period
self.cleanup_threshold = timedelta(cleanup_after_days)
self.transfer_sessions = {}
def buildProtocol(self, addr):
proto = DaqControlProtocol(self.server)
proto.factory = self
reactor.callLater(self.check_alive_period, self.pulse)
reactor.callLater(self.cleanup_period, self.perform_cleanup)
return proto
def clientConnectionLost(self, connector, reason):
log.msg('client connection lost: {}.'.format(reason))
if not isinstance(reason, ConnectionLost):
log.msg('ERROR: Client terminated connection mid-transfer.')
for session in list(self.transfer_sessions):  # copy; transferComplete() deletes entries
self.transferComplete(session)
def transferInitiated(self, session, connector):
self.transfer_sessions[session] = (time.time(), connector)
def transferComplete(self, session, reason='OK'):
if reason != 'OK':
log.error(reason)
self.transfer_sessions[session][1].stopListening()
del self.transfer_sessions[session]
def pulse(self):
"""Close down any file tranfer sessions that have been open for too long."""
current_time = time.time()
for session in list(self.transfer_sessions):  # copy; transferComplete() may delete entries
start_time, conn = self.transfer_sessions[session]
if (current_time - start_time) > self.max_transfer_lifetime:
message = '{} session on port {} timed out'
self.transferComplete(session, message.format(session, conn.getHost().port))
if self.transfer_sessions:
reactor.callLater(self.check_alive_period, self.pulse)
def perform_cleanup(self):
"""
Clean up old uncollected data files to recover disk space.
"""
log.msg('Performing cleanup of the output directory...')
base_directory = self.server.base_output_directory
current_time = datetime.now()
for entry in os.listdir(base_directory):
entry_path = os.path.join(base_directory, entry)
entry_ctime = datetime.fromtimestamp(os.path.getctime(entry_path))
existence_time = current_time - entry_ctime
if existence_time > self.cleanup_threshold:
log.debug('Removing {} (existed for {})'.format(entry, existence_time))
shutil.rmtree(entry_path)
else:
log.debug('Keeping {} (existed for {})'.format(entry, existence_time))
log.msg('Cleanup complete.')
def __str__(self):
return '<DAQ {}>'.format(self.server)
__repr__ = __str__
class FileReader(object):
implements(interfaces.IPushProducer)
def __init__(self, filepath):
self.fh = open(filepath)
self.proto = None
self.done = False
self._paused = True
def setProtocol(self, proto):
self.proto = proto
def resumeProducing(self):
if not self.proto:
raise ProtocolError('resumeProducing called with no protocol set.')
self._paused = False
try:
while not self._paused:
line = self.fh.next().rstrip('\n') + '\r\n'
self.proto.transport.write(line)
except StopIteration:
log.debug('Sent everything.')
self.stopProducing()
def pauseProducing(self):
self._paused = True
def stopProducing(self):
self.done = True
self.fh.close()
self.proto.transport.unregisterProducer()
self.proto.transport.loseConnection()
class FileSenderProtocol(Protocol):
def __init__(self, reader):
self.reader = reader
self.factory = None
def connectionMade(self):
self.transport.registerProducer(self.reader, True)
self.reader.resumeProducing()
def connectionLost(self, reason=ConnectionDone):
if self.reader.done:
self.factory.transferComplete()
else:
self.reader.pauseProducing()
self.transport.unregisterProducer()
class FileSenderFactory(Factory):
@property
def done(self):
if self.reader:
return self.reader.done
else:
return None
def __init__(self, path, owner):
self.path = os.path.abspath(path)
self.reader = None
self.owner = owner
def buildProtocol(self, addr):
if not self.reader:
self.reader = FileReader(self.path)
proto = FileSenderProtocol(self.reader)
proto.factory = self
self.reader.setProtocol(proto)
return proto
def transferComplete(self):
self.owner.transferComplete(self)
def __hash__(self):
return hash(self.path)
def __str__(self):
return '<FileSender {}>'.format(self.path)
__repr__ = __str__
def run_server():
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--directory', help='Working directory', metavar='DIR', default='.')
parser.add_argument('-p', '--port', help='port the server will listen on.',
metavar='PORT', default=45677, type=int)
parser.add_argument('-c', '--cleanup-after', type=int, default=5, metavar='DAYS',
help="""
Server will periodically clean up data files that are older than the number of
days specified by this parameter.
""")
parser.add_argument('--cleanup-period', type=int, default=1, metavar='DAYS',
help='Specifies how often the server will attempt to clean up old files.')
parser.add_argument('--debug', help='Run in debug mode (no DAQ connected).',
action='store_true', default=False)
parser.add_argument('--verbose', help='Produce verbose output.', action='store_true', default=False)
args = parser.parse_args()
if args.debug:
global DaqRunner # pylint: disable=W0603
DaqRunner = DummyDaqRunner
else:
if not DaqRunner:
raise __import_error # pylint: disable=raising-bad-type
if args.verbose or args.debug:
log.start_logging('DEBUG')
else:
log.start_logging('INFO')
# days to seconds
cleanup_period = args.cleanup_period * 24 * 60 * 60
server = DaqServer(args.directory)
factory = DaqFactory(server, cleanup_period, args.cleanup_after)
reactor.listenTCP(args.port, factory).getHost()
try:
hostname = socket.gethostbyname(socket.gethostname())
except socket.gaierror:
hostname = 'localhost'
log.info('Listening on {}:{}'.format(hostname, args.port))
reactor.run()
if __name__ == "__main__":
run_server()
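# A minimal raw-protocol sketch (illustrative, not part of the original
# module): talking to a running server with a plain socket. The host and
# port are assumptions; the server expects one JSON request per
# '\r\n'-terminated line and responds in kind.
#
#     import socket
#     from daqpower.common import DaqServerRequest
#
#     s = socket.create_connection(('127.0.0.1', 45677))
#     s.sendall(DaqServerRequest('list_ports').serialize() + '\r\n')
#     print s.recv(4096)  # e.g. '{"status": "OK", "message": "", "data": {"ports": [...]}}'
#     s.close()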

View File

@ -1,3 +0,0 @@
#!/usr/bin/env python
from daqpower.server import run_server
run_server()

View File

@ -1,3 +0,0 @@
#!/usr/bin/env python
from daqpower.client import run_send_command
run_send_command()

View File

@ -1,52 +0,0 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from distutils.core import setup
import daqpower
warnings.filterwarnings('ignore', "Unknown distribution option: 'install_requires'")
params = dict(
name='daqpower',
version=daqpower.__version__,
packages=[
'daqpower',
],
scripts=[
'scripts/run-daq-server',
'scripts/send-daq-command',
],
url='N/A',
maintainer='workload-automation',
maintainer_email='workload-automation@arm.com',
install_requires=[
'twisted',
'PyDAQmx',
],
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'License :: Other/Proprietary License',
'Operating System :: Unix',
'Programming Language :: Python :: 2.7',
],
)
setup(**params)

View File

@ -1,7 +0,0 @@
# To build the pmu_logger module use the following command line
# make ARCH=arm CROSS_COMPILE=arm-linux-gnueabi- -C ../kernel/out SUBDIRS=$PWD modules
# where
# CROSS_COMPILE - prefix of the arm linux compiler
# -C - location of the configured kernel source tree
obj-m := pmu_logger.o

View File

@ -1,35 +0,0 @@
The pmu_logger module provides the ability to periodically trace CCI PMU counters. The trace destination can be the ftrace buffer and/or the kernel log. This file gives a quick overview of the functionality provided by the module and how to use it.
The pmu_logger module creates a directory in the debugfs filesystem called cci_pmu_logger, which can be used to enable/disable the counters and control the events that are counted.
To configure the events being counted, write the corresponding event ID to the counter* files. The list of CCI PMU events can be found at http://arminfo.emea.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0470d/CJHICFBF.html.
The "period_jiffies" file can be used to control the periodicity of tracing. It accepts values in kernel jiffies.
To enable tracing, write a 1 to "control"; to disable it, write a 1 to "control" again (the file toggles the counters). The files "enable_console" and "enable_ftrace" control where the trace is written. To check whether the counters are currently running, read the control file.
The current values of the counters can be read from the "values" file.
E.g. to trace A15 and A7 snoop hit rates every 10 jiffies, the following commands are required -
trace-cmd reset
echo 0x63 > counter0
echo 0x6A > counter1
echo 0x83 > counter2
echo 0x8A > counter3
echo 10 > period_jiffies
trace-cmd start -b 20000 -e "sched:sched_wakeup"
echo 1 > control
# perform the activity for which you would like to collect the CCI PMU trace.
trace-cmd stop && trace-cmd extract
echo 1 > control
trace-cmd report trace.dat | grep print # shows the trace of the CCI PMU counters along with the cycle counter values.
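The counter values in the trace are printed in hexadecimal. As a post-processing sketch, the hit rates can be computed from the report output with a few lines of Python. The pairing below -- counter 0 counting hits and counter 1 the corresponding requests for the A15, likewise counters 2 and 3 for the A7 -- is an assumption based on the example above; verify it against the event IDs you actually programmed.
import re
import sys

line_re = re.compile(r'Cycles: ([0-9a-f]+) Counter_0: ([0-9a-f]+) Counter_1: ([0-9a-f]+) '
                     r'Counter_2: ([0-9a-f]+) Counter_3: ([0-9a-f]+)')

# Feed this the output of: trace-cmd report trace.dat | grep print
for line in sys.stdin:
    m = line_re.search(line)
    if not m:
        continue
    cycles, c0, c1, c2, c3 = [int(v, 16) for v in m.groups()]
    if c1 and c3:
        print 'A15 hit rate: {:.2%} A7 hit rate: {:.2%}'.format(float(c0) / c1,
                                                                float(c2) / c3)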

View File

@ -1,294 +0,0 @@
/* Copyright 2013-2015 ARM Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* pmu_logger.c - Kernel module to log the CCI PMU counters
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/timer.h>
#include <asm/io.h>
#define MODULE_NAME "cci_pmu_logger"
// CCI_BASE needs to be modified to point to the mapped location of CCI in
// memory on your device.
#define CCI_BASE 0x2C090000 // TC2
//#define CCI_BASE 0x10D20000
#define CCI_SIZE 0x00010000
#define PMCR 0x100
#define PMCR_CEN (1 << 0)
#define PMCR_RST (1 << 1)
#define PMCR_CCR (1 << 2)
#define PMCR_CCD (1 << 3)
#define PMCR_EX (1 << 4)
#define PMCR_DP (1 << 5)
#define CC_BASE 0x9000
#define PC0_BASE 0xA000
#define PC1_BASE 0xB000
#define PC2_BASE 0xC000
#define PC3_BASE 0xD000
#define PC_ESR 0x0
#define CNT_VALUE 0x4
#define CNT_CONTROL 0x8
#define CNT_ENABLE (1 << 0)
u32 counter0_event = 0x6A;
u32 counter1_event = 0x63;
u32 counter2_event = 0x8A;
u32 counter3_event = 0x83;
u32 enable_console = 0;
u32 enable_ftrace = 1;
void *cci_base = 0;
static struct dentry *module_debugfs_root;
static int enabled = false;
u32 delay = 10; //jiffies. This translates to 1 sample every 100 ms
struct timer_list timer;
static void call_after_delay(void)
{
timer.expires = jiffies + delay;
add_timer(&timer);
}
static void setup_and_call_after_delay(void (*fn)(unsigned long))
{
init_timer(&timer);
timer.data = (unsigned long)&timer;
timer.function = fn;
call_after_delay();
}
static void print_counter_configuration(void)
{
if (enable_ftrace)
trace_printk("Counter_0: %02x Counter_1: %02x Counter_2: %02x Counter_3: %02x\n", \
counter0_event, counter1_event, counter2_event, counter3_event);
if (enable_console)
printk("Counter_0: %02x Counter_1: %02x Counter_2: %02x Counter_3: %02x\n", \
counter0_event, counter1_event, counter2_event, counter3_event);
}
static void initialize_cci_pmu(void)
{
u32 val;
// Select the events counted
iowrite32(counter0_event, cci_base + PC0_BASE + PC_ESR);
iowrite32(counter1_event, cci_base + PC1_BASE + PC_ESR);
iowrite32(counter2_event, cci_base + PC2_BASE + PC_ESR);
iowrite32(counter3_event, cci_base + PC3_BASE + PC_ESR);
// Enable the individual PMU counters
iowrite32(CNT_ENABLE, cci_base + PC0_BASE + CNT_CONTROL);
iowrite32(CNT_ENABLE, cci_base + PC1_BASE + CNT_CONTROL);
iowrite32(CNT_ENABLE, cci_base + PC2_BASE + CNT_CONTROL);
iowrite32(CNT_ENABLE, cci_base + PC3_BASE + CNT_CONTROL);
iowrite32(CNT_ENABLE, cci_base + CC_BASE + CNT_CONTROL);
// Reset the counters and configure the Cycle Count Divider
val = ioread32(cci_base + PMCR);
iowrite32(val | PMCR_RST | PMCR_CCR | PMCR_CCD, cci_base + PMCR);
}
static void enable_cci_pmu_counters(void)
{
u32 val = ioread32(cci_base + PMCR);
iowrite32(val | PMCR_CEN, cci_base + PMCR);
}
static void disable_cci_pmu_counters(void)
{
u32 val = ioread32(cci_base + PMCR);
iowrite32(val & ~PMCR_CEN, cci_base + PMCR);
}
static void trace_values(unsigned long arg)
{
u32 cycles;
u32 counter[4];
cycles = ioread32(cci_base + CC_BASE + CNT_VALUE);
counter[0] = ioread32(cci_base + PC0_BASE + CNT_VALUE);
counter[1] = ioread32(cci_base + PC1_BASE + CNT_VALUE);
counter[2] = ioread32(cci_base + PC2_BASE + CNT_VALUE);
counter[3] = ioread32(cci_base + PC3_BASE + CNT_VALUE);
if (enable_ftrace)
trace_printk("Cycles: %08x Counter_0: %08x"
" Counter_1: %08x Counter_2: %08x Counter_3: %08x\n", \
cycles, counter[0], counter[1], counter[2], counter[3]);
if (enable_console)
printk("Cycles: %08x Counter_0: %08x"
" Counter_1: %08x Counter_2: %08x Counter_3: %08x\n", \
cycles, counter[0], counter[1], counter[2], counter[3]);
if (enabled) {
u32 val;
// Reset the counters
val = ioread32(cci_base + PMCR);
iowrite32(val | PMCR_RST | PMCR_CCR, cci_base + PMCR);
call_after_delay();
}
}
static ssize_t read_control(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
char status[16];
/* printk(KERN_DEBUG "%s\n", __func__); */
if (enabled)
snprintf(status, 16, "enabled\n");
else
snprintf(status, 16, "disabled\n");
return simple_read_from_buffer(buf, count, ppos, status, strlen(status));
}
static ssize_t write_control(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
if (enabled) {
disable_cci_pmu_counters();
enabled = false;
} else {
initialize_cci_pmu();
enable_cci_pmu_counters();
enabled = true;
print_counter_configuration();
setup_and_call_after_delay(trace_values);
}
return count;
}
static ssize_t read_values(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
char values[256];
/* u32 val; */
snprintf(values, 256, "Cycles: %08x Counter_0: %08x"
" Counter_1: %08x Counter_2: %08x Counter_3: %08x\n", \
ioread32(cci_base + CC_BASE + CNT_VALUE), \
ioread32(cci_base + PC0_BASE + CNT_VALUE), \
ioread32(cci_base + PC1_BASE + CNT_VALUE), \
ioread32(cci_base + PC2_BASE + CNT_VALUE), \
ioread32(cci_base + PC3_BASE + CNT_VALUE));
return simple_read_from_buffer(buf, count, ppos, values, strlen(values));
}
static const struct file_operations control_fops = {
.owner = THIS_MODULE,
.read = read_control,
.write = write_control,
};
static const struct file_operations value_fops = {
.owner = THIS_MODULE,
.read = read_values,
};
static int __init pmu_logger_init(void)
{
struct dentry *retval;
module_debugfs_root = debugfs_create_dir(MODULE_NAME, NULL);
if (!module_debugfs_root || IS_ERR(module_debugfs_root)) {
printk(KERN_ERR "error creating debugfs dir.\n");
goto out;
}
retval = debugfs_create_file("control", S_IRUGO | S_IWUGO, module_debugfs_root, NULL, &control_fops);
if (!retval)
goto out;
retval = debugfs_create_file("values", S_IRUGO, module_debugfs_root, NULL, &value_fops);
if (!retval)
goto out;
retval = debugfs_create_bool("enable_console", S_IRUGO | S_IWUGO, module_debugfs_root, &enable_console);
if (!retval)
goto out;
retval = debugfs_create_bool("enable_ftrace", S_IRUGO | S_IWUGO, module_debugfs_root, &enable_ftrace);
if (!retval)
goto out;
retval = debugfs_create_u32("period_jiffies", S_IRUGO | S_IWUGO, module_debugfs_root, &delay);
if (!retval)
goto out;
retval = debugfs_create_x32("counter0", S_IRUGO | S_IWUGO, module_debugfs_root, &counter0_event);
if (!retval)
goto out;
retval = debugfs_create_x32("counter1", S_IRUGO | S_IWUGO, module_debugfs_root, &counter1_event);
if (!retval)
goto out;
retval = debugfs_create_x32("counter2", S_IRUGO | S_IWUGO, module_debugfs_root, &counter2_event);
if (!retval)
goto out;
retval = debugfs_create_x32("counter3", S_IRUGO | S_IWUGO, module_debugfs_root, &counter3_event);
if (!retval)
goto out;
cci_base = ioremap(CCI_BASE, CCI_SIZE);
if (!cci_base)
goto out;
printk(KERN_INFO "CCI PMU Logger loaded.\n");
return 0;
out:
debugfs_remove_recursive(module_debugfs_root);
return 1;
}
static void __exit pmu_logger_exit(void)
{
if (module_debugfs_root) {
debugfs_remove_recursive(module_debugfs_root);
module_debugfs_root = NULL;
}
if (cci_base)
iounmap(cci_base);
printk(KERN_INFO "CCI PMU Logger removed.\n");
}
module_init(pmu_logger_init);
module_exit(pmu_logger_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Punit Agrawal");
MODULE_DESCRIPTION("logger for CCI PMU counters");

Binary file not shown.

View File

@ -1,11 +0,0 @@
# To build:
#
# CROSS_COMPILE=aarch64-linux-gnu- make
#
CROSS_COMPILE?=aarch64-linux-gnu-
CC=$(CROSS_COMPILE)gcc
CFLAGS='-Wl,-static -Wl,-lc'
readenergy: readenergy.c
$(CC) $(CFLAGS) readenergy.c -o readenergy
cp readenergy ../../instrumentation/juno_energy/readenergy

Binary file not shown.

Some files were not shown because too many files have changed in this diff.