mirror of https://github.com/ARM-software/workload-automation.git synced 2025-09-02 03:12:34 +01:00

Initial commit of open source Workload Automation.

Sergei Trofimov
2015-03-10 13:09:31 +00:00
commit a747ec7e4c
412 changed files with 41401 additions and 0 deletions

wlauto/__init__.py Normal file

@@ -0,0 +1,36 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wlauto.core.bootstrap import settings # NOQA
from wlauto.core.device import Device, RuntimeParameter, CoreParameter # NOQA
from wlauto.core.command import Command # NOQA
from wlauto.core.workload import Workload # NOQA
from wlauto.core.extension import Module, Parameter, Artifact, Alias # NOQA
from wlauto.core.extension_loader import ExtensionLoader # NOQA
from wlauto.core.instrumentation import Instrument # NOQA
from wlauto.core.result import ResultProcessor, IterationResult # NOQA
from wlauto.core.resource import ResourceGetter, Resource, GetterPriority, NO_ONE # NOQA
from wlauto.core.exttype import get_extension_type # NOQA Note: MUST be imported after other core imports.
from wlauto.common.resources import File, ExtensionAsset, Executable
from wlauto.common.linux.device import LinuxDevice # NOQA
from wlauto.common.android.device import AndroidDevice, BigLittleDevice # NOQA
from wlauto.common.android.resources import ApkFile, JarFile
from wlauto.common.android.workload import (UiAutomatorWorkload, ApkWorkload, AndroidBenchmark, # NOQA
AndroidUiAutoBenchmark, GameWorkload) # NOQA
from wlauto.core.version import get_wa_version
__version__ = get_wa_version()
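
Everything re-exported above is WA's public extension API: a plugin author should only ever need ``from wlauto import ...``. As an illustration (not part of this commit), a minimal workload written against this surface might look like the sketch below; the class and its name are hypothetical, and it assumes WA binds declared parameters as instance attributes (as the device code later in this commit does with its own parameters).

import time

from wlauto import Workload, Parameter


class IdleExample(Workload):  # hypothetical example, not shipped with WA

    name = 'idle_example'
    description = 'Does nothing for a configurable number of seconds.'

    parameters = [
        Parameter('duration', kind=int, default=5,
                  description='Time, in seconds, to idle for.'),
    ]

    def run(self, context):
        time.sleep(self.duration)  # parameter bound as an attribute by WA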


@@ -0,0 +1,79 @@
# This agenda specifies configuration that may be used for regression runs
# on big.LITTLE systems. This agenda will work with a TC2 device configured as
# described in the documentation.
config:
device: tc2
run_name: big.LITTLE_regression
global:
iterations: 5
sections:
- id: mp_a15only
boot_parameters:
os_mode: mp_a15_only
runtime_parameters:
a15_governor: interactive
a15_governor_tunables:
above_hispeed_delay: 20000
- id: mp_a7bc
boot_parameters:
os_mode: mp_a7_bootcluster
runtime_parameters:
a7_governor: interactive
a7_min_frequency: 500000
a7_governor_tunables:
above_hispeed_delay: 20000
a15_governor: interactive
a15_governor_tunables:
above_hispeed_delay: 20000
- id: mp_a15bc
boot_parameters:
os_mode: mp_a15_bootcluster
runtime_parameters:
a7_governor: interactive
a7_min_frequency: 500000
a7_governor_tunables:
above_hispeed_delay: 20000
a15_governor: interactive
a15_governor_tunables:
above_hispeed_delay: 20000
workloads:
- id: b01
name: andebench
workload_parameters:
number_of_threads: 5
- id: b02
name: andebench
label: andebenchst
workload_parameters:
number_of_threads: 1
- id: b03
name: antutu
label: antutu4.0.3
workload_parameters:
version: 4.0.3
- id: b04
name: benchmarkpi
- id: b05
name: caffeinemark
- id: b06
name: cfbench
- id: b07
name: geekbench
label: geekbench3
workload_parameters:
version: 3
- id: b08
name: linpack
- id: b09
name: quadrant
- id: b10
name: smartbench
- id: b11
name: sqlite
- id: b12
name: vellamo
- id: w01
name: bbench_with_audio
- id: w02
name: audio
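
Sections cross-multiply with workloads: each of the three ``os_mode`` sections runs every workload entry for the global iteration count, so this agenda expands to 3 sections x 14 workloads x 5 iterations = 210 jobs. A quick sketch of that expansion (the ``section_workload`` ID join format is an assumption for illustration only):

sections = ['mp_a15only', 'mp_a7bc', 'mp_a15bc']
workloads = ['b01', 'b02', 'b03', 'b04', 'b05', 'b06', 'b07',
             'b08', 'b09', 'b10', 'b11', 'b12', 'w01', 'w02']
iterations = 5

job_ids = ['{}_{}'.format(s, w) for s in sections for w in workloads]
print(len(job_ids) * iterations)  # 210 jobs in total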


@@ -0,0 +1,43 @@
# This is an agenda that is built up during the explanation of the agenda features
# in the documentation. It should work out of the box on most rooted Android
# devices.
config:
project: governor_comparison
run_name: performance_vs_interactive
device: generic_android
reboot_policy: never
instrumentation: [coreutil, cpufreq]
coreutil:
threshold: 80
sysfs_extractor:
paths: [/proc/meminfo]
result_processors: [sqlite]
sqlite:
database: ~/my_wa_results.sqlite
global:
iterations: 5
sections:
- id: perf
runtime_params:
sysfile_values:
/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
- id: inter
runtime_params:
sysfile_values:
/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: interactive
workloads:
- id: 01_dhry
name: dhrystone
label: dhrystone_15over6
workload_params:
threads: 6
mloops: 15
- id: 02_memc
name: memcpy
instrumentation: [sysfs_extractor]
- id: 03_cycl
name: cyclictest
iterations: 10
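
The ``sysfile_values`` runtime parameter writes each value to the corresponding sysfs path before a job runs, which is what switches the governor between the two sections. Outside of WA, the ``perf`` section amounts to roughly the following sketch (assuming a single adb-visible, rooted device with a writable cpufreq interface):

import subprocess

# Roughly what WA is asked to do before each job in the 'perf' section:
subprocess.check_call([
    'adb', 'shell',
    'echo performance > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor',
])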


@@ -0,0 +1,16 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

wlauto/commands/create.py Normal file

@@ -0,0 +1,300 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import stat
import string
import textwrap
import argparse
import shutil
import getpass
from wlauto import ExtensionLoader, Command, settings
from wlauto.exceptions import CommandError
from wlauto.utils.cli import init_argument_parser
from wlauto.utils.misc import (capitalize, check_output,
ensure_file_directory_exists as _f, ensure_directory_exists as _d)
from wlauto.utils.types import identifier
from wlauto.utils.doc import format_body
__all__ = ['create_workload']
TEMPLATES_DIR = os.path.join(os.path.dirname(__file__), 'templates')
UIAUTO_BUILD_SCRIPT = """#!/bin/bash
class_dir=bin/classes/com/arm/wlauto/uiauto
base_class=`python -c "import os, wlauto; print os.path.join(os.path.dirname(wlauto.__file__), 'common', 'android', 'BaseUiAutomation.class')"`
mkdir -p $$class_dir
cp $$base_class $$class_dir
ant build
if [[ -f bin/${package_name}.jar ]]; then
cp bin/${package_name}.jar ..
fi
"""
class CreateSubcommand(object):
name = None
help = None
usage = None
description = None
epilog = None
formatter_class = None
def __init__(self, logger, subparsers):
self.logger = logger
self.group = subparsers
parser_params = dict(help=(self.help or self.description), usage=self.usage,
description=format_body(textwrap.dedent(self.description), 80),
epilog=self.epilog)
if self.formatter_class:
parser_params['formatter_class'] = self.formatter_class
self.parser = subparsers.add_parser(self.name, **parser_params)
init_argument_parser(self.parser) # propagate top-level options
self.initialize()
def initialize(self):
pass
class CreateWorkloadSubcommand(CreateSubcommand):
name = 'workload'
description = '''Create a new workload. By default, a basic workload template will be
used but you can use options to specify a different template.'''
def initialize(self):
self.parser.add_argument('name', metavar='NAME',
help='Name of the workload to be created')
self.parser.add_argument('-p', '--path', metavar='PATH', default=None,
help='The location at which the workload will be created. If not specified, ' +
'this defaults to "~/.workload_automation/workloads".')
self.parser.add_argument('-f', '--force', action='store_true',
help='Create the new workload even if a workload with the specified ' +
'name already exists.')
template_group = self.parser.add_mutually_exclusive_group()
        template_group.add_argument('-A', '--android-benchmark', action='store_true',
                                    help='Use android benchmark template. This template allows you to specify ' +
                                         'an APK file that will be installed and run on the device. You should ' +
                                         'place the APK file into the workload\'s directory at the same level ' +
                                         'as the __init__.py.')
        template_group.add_argument('-U', '--ui-automation', action='store_true',
                                    help='Use UI automation template. This template generates a UI automation ' +
                                         'Android project as well as the Python class. This is a more general ' +
                                         'version of the android benchmark template that makes no assumptions ' +
                                         'about the nature of your workload, apart from the fact that you need ' +
                                         'UI automation. If you need to install an APK, start an app on device, ' +
                                         'etc., you will need to do that explicitly in your code.')
template_group.add_argument('-B', '--android-uiauto-benchmark', action='store_true',
help='Use android uiauto benchmark template. This generates a UI automation ' +
'project as well as a Python class. This template should be used ' +
                                         'if you have an APK file that needs to be run on the device. You ' +
'should place the APK file into the workload\'s directory at the ' +
'same level as the __init__.py.')
def execute(self, args): # pylint: disable=R0201
where = args.path or 'local'
check_name = not args.force
if args.android_benchmark:
kind = 'android'
elif args.ui_automation:
kind = 'uiauto'
elif args.android_uiauto_benchmark:
kind = 'android_uiauto'
else:
kind = 'basic'
try:
create_workload(args.name, kind, where, check_name)
except CommandError, e:
print "ERROR:", e
class CreatePackageSubcommand(CreateSubcommand):
name = 'package'
description = '''Create a new empty Python package for WA extensions. On installation,
                     this package will "advertise" itself to WA so that extensions within it will
be loaded by WA when it runs.'''
def initialize(self):
self.parser.add_argument('name', metavar='NAME',
help='Name of the package to be created')
self.parser.add_argument('-p', '--path', metavar='PATH', default=None,
                                 help='The location at which the new package will be created. If not specified, ' +
'current working directory will be used.')
self.parser.add_argument('-f', '--force', action='store_true',
help='Create the new package even if a file or directory with the same name '
'already exists at the specified location.')
def execute(self, args): # pylint: disable=R0201
package_dir = args.path or os.path.abspath('.')
template_path = os.path.join(TEMPLATES_DIR, 'setup.template')
self.create_extensions_package(package_dir, args.name, template_path, args.force)
def create_extensions_package(self, location, name, setup_template_path, overwrite=False):
package_path = os.path.join(location, name)
if os.path.exists(package_path):
if overwrite:
self.logger.info('overwriting existing "{}"'.format(package_path))
shutil.rmtree(package_path)
else:
raise CommandError('Location "{}" already exists.'.format(package_path))
actual_package_path = os.path.join(package_path, name)
os.makedirs(actual_package_path)
setup_text = render_template(setup_template_path, {'package_name': name, 'user': getpass.getuser()})
with open(os.path.join(package_path, 'setup.py'), 'w') as wfh:
wfh.write(setup_text)
touch(os.path.join(actual_package_path, '__init__.py'))
class CreateCommand(Command):
name = 'create'
description = '''Used to create various WA-related objects (see positional arguments list for what
objects may be created).\n\nUse "wa create <object> -h" for object-specific arguments.'''
formatter_class = argparse.RawDescriptionHelpFormatter
subcmd_classes = [CreateWorkloadSubcommand, CreatePackageSubcommand]
def initialize(self):
subparsers = self.parser.add_subparsers(dest='what')
self.subcommands = [] # pylint: disable=W0201
for subcmd_cls in self.subcmd_classes:
subcmd = subcmd_cls(self.logger, subparsers)
self.subcommands.append(subcmd)
def execute(self, args):
for subcmd in self.subcommands:
if subcmd.name == args.what:
subcmd.execute(args)
break
else:
            raise CommandError('Not a valid create parameter: {}'.format(args.what))
def create_workload(name, kind='basic', where='local', check_name=True, **kwargs):
if check_name:
extloader = ExtensionLoader(packages=settings.extension_packages, paths=settings.extension_paths)
if name in [wl.name for wl in extloader.list_workloads()]:
raise CommandError('Workload with name "{}" already exists.'.format(name))
class_name = get_class_name(name)
if where == 'local':
workload_dir = _d(os.path.join(settings.environment_root, 'workloads', name))
else:
workload_dir = _d(os.path.join(where, name))
if kind == 'basic':
create_basic_workload(workload_dir, name, class_name, **kwargs)
elif kind == 'uiauto':
create_uiautomator_workload(workload_dir, name, class_name, **kwargs)
elif kind == 'android':
create_android_benchmark(workload_dir, name, class_name, **kwargs)
elif kind == 'android_uiauto':
create_android_uiauto_benchmark(workload_dir, name, class_name, **kwargs)
else:
raise CommandError('Unknown workload type: {}'.format(kind))
print 'Workload created in {}'.format(workload_dir)
def create_basic_workload(path, name, class_name):
source_file = os.path.join(path, '__init__.py')
with open(source_file, 'w') as wfh:
wfh.write(render_template('basic_workload', {'name': name, 'class_name': class_name}))
def create_uiautomator_workload(path, name, class_name):
uiauto_path = _d(os.path.join(path, 'uiauto'))
create_uiauto_project(uiauto_path, name)
source_file = os.path.join(path, '__init__.py')
with open(source_file, 'w') as wfh:
wfh.write(render_template('uiauto_workload', {'name': name, 'class_name': class_name}))
def create_android_benchmark(path, name, class_name):
source_file = os.path.join(path, '__init__.py')
with open(source_file, 'w') as wfh:
wfh.write(render_template('android_benchmark', {'name': name, 'class_name': class_name}))
def create_android_uiauto_benchmark(path, name, class_name):
uiauto_path = _d(os.path.join(path, 'uiauto'))
create_uiauto_project(uiauto_path, name)
source_file = os.path.join(path, '__init__.py')
with open(source_file, 'w') as wfh:
wfh.write(render_template('android_uiauto_benchmark', {'name': name, 'class_name': class_name}))
def create_uiauto_project(path, name, target='1'):
sdk_path = get_sdk_path()
android_path = os.path.join(sdk_path, 'tools', 'android')
package_name = 'com.arm.wlauto.uiauto.' + name.lower()
# ${ANDROID_HOME}/tools/android create uitest-project -n com.arm.wlauto.uiauto.linpack -t 1 -p ../test2
command = '{} create uitest-project --name {} --target {} --path {}'.format(android_path,
package_name,
target,
path)
check_output(command, shell=True)
build_script = os.path.join(path, 'build.sh')
with open(build_script, 'w') as wfh:
template = string.Template(UIAUTO_BUILD_SCRIPT)
wfh.write(template.substitute({'package_name': package_name}))
os.chmod(build_script, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
source_file = _f(os.path.join(path, 'src',
os.sep.join(package_name.split('.')[:-1]),
'UiAutomation.java'))
with open(source_file, 'w') as wfh:
wfh.write(render_template('UiAutomation.java', {'name': name, 'package_name': package_name}))
# Utility functions
def get_sdk_path():
sdk_path = os.getenv('ANDROID_HOME')
if not sdk_path:
raise CommandError('Please set ANDROID_HOME environment variable to point to ' +
                           'the location of the Android SDK.')
return sdk_path
def get_class_name(name, postfix=''):
name = identifier(name)
return ''.join(map(capitalize, name.split('_'))) + postfix
def render_template(name, params):
filepath = os.path.join(TEMPLATES_DIR, name)
with open(filepath) as fh:
text = fh.read()
template = string.Template(text)
return template.substitute(params)
def touch(path):
with open(path, 'w') as wfh: # pylint: disable=unused-variable
pass
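
Since ``create_workload`` is exported (see ``__all__`` above), the same templates can be instantiated from Python as well as through ``wa create workload``. A sketch, with an illustrative workload name:

from wlauto.commands.create import create_workload

# Generates <environment_root>/workloads/myload/__init__.py from the 'basic'
# template; raises CommandError if a workload named 'myload' already exists.
create_workload('myload', kind='basic', where='local')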

wlauto/commands/list.py Normal file

@@ -0,0 +1,59 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wlauto import ExtensionLoader, Command, settings
from wlauto.utils.formatter import DescriptionListFormatter
from wlauto.utils.doc import get_summary
class ListCommand(Command):
name = 'list'
description = 'List available WA extensions with a short description of each.'
def initialize(self):
extension_types = ['{}s'.format(ext.name) for ext in settings.extensions]
self.parser.add_argument('kind', metavar='KIND',
help=('Specify the kind of extension to list. Must be '
'one of: {}'.format(', '.join(extension_types))),
choices=extension_types)
self.parser.add_argument('-n', '--name', help='Filter results by the name specified')
def execute(self, args):
filters = {}
if args.name:
filters['name'] = args.name
ext_loader = ExtensionLoader(packages=settings.extension_packages, paths=settings.extension_paths)
results = ext_loader.list_extensions(args.kind[:-1])
if filters:
filtered_results = []
for result in results:
passed = True
for k, v in filters.iteritems():
if getattr(result, k) != v:
passed = False
break
if passed:
filtered_results.append(result)
else: # no filters specified
filtered_results = results
if filtered_results:
output = DescriptionListFormatter()
for result in sorted(filtered_results, key=lambda x: x.name):
output.add_item(get_summary(result), result.name)
print output.format_data()
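
The command is a thin wrapper over ``ExtensionLoader``, so the same listing can be reproduced directly; a sketch:

from wlauto import ExtensionLoader, settings

loader = ExtensionLoader(packages=settings.extension_packages,
                         paths=settings.extension_paths)
# 'workload' is the singular form of the 'workloads' kind accepted by the command.
for ext in sorted(loader.list_extensions('workload'), key=lambda x: x.name):
    print(ext.name)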

wlauto/commands/run.py Normal file

@@ -0,0 +1,87 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import shutil
import wlauto
from wlauto import Command, settings
from wlauto.core.agenda import Agenda
from wlauto.core.execution import Executor
from wlauto.utils.log import add_log_file
class RunCommand(Command):
name = 'run'
description = 'Execute automated workloads on a remote device and process the resulting output.'
def initialize(self):
self.parser.add_argument('agenda', metavar='AGENDA',
help='Agenda for this workload automation run. This defines which workloads will ' +
'be executed, how many times, with which tunables, etc. ' +
                                 'See example agendas in {} '.format(os.path.dirname(wlauto.__file__)) +
                                 'for guidance on how this file should be structured.')
self.parser.add_argument('-d', '--output-directory', metavar='DIR', default=None,
                                 help='Specify a directory where the output will be generated. If the directory ' +
                                      'already exists, the script will abort unless the -f option (see below) is used, ' +
                                      'in which case the contents of the directory will be overwritten. If this option ' +
                                      'is not specified, then {} will be used instead.'.format(settings.output_directory))
self.parser.add_argument('-f', '--force', action='store_true',
                                 help='Overwrite output directory if it exists. By default, the script will abort in this ' +
                                      'situation to prevent accidental data loss.')
self.parser.add_argument('-i', '--id', action='append', dest='only_run_ids', metavar='ID',
help='Specify a workload spec ID from an agenda to run. If this is specified, only that particular ' +
'spec will be run, and other workloads in the agenda will be ignored. This option may be used to ' +
'specify multiple IDs.')
def execute(self, args): # NOQA
self.set_up_output_directory(args)
add_log_file(settings.log_file)
if os.path.isfile(args.agenda):
agenda = Agenda(args.agenda)
settings.agenda = args.agenda
shutil.copy(args.agenda, settings.meta_directory)
else:
self.logger.debug('{} is not a file; assuming workload name.'.format(args.agenda))
agenda = Agenda()
agenda.add_workload_entry(args.agenda)
file_name = 'config_{}.py'
for file_number, path in enumerate(settings.get_config_paths(), 1):
shutil.copy(path, os.path.join(settings.meta_directory, file_name.format(file_number)))
executor = Executor()
executor.execute(agenda, selectors={'ids': args.only_run_ids})
def set_up_output_directory(self, args):
if args.output_directory:
settings.output_directory = args.output_directory
self.logger.debug('Using output directory: {}'.format(settings.output_directory))
if os.path.exists(settings.output_directory):
if args.force:
self.logger.info('Removing existing output directory.')
shutil.rmtree(settings.output_directory)
else:
self.logger.error('Output directory {} exists.'.format(settings.output_directory))
self.logger.error('Please specify another location, or use -f option to overwrite.\n')
sys.exit(1)
self.logger.info('Creating output directory.')
os.makedirs(settings.output_directory)
os.makedirs(settings.meta_directory)
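
Note the fallback in ``execute()``: if the ``AGENDA`` argument is not a file, it is treated as a workload name and wrapped in a single-entry agenda. Programmatically, that path boils down to roughly this sketch:

from wlauto.core.agenda import Agenda
from wlauto.core.execution import Executor

agenda = Agenda()                       # empty agenda, as in execute() above
agenda.add_workload_entry('dhrystone')  # a workload name rather than a file
Executor().execute(agenda, selectors={'ids': None})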

wlauto/commands/show.py Normal file

@@ -0,0 +1,101 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import subprocess
from cStringIO import StringIO
from terminalsize import get_terminal_size # pylint: disable=import-error
from wlauto import Command, ExtensionLoader, settings
from wlauto.utils.doc import (get_summary, get_description, get_type_name, format_column, format_body,
format_paragraph, indent, strip_inlined_text)
from wlauto.utils.misc import get_pager
class ShowCommand(Command):
name = 'show'
description = """
Display documentation for the specified extension (workload, instrument, etc.).
"""
def initialize(self):
self.parser.add_argument('name', metavar='EXTENSION',
help='''The name of the extension for which information will
be shown.''')
def execute(self, args):
ext_loader = ExtensionLoader(packages=settings.extension_packages, paths=settings.extension_paths)
extension = ext_loader.get_extension_class(args.name)
out = StringIO()
term_width, term_height = get_terminal_size()
format_extension(extension, out, term_width)
text = out.getvalue()
pager = get_pager()
if len(text.split('\n')) > term_height and pager:
sp = subprocess.Popen(pager, stdin=subprocess.PIPE)
sp.communicate(text)
else:
sys.stdout.write(text)
def format_extension(extension, out, width):
format_extension_name(extension, out)
out.write('\n')
format_extension_summary(extension, out, width)
out.write('\n')
if extension.parameters:
format_extension_parameters(extension, out, width)
out.write('\n')
format_extension_description(extension, out, width)
def format_extension_name(extension, out):
out.write('\n{}\n'.format(extension.name))
def format_extension_summary(extension, out, width):
out.write('{}\n'.format(format_body(strip_inlined_text(get_summary(extension)), width)))
def format_extension_description(extension, out, width):
# skip the initial paragraph of multi-paragraph description, as already
# listed above.
description = get_description(extension).split('\n\n', 1)[-1]
out.write('{}\n'.format(format_body(strip_inlined_text(description), width)))
def format_extension_parameters(extension, out, width, shift=4):
out.write('parameters:\n\n')
param_texts = []
for param in extension.parameters:
description = format_paragraph(strip_inlined_text(param.description or ''), width - shift)
param_text = '{}'.format(param.name)
if param.mandatory:
param_text += " (MANDATORY)"
param_text += '\n{}\n'.format(description)
param_text += indent('type: {}\n'.format(get_type_name(param.kind)))
if param.allowed_values:
param_text += indent('allowed values: {}\n'.format(', '.join(map(str, param.allowed_values))))
elif param.constraint:
param_text += indent('constraint: {}\n'.format(get_type_name(param.constraint)))
if param.default:
param_text += indent('default: {}\n'.format(param.default))
param_texts.append(indent(param_text, shift))
out.write(format_column('\n'.join(param_texts), width))
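
``format_extension`` can also be used directly to render the same documentation to any stream; a sketch (``dhrystone`` is just an example extension name):

from cStringIO import StringIO

from wlauto import ExtensionLoader, settings
from wlauto.commands.show import format_extension

loader = ExtensionLoader(packages=settings.extension_packages,
                         paths=settings.extension_paths)
out = StringIO()
format_extension(loader.get_extension_class('dhrystone'), out, 80)
print(out.getvalue())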


@@ -0,0 +1,25 @@
package ${package_name};
import android.app.Activity;
import android.os.Bundle;
import android.util.Log;
import android.view.KeyEvent;
// Import the uiautomator libraries
import com.android.uiautomator.core.UiObject;
import com.android.uiautomator.core.UiObjectNotFoundException;
import com.android.uiautomator.core.UiScrollable;
import com.android.uiautomator.core.UiSelector;
import com.android.uiautomator.testrunner.UiAutomatorTestCase;
import com.arm.wlauto.uiauto.BaseUiAutomation;
public class UiAutomation extends BaseUiAutomation {
public static String TAG = "${name}";
public void runUiAutomation() throws Exception {
// UI Automation code goes here
}
}


@@ -0,0 +1,27 @@
from wlauto import AndroidBenchmark, Parameter
class ${class_name}(AndroidBenchmark):
name = '${name}'
# NOTE: Please do not leave these comments in the code.
#
# Replace with the package for the app in the APK file.
package = 'com.foo.bar'
# Replace with the full path to the activity to run.
activity = '.RunBuzz'
description = "This is an placeholder description"
parameters = [
# Workload parameters go here e.g.
Parameter('Example parameter', kind=int, allowed_values=[1,2,3], default=1, override=True, mandatory=False,
description='This is an example parameter')
]
def run(self, context):
pass
def update_result(self, context):
super(${class_name}, self).update_result(context)
# process results and add them using
# context.result.add_metric


@@ -0,0 +1,24 @@
from wlauto import AndroidUiAutoBenchmark, Parameter
class ${class_name}(AndroidUiAutoBenchmark):
name = '${name}'
# NOTE: Please do not leave these comments in the code.
#
# Replace with the package for the app in the APK file.
package = 'com.foo.bar'
# Replace with the full path to the activity to run.
activity = '.RunBuzz'
description = "This is an placeholder description"
parameters = [
# Workload parameters go here e.g.
Parameter('Example parameter', kind=int, allowed_values=[1,2,3], default=1, override=True, mandatory=False,
description='This is an example parameter')
]
def update_result(self, context):
super(${class_name}, self).update_result(context)
# process results and add them using
# context.result.add_metric


@@ -0,0 +1,28 @@
from wlauto import Workload, Parameter
class ${class_name}(Workload):
name = '${name}'
description = "This is an placeholder description"
parameters = [
# Workload parameters go here e.g.
Parameter('Example parameter', kind=int, allowed_values=[1,2,3], default=1, override=True, mandatory=False,
description='This is an example parameter')
]
def setup(self, context):
pass
def run(self, context):
pass
def update_result(self, context):
pass
def teardown(self, context):
pass
def validate(self):
pass


@@ -0,0 +1,102 @@
import os
import sys
import warnings
from multiprocessing import Process
try:
from setuptools.command.install import install as orig_install
from setuptools import setup
except ImportError:
from distutils.command.install import install as orig_install
from distutils.core import setup
try:
import pwd
except ImportError:
pwd = None
warnings.filterwarnings('ignore', "Unknown distribution option: 'install_requires'")
try:
os.remove('MANIFEST')
except OSError:
pass
packages = []
data_files = {}
source_dir = os.path.dirname(__file__)
for root, dirs, files in os.walk('$package_name'):
rel_dir = os.path.relpath(root, source_dir)
data = []
if '__init__.py' in files:
for f in files:
if os.path.splitext(f)[1] not in ['.py', '.pyc', '.pyo']:
data.append(f)
package_name = rel_dir.replace(os.sep, '.')
package_dir = root
packages.append(package_name)
data_files[package_name] = data
else:
# use previous package name
filepaths = [os.path.join(root, f) for f in files]
data_files[package_name].extend([os.path.relpath(f, package_dir) for f in filepaths])
params = dict(
name='$package_name',
version='0.0.1',
packages=packages,
package_data=data_files,
url='N/A',
maintainer='$user',
maintainer_email='$user@example.com',
install_requires=[
'wlauto',
],
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'License :: Other/Proprietary License',
'Operating System :: Unix',
'Programming Language :: Python :: 2.7',
],
)
def update_wa_packages():
sudo_user = os.getenv('SUDO_USER')
if sudo_user:
user_entry = pwd.getpwnam(sudo_user)
os.setgid(user_entry.pw_gid)
os.setuid(user_entry.pw_uid)
env_root = os.getenv('WA_USER_DIRECTORY', os.path.join(os.path.expanduser('~'), '.workload_automation'))
if not os.path.isdir(env_root):
os.makedirs(env_root)
wa_packages_file = os.path.join(env_root, 'packages')
if os.path.isfile(wa_packages_file):
with open(wa_packages_file, 'r') as wfh:
package_list = wfh.read().split()
if params['name'] not in package_list:
package_list.append(params['name'])
else: # no existing package file
package_list = [params['name']]
with open(wa_packages_file, 'w') as wfh:
wfh.write('\n'.join(package_list))
class install(orig_install):
def run(self):
orig_install.run(self)
# Must be done in a separate process because will drop privileges if
# sudo, and won't be able to reacquire them.
p = Process(target=update_wa_packages)
p.start()
p.join()
params['cmdclass'] = {'install': install}
setup(**params)
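
The "advertising" mechanism is a plain text file: ``update_wa_packages`` above appends this package's name to ``packages`` under ``$WA_USER_DIRECTORY`` (defaulting to ``~/.workload_automation``), which WA reads to discover extension packages. A sketch of the reading side:

import os

env_root = os.getenv('WA_USER_DIRECTORY',
                     os.path.join(os.path.expanduser('~'), '.workload_automation'))
packages_file = os.path.join(env_root, 'packages')
if os.path.isfile(packages_file):
    with open(packages_file) as fh:
        print(fh.read().split())  # package names WA will scan for extensions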


@@ -0,0 +1,35 @@
from wlauto import UiAutomatorWorkload, Parameter
class ${class_name}(UiAutomatorWorkload):
name = '${name}'
description = "This is an placeholder description"
parameters = [
# Workload parameters go here e.g.
Parameter('Example parameter', kind=int, allowed_values=[1,2,3], default=1, override=True, mandatory=False,
description='This is an example parameter')
]
def setup(self, context):
super(${class_name}, self).setup(context)
# Perform any necessary setup before starting the UI automation
# e.g. copy files to the device, start apps, reset logs, etc.
def update_result(self, context):
pass
# Process workload execution artifacts to extract metrics
# and add them to the run result using
# context.result.add_metric()
def teardown(self, context):
super(${class_name}, self).teardown(context)
        # Perform any necessary cleanup
def validate(self):
pass
# Validate inter-parameter assumptions etc

wlauto/common/__init__.py Normal file

@@ -0,0 +1,16 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

Binary file not shown.


@@ -0,0 +1,16 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#


@@ -0,0 +1,678 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=E1101
import os
import sys
import re
import time
import tempfile
import shutil
import threading
from subprocess import CalledProcessError
from wlauto.core.extension import Parameter
from wlauto.common.linux.device import BaseLinuxDevice
from wlauto.exceptions import DeviceError, WorkerThreadError, TimeoutError, DeviceNotRespondingError
from wlauto.utils.misc import convert_new_lines
from wlauto.utils.types import boolean, regex
from wlauto.utils.android import (adb_shell, adb_background_shell, adb_list_devices,
adb_command, AndroidProperties, ANDROID_VERSION_MAP)
SCREEN_STATE_REGEX = re.compile('(?:mPowerState|mScreenOn)=([0-9]+|true|false)', re.I)
class AndroidDevice(BaseLinuxDevice): # pylint: disable=W0223
"""
Device running Android OS.
"""
platform = 'android'
parameters = [
Parameter('adb_name',
description='The unique ID of the device as output by "adb devices".'),
Parameter('android_prompt', kind=regex, default=re.compile('^.*(shell|root)@.*:/ [#$] ', re.MULTILINE),
                  description='The regular expression used to match the shell prompt on Android.'),
Parameter('working_directory', default='/sdcard/wa-working',
                  description='Directory that will be used by WA on the device for output files, etc.'),
Parameter('binaries_directory', default='/system/bin',
description='Location of binaries on the device.'),
Parameter('package_data_directory', default='/data/data',
                  description='Location of data for an installed package (APK).'),
Parameter('external_storage_directory', default='/sdcard',
description='Mount point for external storage.'),
Parameter('connection', default='usb', allowed_values=['usb', 'ethernet'],
                  description='Specifies the nature of the adb connection.'),
Parameter('logcat_poll_period', kind=int,
description="""
If specified and is not ``0``, logcat will be polled every
``logcat_poll_period`` seconds, and buffered on the host. This
can be used if a lot of output is expected in logcat and the fixed
logcat buffer on the device is not big enough. The trade off is that
this introduces some minor runtime overhead. Not set by default.
"""),
Parameter('enable_screen_check', kind=boolean, default=False,
description="""
                  Specifies whether the device should make sure that the screen is on
during initialization.
"""),
]
default_timeout = 30
delay = 2
long_delay = 3 * delay
ready_timeout = 60
# Overwritten from Device. For documentation, see corresponding method in
# Device.
@property
def is_rooted(self):
if self._is_rooted is None:
try:
result = adb_shell(self.adb_name, 'su', timeout=1)
if 'not found' in result:
self._is_rooted = False
else:
self._is_rooted = True
except TimeoutError:
self._is_rooted = True
except DeviceError:
self._is_rooted = False
return self._is_rooted
@property
def abi(self):
return self.getprop()['ro.product.cpu.abi'].split('-')[0]
@property
def supported_eabi(self):
props = self.getprop()
result = [props['ro.product.cpu.abi']]
if 'ro.product.cpu.abi2' in props:
result.append(props['ro.product.cpu.abi2'])
if 'ro.product.cpu.abilist' in props:
for eabi in props['ro.product.cpu.abilist'].split(','):
if eabi not in result:
result.append(eabi)
return result
def __init__(self, **kwargs):
super(AndroidDevice, self).__init__(**kwargs)
self._logcat_poller = None
def reset(self):
self._is_ready = False
self._just_rebooted = True
adb_command(self.adb_name, 'reboot', timeout=self.default_timeout)
def hard_reset(self):
super(AndroidDevice, self).hard_reset()
self._is_ready = False
self._just_rebooted = True
def boot(self, **kwargs):
self.reset()
def connect(self): # NOQA pylint: disable=R0912
iteration_number = 0
max_iterations = self.ready_timeout / self.delay
available = False
self.logger.debug('Polling for device {}...'.format(self.adb_name))
while iteration_number < max_iterations:
devices = adb_list_devices()
if self.adb_name:
for device in devices:
if device.name == self.adb_name and device.status != 'offline':
available = True
else: # adb_name not set
if len(devices) == 1:
available = True
elif len(devices) > 1:
raise DeviceError('More than one device is connected and adb_name is not set.')
if available:
break
else:
time.sleep(self.delay)
iteration_number += 1
else:
raise DeviceError('Could not boot {} ({}).'.format(self.name, self.adb_name))
while iteration_number < max_iterations:
available = (1 == int('0' + adb_shell(self.adb_name, 'getprop sys.boot_completed', timeout=self.default_timeout)))
if available:
break
else:
time.sleep(self.delay)
iteration_number += 1
else:
raise DeviceError('Could not boot {} ({}).'.format(self.name, self.adb_name))
if self._just_rebooted:
self.logger.debug('Waiting for boot to complete...')
# On some devices, adb connection gets reset some time after booting.
# This causes errors during execution. To prevent this, open a shell
            # session and wait for it to be killed. Once it's killed, give adb
            # enough time to restart, and then the device should be ready.
            # TODO: This is more of a work-around than an actual solution.
            # Need to figure out what is going on, and the "proper" way of handling it.
try:
adb_shell(self.adb_name, '', timeout=20)
time.sleep(5) # give adb time to re-initialize
except TimeoutError:
pass # timed out waiting for the session to be killed -- assume not going to be.
self.logger.debug('Boot completed.')
self._just_rebooted = False
self._is_ready = True
def initialize(self, context, *args, **kwargs):
self.execute('mkdir -p {}'.format(self.working_directory))
if self.is_rooted:
if not self.executable_is_installed('busybox'):
self.busybox = self.deploy_busybox(context)
else:
self.busybox = 'busybox'
self.disable_screen_lock()
self.disable_selinux()
if self.enable_screen_check:
self.ensure_screen_is_on()
self.init(context, *args, **kwargs)
def disconnect(self):
if self._logcat_poller:
self._logcat_poller.close()
def ping(self):
try:
# May be triggered inside initialize()
adb_shell(self.adb_name, 'ls /', timeout=10)
except (TimeoutError, CalledProcessError):
raise DeviceNotRespondingError(self.adb_name or self.name)
def start(self):
if self.logcat_poll_period:
if self._logcat_poller:
self._logcat_poller.close()
self._logcat_poller = _LogcatPoller(self, self.logcat_poll_period, timeout=self.default_timeout)
self._logcat_poller.start()
def stop(self):
if self._logcat_poller:
self._logcat_poller.stop()
def get_android_version(self):
return ANDROID_VERSION_MAP.get(self.get_sdk_version(), None)
def get_android_id(self):
"""
        Get the device's ANDROID_ID, which is
"A 64-bit number (as a hex string) that is randomly generated when the user
first sets up the device and should remain constant for the lifetime of the
user's device."
.. note:: This will get reset on userdata erasure.
"""
return self.execute('settings get secure android_id').strip()
def get_sdk_version(self):
try:
return int(self.getprop('ro.build.version.sdk'))
except (ValueError, TypeError):
return None
def get_installed_package_version(self, package):
"""
Returns the version (versionName) of the specified package if it is installed
on the device, or ``None`` otherwise.
Added in version 2.1.4
"""
output = self.execute('dumpsys package {}'.format(package))
for line in convert_new_lines(output).split('\n'):
if 'versionName' in line:
return line.split('=', 1)[1]
return None
def list_packages(self):
"""
List packages installed on the device.
Added in version 2.1.4
"""
output = self.execute('pm list packages')
output = output.replace('package:', '')
return output.split()
def package_is_installed(self, package_name):
"""
        Returns ``True`` if a package with the specified name is installed on
the device, and ``False`` otherwise.
Added in version 2.1.4
"""
return package_name in self.list_packages()
def executable_is_installed(self, executable_name):
return executable_name in self.listdir(self.binaries_directory)
def is_installed(self, name):
return self.executable_is_installed(name) or self.package_is_installed(name)
def listdir(self, path, as_root=False, **kwargs):
contents = self.execute('ls {}'.format(path), as_root=as_root)
return [x.strip() for x in contents.split()]
def push_file(self, source, dest, as_root=False, timeout=default_timeout): # pylint: disable=W0221
"""
Modified in version 2.1.4: added ``as_root`` parameter.
"""
self._check_ready()
if not as_root:
adb_command(self.adb_name, "push '{}' '{}'".format(source, dest), timeout=timeout)
else:
device_tempfile = self.path.join(self.file_transfer_cache, source.lstrip(self.path.sep))
self.execute('mkdir -p {}'.format(self.path.dirname(device_tempfile)))
adb_command(self.adb_name, "push '{}' '{}'".format(source, device_tempfile), timeout=timeout)
self.execute('cp {} {}'.format(device_tempfile, dest), as_root=True)
def pull_file(self, source, dest, as_root=False, timeout=default_timeout): # pylint: disable=W0221
"""
Modified in version 2.1.4: added ``as_root`` parameter.
"""
self._check_ready()
if not as_root:
adb_command(self.adb_name, "pull '{}' '{}'".format(source, dest), timeout=timeout)
else:
device_tempfile = self.path.join(self.file_transfer_cache, source.lstrip(self.path.sep))
self.execute('mkdir -p {}'.format(self.path.dirname(device_tempfile)))
self.execute('cp {} {}'.format(source, device_tempfile), as_root=True)
adb_command(self.adb_name, "pull '{}' '{}'".format(device_tempfile, dest), timeout=timeout)
def delete_file(self, filepath, as_root=False): # pylint: disable=W0221
self._check_ready()
adb_shell(self.adb_name, "rm '{}'".format(filepath), as_root=as_root, timeout=self.default_timeout)
def file_exists(self, filepath):
self._check_ready()
output = adb_shell(self.adb_name, 'if [ -e \'{}\' ]; then echo 1; else echo 0; fi'.format(filepath),
timeout=self.default_timeout)
if int(output):
return True
else:
return False
def install(self, filepath, timeout=default_timeout, with_name=None): # pylint: disable=W0221
ext = os.path.splitext(filepath)[1].lower()
if ext == '.apk':
return self.install_apk(filepath, timeout)
else:
return self.install_executable(filepath, with_name)
def install_apk(self, filepath, timeout=default_timeout): # pylint: disable=W0221
self._check_ready()
ext = os.path.splitext(filepath)[1].lower()
if ext == '.apk':
return adb_command(self.adb_name, "install {}".format(filepath), timeout=timeout)
else:
raise DeviceError('Can\'t install {}: unsupported format.'.format(filepath))
def install_executable(self, filepath, with_name=None):
"""
Installs a binary executable on device. Requires root access. Returns
the path to the installed binary, or ``None`` if the installation has failed.
Optionally, ``with_name`` parameter may be used to specify a different name under
which the executable will be installed.
Added in version 2.1.3.
Updated in version 2.1.5 with ``with_name`` parameter.
"""
executable_name = with_name or os.path.basename(filepath)
on_device_file = self.path.join(self.working_directory, executable_name)
on_device_executable = self.path.join(self.binaries_directory, executable_name)
self.push_file(filepath, on_device_file)
matched = []
for entry in self.list_file_systems():
if self.binaries_directory.rstrip('/').startswith(entry.mount_point):
matched.append(entry)
if matched:
entry = sorted(matched, key=lambda x: len(x.mount_point))[-1]
if 'rw' not in entry.options:
self.execute('mount -o rw,remount {} {}'.format(entry.device, entry.mount_point), as_root=True)
self.execute('cp {} {}'.format(on_device_file, on_device_executable), as_root=True)
self.execute('chmod 0777 {}'.format(on_device_executable), as_root=True)
return on_device_executable
else:
raise DeviceError('Could not find mount point for binaries directory {}'.format(self.binaries_directory))
def uninstall(self, package):
self._check_ready()
adb_command(self.adb_name, "uninstall {}".format(package), timeout=self.default_timeout)
def uninstall_executable(self, executable_name):
"""
Requires root access.
Added in version 2.1.3.
"""
on_device_executable = self.path.join(self.binaries_directory, executable_name)
for entry in self.list_file_systems():
if entry.mount_point == '/system':
if 'rw' not in entry.options:
self.execute('mount -o rw,remount {} /system'.format(entry.device), as_root=True)
self.delete_file(on_device_executable)
def execute(self, command, timeout=default_timeout, check_exit_code=True, background=False,
as_root=False, busybox=False, **kwargs):
"""
Execute the specified command on the device using adb.
Parameters:
:param command: The command to be executed. It should appear exactly
as if you were typing it into a shell.
:param timeout: Time, in seconds, to wait for adb to return before aborting
and raising an error. Defaults to ``AndroidDevice.default_timeout``.
        :param check_exit_code: If ``True``, the return code of the command on the Device will
                                be checked and an exception will be raised if it is not 0.
Defaults to ``True``.
:param background: If ``True``, will execute adb in a subprocess, and will return
immediately, not waiting for adb to return. Defaults to ``False``
:param busybox: If ``True``, will use busybox to execute the command. Defaults to ``False``.
Added in version 2.1.3
.. note:: The device must be rooted to be able to use busybox.
:param as_root: If ``True``, will attempt to execute command in privileged mode. The device
must be rooted, otherwise an error will be raised. Defaults to ``False``.
Added in version 2.1.3
:returns: If ``background`` parameter is set to ``True``, the subprocess object will
be returned; otherwise, the contents of STDOUT from the device will be returned.
:raises: DeviceError if adb timed out or if the command returned non-zero exit
code on the device, or if attempting to execute a command in privileged mode on an
unrooted device.
"""
self._check_ready()
if as_root and not self.is_rooted:
raise DeviceError('Attempting to execute "{}" as root on unrooted device.'.format(command))
if busybox:
if not self.is_rooted:
                raise DeviceError('Attempting to execute "{}" with busybox. '.format(command) +
                                  'Busybox can only be deployed to rooted devices.')
command = ' '.join([self.busybox, command])
if background:
return adb_background_shell(self.adb_name, command, as_root=as_root)
else:
return adb_shell(self.adb_name, command, timeout, check_exit_code, as_root)
def kick_off(self, command):
"""
Like execute but closes adb session and returns immediately, leaving the command running on the
device (this is different from execute(background=True) which keeps adb connection open and returns
a subprocess object).
.. note:: This relies on busybox's nohup applet and so won't work on unrooted devices.
Added in version 2.1.4
"""
if not self.is_rooted:
            raise DeviceError('kick_off uses busybox\'s nohup applet and so can only be run on a rooted device.')
try:
command = 'cd {} && busybox nohup {}'.format(self.working_directory, command)
output = self.execute(command, timeout=1, as_root=True)
except TimeoutError:
pass
else:
raise ValueError('Background command exited before timeout; got "{}"'.format(output))
def get_properties(self, context):
"""Captures and saves the information from /system/build.prop and /proc/version"""
props = {}
props['android_id'] = self.get_android_id()
buildprop_file = os.path.join(context.host_working_directory, 'build.prop')
if not os.path.isfile(buildprop_file):
self.pull_file('/system/build.prop', context.host_working_directory)
self._update_build_properties(buildprop_file, props)
context.add_run_artifact('build_properties', buildprop_file, 'export')
version_file = os.path.join(context.host_working_directory, 'version')
if not os.path.isfile(version_file):
self.pull_file('/proc/version', context.host_working_directory)
self._update_versions(version_file, props)
context.add_run_artifact('device_version', version_file, 'export')
return props
def getprop(self, prop=None):
"""Returns parsed output of Android getprop command. If a property is
specified, only the value for that property will be returned (with
        ``None`` returned if the property doesn't exist). Otherwise,
``wlauto.utils.android.AndroidProperties`` will be returned, which is
a dict-like object."""
props = AndroidProperties(self.execute('getprop'))
if prop:
return props[prop]
return props
# Android-specific methods. These either rely on specifics of adb or other
# Android-only concepts in their interface and/or implementation.
def forward_port(self, from_port, to_port):
"""
Forward a port on the device to a port on localhost.
:param from_port: Port on the device which to forward.
:param to_port: Port on the localhost to which the device port will be forwarded.
Ports should be specified using adb spec. See the "adb forward" section in "adb help".
"""
adb_command(self.adb_name, 'forward {} {}'.format(from_port, to_port), timeout=self.default_timeout)
def dump_logcat(self, outfile, filter_spec=None):
"""
Dump the contents of logcat, for the specified filter spec to the
specified output file.
See http://developer.android.com/tools/help/logcat.html
:param outfile: Output file on the host into which the contents of the
log will be written.
:param filter_spec: Logcat filter specification.
see http://developer.android.com/tools/debugging/debugging-log.html#filteringOutput
"""
if self._logcat_poller:
return self._logcat_poller.write_log(outfile)
else:
if filter_spec:
command = 'logcat -d -s {} > {}'.format(filter_spec, outfile)
else:
command = 'logcat -d > {}'.format(outfile)
return adb_command(self.adb_name, command, timeout=self.default_timeout)
def clear_logcat(self):
"""Clear (flush) logcat log."""
if self._logcat_poller:
return self._logcat_poller.clear_buffer()
else:
return adb_shell(self.adb_name, 'logcat -c', timeout=self.default_timeout)
def capture_screen(self, filepath):
"""Caputers the current device screen into the specified file in a PNG format."""
on_device_file = self.path.join(self.working_directory, 'screen_capture.png')
self.execute('screencap -p {}'.format(on_device_file))
self.pull_file(on_device_file, filepath)
self.delete_file(on_device_file)
def is_screen_on(self):
"""Returns ``True`` if the device screen is currently on, ``False`` otherwise."""
output = self.execute('dumpsys power')
match = SCREEN_STATE_REGEX.search(output)
if match:
return boolean(match.group(1))
else:
raise DeviceError('Could not establish screen state.')
def ensure_screen_is_on(self):
if not self.is_screen_on():
self.execute('input keyevent 26')
def disable_screen_lock(self):
"""
        Attempts to disable the screen lock on the device.
        .. note:: This does not always work...
        Added in version 2.1.4
"""
lockdb = '/data/system/locksettings.db'
sqlcommand = "update locksettings set value=\\'0\\' where name=\\'screenlock.disabled\\';"
self.execute('sqlite3 {} "{}"'.format(lockdb, sqlcommand), as_root=True)
def disable_selinux(self):
        # This may be invoked from initialize() so we can't use execute() or the
# standard API for doing this.
api_level = int(adb_shell(self.adb_name, 'getprop ro.build.version.sdk',
timeout=self.default_timeout).strip())
# SELinux was added in Android 4.3 (API level 18). Trying to
# 'getenforce' in earlier versions will produce an error.
if api_level >= 18:
se_status = self.execute('getenforce', as_root=True).strip()
if se_status == 'Enforcing':
self.execute('setenforce 0', as_root=True)
# Internal methods: do not use outside of the class.
def _update_build_properties(self, filepath, props):
try:
with open(filepath) as fh:
for line in fh:
line = re.sub(r'#.*', '', line).strip()
if not line:
continue
key, value = line.split('=', 1)
props[key] = value
except ValueError:
self.logger.warning('Could not parse build.prop.')
def _update_versions(self, filepath, props):
with open(filepath) as fh:
text = fh.read()
props['version'] = text
text = re.sub(r'#.*', '', text).strip()
match = re.search(r'^(Linux version .*?)\s*\((gcc version .*)\)$', text)
if match:
props['linux_version'] = match.group(1).strip()
props['gcc_version'] = match.group(2).strip()
else:
self.logger.warning('Could not parse version string.')
class _LogcatPoller(threading.Thread):
join_timeout = 5
def __init__(self, device, period, timeout=None):
super(_LogcatPoller, self).__init__()
self.adb_device = device.adb_name
self.logger = device.logger
self.period = period
self.timeout = timeout
self.stop_signal = threading.Event()
self.lock = threading.RLock()
self.buffer_file = tempfile.mktemp()
self.last_poll = 0
self.daemon = True
self.exc = None
def run(self):
self.logger.debug('Starting logcat polling.')
try:
while True:
if self.stop_signal.is_set():
break
with self.lock:
current_time = time.time()
if (current_time - self.last_poll) >= self.period:
self._poll()
time.sleep(0.5)
except Exception: # pylint: disable=W0703
self.exc = WorkerThreadError(self.name, sys.exc_info())
self.logger.debug('Logcat polling stopped.')
def stop(self):
self.logger.debug('Stopping logcat polling.')
self.stop_signal.set()
self.join(self.join_timeout)
if self.is_alive():
self.logger.error('Could not join logcat poller thread.')
if self.exc:
raise self.exc # pylint: disable=E0702
def clear_buffer(self):
self.logger.debug('Clearing logcat buffer.')
with self.lock:
adb_shell(self.adb_device, 'logcat -c', timeout=self.timeout)
with open(self.buffer_file, 'w') as _: # NOQA
pass
def write_log(self, outfile):
self.logger.debug('Writing logbuffer to {}.'.format(outfile))
with self.lock:
self._poll()
if os.path.isfile(self.buffer_file):
shutil.copy(self.buffer_file, outfile)
else: # there was no logcat trace at this time
with open(outfile, 'w') as _: # NOQA
pass
def close(self):
self.logger.debug('Closing logcat poller.')
if os.path.isfile(self.buffer_file):
os.remove(self.buffer_file)
def _poll(self):
with self.lock:
self.last_poll = time.time()
adb_command(self.adb_device, 'logcat -d >> {}'.format(self.buffer_file), timeout=self.timeout)
adb_command(self.adb_device, 'logcat -c', timeout=self.timeout)
class BigLittleDevice(AndroidDevice): # pylint: disable=W0223
parameters = [
Parameter('scheduler', default='hmp', override=True),
]
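
Taken together, the methods above form a fairly compact device API. As an illustration (not part of this commit), a typical interaction might look like the sketch below; ``device`` and ``context`` are assumed to be supplied by WA's execution framework.

def exercise_device(device, context):
    # Sketch only: device construction and configuration are handled by WA.
    device.connect()
    device.initialize(context)  # working dir, busybox, screen lock, SELinux
    print(device.getprop('ro.build.version.sdk'))
    remote = device.path.join(device.working_directory, 'payload.bin')
    device.push_file('payload.bin', remote)
    print(device.execute('ls {}'.format(device.working_directory)))
    device.dump_logcat('logcat.txt')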


@@ -0,0 +1,36 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wlauto.common.resources import FileResource
class ReventFile(FileResource):
name = 'revent'
def __init__(self, owner, stage):
super(ReventFile, self).__init__(owner)
self.stage = stage
class JarFile(FileResource):
name = 'jar'
class ApkFile(FileResource):
name = 'apk'


@@ -0,0 +1,425 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import time
from wlauto.core.extension import Parameter
from wlauto.core.workload import Workload
from wlauto.core.resource import NO_ONE
from wlauto.common.resources import ExtensionAsset, Executable
from wlauto.exceptions import WorkloadError, ResourceError
from wlauto.utils.android import ApkInfo
from wlauto.utils.types import boolean
import wlauto.common.android.resources
DELAY = 5
class UiAutomatorWorkload(Workload):
"""
Base class for all workloads that rely on a UI Automator JAR file.
This class should be subclassed by workloads that rely on Android UiAutomator
to work. It handles transferring the UI Automator JAR file to the device
and invoking it to run the workload. By default, it will look for the JAR file in
the same directory as the .py file for the workload (this can be changed by overriding
the ``uiauto_file`` property in the subclassing workload).
To initiate UI automation, the fully-qualified name of the Java class and the
corresponding method name are needed. By default, the package part of the class name
is derived from the JAR file name, and the class and method names are ``UiAutomation``
and ``runUiAutomation`` respectively. If you have generated the boilerplate for the
UiAutomator code using the ``create_workloads`` utility, then everything should be named
correctly. If you're creating the Java project manually, you need to make sure the names
match what is expected, or you can override the ``uiauto_package``, ``uiauto_class`` and
``uiauto_method`` class attributes with values that match your Java code.
You can also pass parameters to the JAR file. To do this, add the parameters to the
``self.uiauto_params`` dict inside your class's ``__init__`` or ``setup`` methods.
"""
supported_platforms = ['android']
uiauto_package = ''
uiauto_class = 'UiAutomation'
uiauto_method = 'runUiAutomation'
# Can be overridden by subclasses to adjust to the run time of specific
# benchmarks.
run_timeout = 4 * 60 # seconds
def __init__(self, device, _call_super=True, **kwargs): # pylint: disable=W0613
if _call_super:
super(UiAutomatorWorkload, self).__init__(device, **kwargs)
self.uiauto_file = None
self.device_uiauto_file = None
self.command = None
self.uiauto_params = {}
def init_resources(self, context):
self.uiauto_file = context.resolver.get(wlauto.common.android.resources.JarFile(self))
if not self.uiauto_file:
raise ResourceError('No UI automation JAR file found for workload {}.'.format(self.name))
self.device_uiauto_file = self.device.path.join(self.device.working_directory,
os.path.basename(self.uiauto_file))
if not self.uiauto_package:
self.uiauto_package = os.path.splitext(os.path.basename(self.uiauto_file))[0]
def setup(self, context):
method_string = '{}.{}#{}'.format(self.uiauto_package, self.uiauto_class, self.uiauto_method)
params_dict = self.uiauto_params
params_dict['workdir'] = self.device.working_directory
params = ''
for k, v in self.uiauto_params.iteritems():
params += ' -e {} {}'.format(k, v)
self.command = 'uiautomator runtest {}{} -c {}'.format(self.device_uiauto_file, params, method_string)
self.device.push_file(self.uiauto_file, self.device_uiauto_file)
self.device.killall('uiautomator')
def run(self, context):
result = self.device.execute(self.command, self.run_timeout)
if 'FAILURE' in result:
raise WorkloadError(result)
else:
self.logger.debug(result)
time.sleep(DELAY)
def update_result(self, context):
pass
def teardown(self, context):
self.device.delete_file(self.device_uiauto_file)
def validate(self):
if not self.uiauto_file:
raise WorkloadError('No UI automation JAR file found for workload {}.'.format(self.name))
if not self.uiauto_package:
raise WorkloadError('No UI automation package specified for workload {}.'.format(self.name))
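# A minimal sketch of a UiAutomatorWorkload subclass (the workload name and
# the parameter passed below are hypothetical, for illustration only).
class ExampleUiAutoWorkload(UiAutomatorWorkload):
    name = 'example_uiauto'
    description = 'Illustrates passing parameters through to the UiAutomator JAR.'

    def setup(self, context):
        # Entries in uiauto_params become '-e key value' arguments to the
        # uiautomator invocation; set them before the base setup() builds
        # the command.
        self.uiauto_params['iterations'] = 10
        super(ExampleUiAutoWorkload, self).setup(context)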
class ApkWorkload(Workload):
"""
A workload based on an APK file.
Defines the following attributes:
:package: The package name of the app. This is usually a Java-style name of the form
``com.companyname.appname``.
:activity: This is the initial activity of the app. This will be used to launch the
app during the setup.
:view: The class of the main view pane of the app. This needs to be defined in order
to collect SurfaceFlinger-derived statistics (such as FPS) for the app, but
may otherwise be left as ``None``.
:install_timeout: Timeout for the installation of the APK. This may vary wildly based on
the size and nature of a specific APK, and so should be defined on a
per-workload basis.
.. note:: To a lesser extent, this will also vary based on the
device and the nature of the adb connection (USB vs Ethernet),
so, as with all timeouts, some leeway must be included in
the specified value.
.. note:: Both package and activity for a workload may be obtained from the APK using
the ``aapt`` tool that comes with the ADT (Android Development Tools) bundle.
"""
package = None
activity = None
view = None
install_timeout = None
default_install_timeout = 300
parameters = [
Parameter('uninstall_apk', kind=boolean, default=False,
description="If ``True``, will uninstall workload's APK as part of teardown."),
]
def __init__(self, device, _call_super=True, **kwargs):
if _call_super:
super(ApkWorkload, self).__init__(device, **kwargs)
self.apk_file = None
self.apk_version = None
self.logcat_log = None
self.force_reinstall = kwargs.get('force_reinstall', False)
if not self.install_timeout:
self.install_timeout = self.default_install_timeout
def init_resources(self, context):
self.apk_file = context.resolver.get(wlauto.common.android.resources.ApkFile(self), version=getattr(self, 'version', None))
def setup(self, context):
self.initialize_package(context)
self.start_activity()
self.device.execute('am kill-all') # kill all *background* activities
self.device.clear_logcat()
def initialize_package(self, context):
installed_version = self.device.get_installed_package_version(self.package)
host_version = ApkInfo(self.apk_file).version_name
if installed_version != host_version:
if installed_version:
message = '{} host version: {}, device version: {}; re-installing...'
self.logger.debug(message.format(os.path.basename(self.apk_file), host_version, installed_version))
else:
message = '{} host version: {}, not found on device; installing...'
self.logger.debug(message.format(os.path.basename(self.apk_file), host_version))
self.force_reinstall = True
else:
message = '{} version {} found on both device and host.'
self.logger.debug(message.format(os.path.basename(self.apk_file), host_version))
if self.force_reinstall:
if installed_version:
self.device.uninstall(self.package)
self.install_apk(context)
else:
self.reset(context)
self.apk_version = host_version
def start_activity(self):
output = self.device.execute('am start -W -n {}/{}'.format(self.package, self.activity))
if 'Error:' in output:
self.device.execute('am force-stop {}'.format(self.package))  # this will dismiss any error dialogs
raise WorkloadError(output)
self.logger.debug(output)
def reset(self, context): # pylint: disable=W0613
self.device.execute('am force-stop {}'.format(self.package))
self.device.execute('pm clear {}'.format(self.package))
def install_apk(self, context):
output = self.device.install(self.apk_file, self.install_timeout)
if 'Failure' in output:
if 'ALREADY_EXISTS' in output:
self.logger.warn('Using already installed APK (did not uninstall properly?)')
else:
raise WorkloadError(output)
else:
self.logger.debug(output)
self.do_post_install(context)
def do_post_install(self, context):
""" May be overwritten by dervied classes."""
pass
def run(self, context):
pass
def update_result(self, context):
self.logcat_log = os.path.join(context.output_directory, 'logcat.log')
self.device.dump_logcat(self.logcat_log)
context.add_iteration_artifact(name='logcat',
path='logcat.log',
kind='log',
description='Logcat dump for the run.')
def teardown(self, context):
self.device.execute('am force-stop {}'.format(self.package))
if self.uninstall_apk:
self.device.uninstall(self.package)
def validate(self):
if not self.apk_file:
raise WorkloadError('No APK file found for workload {}.'.format(self.name))
AndroidBenchmark = ApkWorkload # backward compatibility
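# A minimal sketch of an ApkWorkload subclass (the package and activity below
# are hypothetical; the real values can be read from the APK with aapt).
class ExampleApkWorkload(ApkWorkload):
    name = 'example_apk'
    package = 'com.example.benchmark'  # hypothetical package name
    activity = '.MainActivity'         # hypothetical launch activity
    install_timeout = 120              # seconds; sized to the APK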
class ReventWorkload(Workload):
default_setup_timeout = 5 * 60 # in seconds
default_run_timeout = 10 * 60 # in seconds
def __init__(self, device, _call_super=True, **kwargs):
if _call_super:
super(ReventWorkload, self).__init__(device, **kwargs)
devpath = self.device.path
self.on_device_revent_binary = devpath.join(self.device.working_directory, 'revent')
self.on_device_setup_revent = devpath.join(self.device.working_directory, '{}.setup.revent'.format(self.device.name))
self.on_device_run_revent = devpath.join(self.device.working_directory, '{}.run.revent'.format(self.device.name))
self.setup_timeout = kwargs.get('setup_timeout', self.default_setup_timeout)
self.run_timeout = kwargs.get('run_timeout', self.default_run_timeout)
self.revent_setup_file = None
self.revent_run_file = None
def init_resources(self, context):
self.revent_setup_file = context.resolver.get(wlauto.common.android.resources.ReventFile(self, 'setup'))
self.revent_run_file = context.resolver.get(wlauto.common.android.resources.ReventFile(self, 'run'))
def setup(self, context):
self._check_revent_files(context)
self.device.killall('revent')
command = '{} replay {}'.format(self.on_device_revent_binary, self.on_device_setup_revent)
self.device.execute(command, timeout=self.setup_timeout)
def run(self, context):
command = '{} replay {}'.format(self.on_device_revent_binary, self.on_device_run_revent)
self.logger.debug('Replaying {}'.format(os.path.basename(self.on_device_run_revent)))
self.device.execute(command, timeout=self.run_timeout)
self.logger.debug('Replay completed.')
def update_result(self, context):
pass
def teardown(self, context):
self.device.delete_file(self.on_device_setup_revent)
self.device.delete_file(self.on_device_run_revent)
def _check_revent_files(self, context):
# check the revent binary
revent_binary = context.resolver.get(Executable(NO_ONE, self.device.abi, 'revent'))
if not os.path.isfile(revent_binary):
message = '{} does not exist. '.format(revent_binary)
message += 'Please build revent for your system and place it in that location.'
raise WorkloadError(message)
if not self.revent_setup_file:
message = '{}.setup.revent file does not exist. Please provide one for your device.'.format(self.device.name)
raise WorkloadError(message)
if not self.revent_run_file:
message = '{}.run.revent file does not exist. Please provide one for your device.'.format(self.device.name)
raise WorkloadError(message)
self.on_device_revent_binary = self.device.install_executable(revent_binary)
self.device.push_file(self.revent_run_file, self.on_device_run_revent)
self.device.push_file(self.revent_setup_file, self.on_device_setup_revent)
class AndroidUiAutoBenchmark(UiAutomatorWorkload, AndroidBenchmark):
def __init__(self, device, **kwargs):
UiAutomatorWorkload.__init__(self, device, **kwargs)
AndroidBenchmark.__init__(self, device, _call_super=False, **kwargs)
def init_resources(self, context):
UiAutomatorWorkload.init_resources(self, context)
AndroidBenchmark.init_resources(self, context)
def setup(self, context):
UiAutomatorWorkload.setup(self, context)
AndroidBenchmark.setup(self, context)
def update_result(self, context):
UiAutomatorWorkload.update_result(self, context)
AndroidBenchmark.update_result(self, context)
def teardown(self, context):
UiAutomatorWorkload.teardown(self, context)
AndroidBenchmark.teardown(self, context)
class GameWorkload(ApkWorkload, ReventWorkload):
"""
GameWorkload is the base class for all workloads that use revent files to
run.
For more in-depth details on how to record revent files, please see
:ref:`revent_files_creation`. To subclass this class, please refer to
:ref:`GameWorkload`.
Additionally, this class defines the following attributes:
:asset_file: A tarball containing additional assets for the workload. These are the assets
that are not part of the APK but would need to be downloaded by the workload
(usually, on first run of the app). Since the presence of a network connection
cannot be assumed on some devices, this provides an alternative means of obtaining
the assets.
:saved_state_file: A tarball containing the saved state for a workload. This tarball gets
deployed in the same way as the asset file; the difference is that
re-deploying it alone should be enough to reset the workload to a known
state (without having to reinstall the app or re-deploy the other
assets).
:loading_time: Time it takes for the workload to load after the initial activity has been
started.
"""
# May be optionally overwritten by subclasses
asset_file = None
saved_state_file = None
view = 'SurfaceView'
install_timeout = 500
loading_time = 10
def __init__(self, device, **kwargs): # pylint: disable=W0613
ApkWorkload.__init__(self, device, **kwargs)
ReventWorkload.__init__(self, device, _call_super=False, **kwargs)
self.logcat_process = None
self.module_dir = os.path.dirname(sys.modules[self.__module__].__file__)
self.revent_dir = os.path.join(self.module_dir, 'revent_files')
def init_resources(self, context):
ApkWorkload.init_resources(self, context)
ReventWorkload.init_resources(self, context)
def setup(self, context):
ApkWorkload.setup(self, context)
self.logger.debug('Waiting for the game to load...')
time.sleep(self.loading_time)
ReventWorkload.setup(self, context)
def do_post_install(self, context):
ApkWorkload.do_post_install(self, context)
self._deploy_assets(context)
def reset(self, context):
# If saved state exists, restore it; if not, do full
# uninstall/install cycle.
if self.saved_state_file:
self._deploy_resource_tarball(context, self.saved_state_file)
else:
ApkWorkload.reset(self, context)
self._deploy_assets(context)
def run(self, context):
ReventWorkload.run(self, context)
def teardown(self, context):
if not self.saved_state_file:
ApkWorkload.teardown(self, context)
else:
self.device.execute('am force-stop {}'.format(self.package))
ReventWorkload.teardown(self, context)
def _deploy_assets(self, context, timeout=300):
if self.asset_file:
self._deploy_resource_tarball(context, self.asset_file, timeout)
if self.saved_state_file: # must be deployed *after* asset tarball!
self._deploy_resource_tarball(context, self.saved_state_file, timeout)
def _deploy_resource_tarball(self, context, resource_file, timeout=300):
kind = 'data'
if ':' in resource_file:
kind, resource_file = resource_file.split(':', 1)
ondevice_cache = self.device.path.join(self.device.resource_cache, self.name, resource_file)
if not self.device.file_exists(ondevice_cache):
asset_tarball = context.resolver.get(ExtensionAsset(self, resource_file))
if not asset_tarball:
message = 'Could not find resource {} for workload {}.'
raise WorkloadError(message.format(resource_file, self.name))
# adb push will create intermediate directories if they don't
# exist.
self.device.push_file(asset_tarball, ondevice_cache)
device_asset_directory = self.device.path.join(self.device.external_storage_directory, 'Android', kind)
deploy_command = 'cd {} && {} tar -xzf {}'.format(device_asset_directory,
self.device.busybox,
ondevice_cache)
self.device.execute(deploy_command, timeout=timeout, as_root=True)
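# A minimal sketch of a GameWorkload subclass (all values are hypothetical).
# The {device}.setup.revent and {device}.run.revent recordings are resolved
# separately, as in ReventWorkload above.
class ExampleGame(GameWorkload):
    name = 'example_game'
    package = 'com.example.game'               # hypothetical package name
    activity = '.GameActivity'                 # hypothetical launch activity
    asset_file = 'example_game_assets.tar.gz'  # deployed by _deploy_assets()
    # an 'obb:' prefix would deploy the tarball under Android/obb instead of Android/data
    saved_state_file = 'example_game_save.tar.gz'
    loading_time = 20  # seconds to wait after launch before revent replay starts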

BIN
wlauto/common/bin/arm64/busybox Executable file


BIN
wlauto/common/bin/arm64/revent Executable file


BIN
wlauto/common/bin/armeabi/busybox Executable file


BIN
wlauto/common/bin/armeabi/revent Executable file



@@ -0,0 +1,16 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#


@@ -0,0 +1,966 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=E1101
import os
import re
from collections import namedtuple
from subprocess import CalledProcessError
from wlauto.core.extension import Parameter
from wlauto.core.device import Device, RuntimeParameter, CoreParameter
from wlauto.core.resource import NO_ONE
from wlauto.exceptions import ConfigError, DeviceError, TimeoutError, DeviceNotRespondingError
from wlauto.common.resources import Executable
from wlauto.utils.cpuinfo import Cpuinfo
from wlauto.utils.misc import convert_new_lines, escape_double_quotes
from wlauto.utils.ssh import SshShell
from wlauto.utils.types import boolean, list_of_strings
# a dict mapping a governor name to a list of its tunables that can't be read
WRITE_ONLY_TUNABLES = {
'interactive': ['boostpulse']
}
FstabEntry = namedtuple('FstabEntry', ['device', 'mount_point', 'fs_type', 'options', 'dump_freq', 'pass_num'])
PsEntry = namedtuple('PsEntry', 'user pid ppid vsize rss wchan pc state name')
class BaseLinuxDevice(Device): # pylint: disable=abstract-method
path_module = 'posixpath'
has_gpu = True
parameters = [
Parameter('scheduler', kind=str, default='unknown',
allowed_values=['unknown', 'smp', 'hmp', 'iks', 'ea', 'other'],
description="""
Specifies the type of multi-core scheduling model utilized in the device. The value
must be one of the following:
:unknown: A generic Device interface is used to interact with the underlying device
and the underlying scheduling model is unknown.
:smp: A standard single-core or Symmetric Multi-Processing system.
:hmp: ARM Heterogeneous Multi-Processing system.
:iks: Linaro In-Kernel Switcher.
:ea: ARM Energy-Aware scheduler.
:other: Any other system not covered by the above.
.. note:: most currently-available systems would fall under ``smp`` rather than
this value. ``other`` is there to future-proof against new schemes
not yet covered by WA.
"""),
Parameter('iks_switch_frequency', kind=int, default=None,
description="""
This is the switching frequency, in kilohertz, of IKS devices. This parameter *MUST NOT*
be set for non-IKS devices (i.e. ``scheduler != 'iks'``). If left unset for IKS devices,
it will default to ``800000``, i.e. 800MHz.
"""),
]
runtime_parameters = [
RuntimeParameter('sysfile_values', 'get_sysfile_values', 'set_sysfile_values', value_name='params'),
CoreParameter('${core}_cores', 'get_number_of_active_cores', 'set_number_of_active_cores',
value_name='number'),
CoreParameter('${core}_min_frequency', 'get_core_min_frequency', 'set_core_min_frequency',
value_name='freq'),
CoreParameter('${core}_max_frequency', 'get_core_max_frequency', 'set_core_max_frequency',
value_name='freq'),
CoreParameter('${core}_governor', 'get_core_governor', 'set_core_governor',
value_name='governor'),
CoreParameter('${core}_governor_tunables', 'get_core_governor_tunables', 'set_core_governor_tunables',
value_name='tunables'),
]
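# Example: with core_names ['a7', 'a7', 'a7', 'a15', 'a15'], the CoreParameter
# template '${core}_governor' expands to the runtime parameters 'a7_governor'
# and 'a15_governor', which map onto get_core_governor()/set_core_governor()
# for the corresponding core type.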
@property
def active_cpus(self):
val = self.get_sysfile_value('/sys/devices/system/cpu/online')
cpus = re.findall(r"(\d+-\d+|\d+)", val)  # handle multi-digit cpu ids, e.g. "10-15"
active_cpus = []
for cpu in cpus:
if '-' in cpu:
lo, hi = cpu.split('-')
active_cpus.extend(range(int(lo), int(hi) + 1))
else:
active_cpus.append(int(cpu))
return active_cpus
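# Example: if /sys/devices/system/cpu/online contains "0-2,4", active_cpus
# evaluates to [0, 1, 2, 4].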
@property
def number_of_cores(self):
"""
Added in version 2.1.4.
"""
if self._number_of_cores is None:
corere = re.compile(r'^\s*cpu\d+\s*$')
output = self.execute('ls /sys/devices/system/cpu')
self._number_of_cores = 0
for entry in output.split():
if corere.match(entry):
self._number_of_cores += 1
return self._number_of_cores
@property
def resource_cache(self):
return self.path.join(self.working_directory, '.cache')
@property
def file_transfer_cache(self):
return self.path.join(self.working_directory, '.transfer')
@property
def cpuinfo(self):
if not self._cpuinfo:
self._cpuinfo = Cpuinfo(self.execute('cat /proc/cpuinfo'))
return self._cpuinfo
def __init__(self, **kwargs):
super(BaseLinuxDevice, self).__init__(**kwargs)
self.busybox = None
self._is_initialized = False
self._is_ready = False
self._just_rebooted = False
self._is_rooted = None
self._available_frequencies = {}
self._available_governors = {}
self._available_governor_tunables = {}
self._number_of_cores = None
self._written_sysfiles = []
self._cpuinfo = None
def validate(self):
if len(self.core_names) != len(self.core_clusters):
raise ConfigError('core_names and core_clusters are of different lengths.')
if self.iks_switch_frequency is not None and self.scheduler != 'iks': # pylint: disable=E0203
raise ConfigError('iks_switch_frequency must NOT be set for non-IKS devices.')
if self.iks_switch_frequency is None and self.scheduler == 'iks': # pylint: disable=E0203
self.iks_switch_frequency = 800000 # pylint: disable=W0201
def initialize(self, context, *args, **kwargs):
self.execute('mkdir -p {}'.format(self.working_directory))
if self.is_rooted:
if not self.is_installed('busybox'):
self.busybox = self.deploy_busybox(context)
else:
self.busybox = 'busybox'
self.init(context, *args, **kwargs)
def get_sysfile_value(self, sysfile, kind=None):
"""
Get the contents of the specified sysfile.
:param sysfile: The file whose contents will be returned.
:param kind: The type of value to be expected in the sysfile. This can
be any Python callable that takes a single str argument.
If not specified or is None, the contents will be returned
as a string.
"""
output = self.execute('cat \'{}\''.format(sysfile), as_root=True).strip() # pylint: disable=E1103
if kind:
return kind(output)
else:
return output
def set_sysfile_value(self, sysfile, value, verify=True):
"""
Set the value of the specified sysfile. By default, the write will be verified afterwards;
this can be disabled by setting the ``verify`` parameter to ``False``.
"""
value = str(value)
self.execute('echo {} > \'{}\''.format(value, sysfile), check_exit_code=False, as_root=True)
if verify:
output = self.get_sysfile_value(sysfile)
if not output.strip() == value: # pylint: disable=E1103
message = 'Could not set the value of {} to {}'.format(sysfile, value)
raise DeviceError(message)
self._written_sysfiles.append(sysfile)
def get_sysfile_values(self):
"""
Returns a dict mapping paths of sysfiles that were previously set to their
current values.
"""
values = {}
for sysfile in self._written_sysfiles:
values[sysfile] = self.get_sysfile_value(sysfile)
return values
def set_sysfile_values(self, params):
"""
The plural version of ``set_sysfile_value``. Takes a single parameter which is a mapping of
file paths to values to be set. By default, every value written will be verified. This can
be disabled for individual paths by appending ``'!'`` to them.
"""
for sysfile, value in params.iteritems():
verify = not sysfile.endswith('!')
sysfile = sysfile.rstrip('!')
self.set_sysfile_value(sysfile, value, verify=verify)
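# A hypothetical usage sketch; a trailing '!' on a path suppresses read-back
# verification for that path only (useful for write-only nodes):
#
#     device.set_sysfile_values({
#         '/proc/sys/kernel/sched_migration_cost_ns': 500000,
#         '/sys/kernel/debug/tracing/trace_marker!': 'checkpoint',
#     })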
def deploy_busybox(self, context, force=False):
"""
Deploys the busybox binary to the device, and returns the path to the
binary on the device.
:param context: an instance of ExecutionContext
:param force: by default, if the binary is already present on the
device, it will not be deployed again. Setting force
to ``True`` overrides that behavior and ensures that the
binary is always copied. Defaults to ``False``.
:returns: The on-device path to the busybox binary.
"""
on_device_executable = self.path.join(self.binaries_directory, 'busybox')
if not force and self.file_exists(on_device_executable):
return on_device_executable
host_file = context.resolver.get(Executable(NO_ONE, self.abi, 'busybox'))
return self.install(host_file)
def list_file_systems(self):
output = self.execute('mount')
fstab = []
for line in output.split('\n'):
fstab.append(FstabEntry(*line.split()))
return fstab
# Process query and control
def get_pids_of(self, process_name):
"""Returns a list of PIDs of all processes with the specified name."""
result = self.execute('ps {}'.format(process_name[-15:]), check_exit_code=False).strip()
if result and 'not found' not in result:
return [int(x.split()[1]) for x in result.split('\n')[1:]]
else:
return []
def ps(self, **kwargs):
"""
Returns the list of running processes on the device. Keyword arguments may
be used to specify simple filters for columns.
Added in version 2.1.4
"""
lines = iter(convert_new_lines(self.execute('ps')).split('\n'))
lines.next() # header
result = []
for line in lines:
parts = line.split()
if parts:
result.append(PsEntry(*(parts[0:1] + map(int, parts[1:5]) + parts[5:])))
if not kwargs:
return result
else:
filtered_result = []
for entry in result:
if all(getattr(entry, k) == v for k, v in kwargs.iteritems()):
filtered_result.append(entry)
return filtered_result
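# A hypothetical usage sketch; keyword arguments filter on PsEntry fields:
#
#     root_processes = device.ps(user='root')  # only processes owned by root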
def kill(self, pid, signal=None, as_root=False): # pylint: disable=W0221
"""
Kill the specified process.
:param pid: PID of the process to kill.
:param signal: Specify which signal to send to the process. This must
be a valid value for the -s option of kill. Defaults to ``None``.
Modified in version 2.1.4: added ``signal`` parameter.
"""
signal_string = '-s {}'.format(signal) if signal else ''
self.execute('kill {} {}'.format(signal_string, pid), as_root=as_root)
def killall(self, process_name, signal=None, as_root=False): # pylint: disable=W0221
"""
Kill all processes with the specified name.
:param process_name: The name of the process(es) to kill.
:param signal: Specify which signal to send to the process. This must
be a valid value for the -s option of kill. Defaults to ``None``.
Modified in version 2.1.5: added ``as_root`` parameter.
"""
for pid in self.get_pids_of(process_name):
self.kill(pid, signal=signal, as_root=as_root)
# cpufreq
def list_available_cpu_governors(self, cpu):
"""Returns a list of governors supported by the cpu."""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
if cpu not in self._available_governors:
cmd = 'cat /sys/devices/system/cpu/{}/cpufreq/scaling_available_governors'.format(cpu)
output = self.execute(cmd, check_exit_code=True)
self._available_governors[cpu] = output.strip().split() # pylint: disable=E1103
return self._available_governors[cpu]
def get_cpu_governor(self, cpu):
"""Returns the governor currently set for the specified CPU."""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_governor'.format(cpu)
return self.get_sysfile_value(sysfile)
def set_cpu_governor(self, cpu, governor, **kwargs):
"""
Set the governor for the specified CPU.
See https://www.kernel.org/doc/Documentation/cpu-freq/governors.txt
:param cpu: The CPU for which the governor is to be set. This must be
the full name as it appears in sysfs, e.g. "cpu0".
:param governor: The name of the governor to be used. This must be
supported by the specific device.
Additional keyword arguments can be used to specify governor tunables for
governors that support them.
:note: On big.LITTLE all cores in a cluster must be using the same governor.
Setting the governor on any core in a cluster will also set it on all
other cores in that cluster.
:raises: ConfigError if governor is not supported by the CPU.
:raises: DeviceError if, for some reason, the governor could not be set.
"""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
supported = self.list_available_cpu_governors(cpu)
if governor not in supported:
raise ConfigError('Governor {} not supported for cpu {}'.format(governor, cpu))
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_governor'.format(cpu)
self.set_sysfile_value(sysfile, governor)
self.set_cpu_governor_tunables(cpu, governor, **kwargs)
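# A hypothetical usage sketch; governor tunables are passed through as keyword
# arguments to set_cpu_governor_tunables():
#
#     device.set_cpu_governor('cpu0', 'interactive', above_hispeed_delay=20000)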
def list_available_cpu_governor_tunables(self, cpu):
"""Returns a list of tunables available for the governor on the specified CPU."""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
governor = self.get_cpu_governor(cpu)
if governor not in self._available_governor_tunables:
try:
tunables_path = '/sys/devices/system/cpu/{}/cpufreq/{}'.format(cpu, governor)
self._available_governor_tunables[governor] = self.listdir(tunables_path)
except DeviceError: # probably an older kernel
try:
tunables_path = '/sys/devices/system/cpu/cpufreq/{}'.format(governor)
self._available_governor_tunables[governor] = self.listdir(tunables_path)
except DeviceError: # governor does not support tunables
self._available_governor_tunables[governor] = []
return self._available_governor_tunables[governor]
def get_cpu_governor_tunables(self, cpu):
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
governor = self.get_cpu_governor(cpu)
tunables = {}
for tunable in self.list_available_cpu_governor_tunables(cpu):
if tunable not in WRITE_ONLY_TUNABLES.get(governor, []):
try:
path = '/sys/devices/system/cpu/{}/cpufreq/{}/{}'.format(cpu, governor, tunable)
tunables[tunable] = self.get_sysfile_value(path)
except DeviceError: # May be an older kernel
path = '/sys/devices/system/cpu/cpufreq/{}/{}'.format(governor, tunable)
tunables[tunable] = self.get_sysfile_value(path)
return tunables
def set_cpu_governor_tunables(self, cpu, governor, **kwargs):
"""
Set tunables for the specified governor. Tunables should be specified as
keyword arguments. Which tunables and values are valid depends on the
governor.
:param cpu: The cpu for which the governor will be set. This must be the
full cpu name as it appears in sysfs, e.g. ``cpu0``.
:param governor: The name of the governor. Must be all lower case.
The rest should be keyword parameters mapping tunable name onto the value to
be set for it.
:raises: ConfigError if governor specified is not a valid governor name, or if
a tunable specified is not valid for the governor.
:raises: DeviceError if could not set tunable.
"""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
valid_tunables = self.list_available_cpu_governor_tunables(cpu)
for tunable, value in kwargs.iteritems():
if tunable in valid_tunables:
try:
path = '/sys/devices/system/cpu/{}/cpufreq/{}/{}'.format(cpu, governor, tunable)
self.set_sysfile_value(path, value)
except DeviceError: # May be an older kernel
path = '/sys/devices/system/cpu/cpufreq/{}/{}'.format(governor, tunable)
self.set_sysfile_value(path, value)
else:
message = 'Unexpected tunable {} for governor {} on {}.\n'.format(tunable, governor, cpu)
message += 'Available tunables are: {}'.format(valid_tunables)
raise ConfigError(message)
def enable_cpu(self, cpu):
"""
Enable the specified core.
:param cpu: CPU core to enable. This must be the full name as it
appears in sysfs, e.g. "cpu0".
"""
self.hotplug_cpu(cpu, online=True)
def disable_cpu(self, cpu):
"""
Disable the specified core.
:param cpu: CPU core to disable. This must be the full name as it
appears in sysfs, e.g. "cpu0".
"""
self.hotplug_cpu(cpu, online=False)
def hotplug_cpu(self, cpu, online):
"""
Hotplug the specified CPU either on or off.
See https://www.kernel.org/doc/Documentation/cpu-hotplug.txt
:param cpu: The CPU to be hotplugged. This must be
the full name as it appears in sysfs, e.g. "cpu0".
:param online: CPU will be enabled if this value bool()'s to True, and
will be disabled otherwise.
"""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
status = 1 if online else 0
sysfile = '/sys/devices/system/cpu/{}/online'.format(cpu)
self.set_sysfile_value(sysfile, status)
def list_available_cpu_frequencies(self, cpu):
"""Returns a list of frequencies supported by the cpu or an empty list
if not could be found."""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
if cpu not in self._available_frequencies:
try:
cmd = 'cat /sys/devices/system/cpu/{}/cpufreq/scaling_available_frequencies'.format(cpu)
output = self.execute(cmd)
self._available_frequencies[cpu] = map(int, output.strip().split()) # pylint: disable=E1103
except DeviceError:
# we return an empty list because on some devices scaling_available_frequencies
# is not generated, so an empty list serves as an indication of that
# http://adrynalyne-teachtofish.blogspot.co.uk/2011/11/how-to-enable-scalingavailablefrequenci.html
self._available_frequencies[cpu] = []
return self._available_frequencies[cpu]
def get_cpu_min_frequency(self, cpu):
"""
Returns the min frequency currently set for the specified CPU.
Warning: this method does not check whether the cpu is online; it will
attempt to read the minimum frequency regardless.
:raises: DeviceError if, for some reason, the frequency could not be read.
"""
if isinstance(cpu, int):
    cpu = 'cpu{}'.format(cpu)
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_min_freq'.format(cpu)
return self.get_sysfile_value(sysfile)
def set_cpu_min_frequency(self, cpu, frequency):
"""
Sets the minimum value for the CPU frequency. The actual frequency will
depend on the governor used and may vary during execution. The value should be
either an int or a string representing an integer. The value must also be
supported by the device; the available frequencies can be obtained by calling
list_available_cpu_frequencies() or examining
/sys/devices/system/cpu/cpuX/cpufreq/scaling_available_frequencies
on the device.
:raises: ConfigError if the frequency is not supported by the CPU.
:raises: DeviceError if, for some reason, frequency could not be set.
"""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
available_frequencies = self.list_available_cpu_frequencies(cpu)
try:
value = int(frequency)
if available_frequencies and value not in available_frequencies:
raise ConfigError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
value,
available_frequencies))
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_min_freq'.format(cpu)
self.set_sysfile_value(sysfile, value)
except ValueError:
raise ValueError('frequency must be an integer; got: "{}"'.format(frequency))
def get_cpu_max_frequency(self, cpu):
"""
Returns the max frequency currently set for the specified CPU.
Warning: this method does not check whether the cpu is online; it will
attempt to read the maximum frequency regardless.
:raises: DeviceError if, for some reason, the frequency could not be read.
"""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_max_freq'.format(cpu)
return self.get_sysfile_value(sysfile)
def set_cpu_max_frequency(self, cpu, frequency):
"""
Sets the maximum value for the CPU frequency. The actual frequency will
depend on the governor used and may vary during execution. The value should be
either an int or a string representing an integer. The value must also be
supported by the device; the available frequencies can be obtained by calling
list_available_cpu_frequencies() or examining
/sys/devices/system/cpu/cpuX/cpufreq/scaling_available_frequencies
on the device.
:raises: ConfigError if the frequency is not supported by the CPU.
:raises: DeviceError if, for some reason, frequency could not be set.
"""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
available_frequencies = self.list_available_cpu_frequencies(cpu)
try:
value = int(frequency)
if available_frequencies and value not in available_frequencies:
raise ConfigError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
value,
available_frequencies))
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_max_freq'.format(cpu)
self.set_sysfile_value(sysfile, value)
except ValueError:
raise ValueError('frequency must be an integer; got: "{}"'.format(frequency))
def get_cpuidle_states(self, cpu=0):
"""
Return map of cpuidle states with their descriptive names.
"""
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
cpuidle_states = {}
statere = re.compile(r'^\s*state\d+\s*$')
output = self.execute("ls /sys/devices/system/cpu/{}/cpuidle".format(cpu))
for entry in output.split():
if statere.match(entry):
cpuidle_states[entry] = self.get_sysfile_value("/sys/devices/system/cpu/{}/cpuidle/{}/desc".format(cpu, entry))
return cpuidle_states
# Core- and cluster-level mappings for the cpu-level APIs above. These
# APIs make the following assumptions, which were true for all devices that
# existed at the time of writing:
# 1. A cluster can only contain cores of one type.
# 2. All cores in a cluster are tied to the same DVFS domain, therefore
# changes to cpufreq for a core will affect all other cores on the
# same cluster.
def get_core_clusters(self, core, strict=True):
"""Returns the list of clusters that contain the specified core. if ``strict``
is ``True``, raises ValueError if no clusters has been found (returns empty list
if ``strict`` is ``False``)."""
core_indexes = [i for i, c in enumerate(self.core_names) if c == core]
clusters = sorted(list(set(self.core_clusters[i] for i in core_indexes)))
if strict and not clusters:
raise ValueError('No active clusters for core {}'.format(core))
return clusters
def get_cluster_cpu(self, cluster):
"""Returns the first *active* cpu for the cluster. If the entire cluster
has been hotplugged, this will raise a ``ValueError``."""
cpu_indexes = set([i for i, c in enumerate(self.core_clusters) if c == cluster])
active_cpus = sorted(list(cpu_indexes.intersection(self.active_cpus)))
if not active_cpus:
raise ValueError('All cpus for cluster {} are offline'.format(cluster))
return active_cpus[0]
def list_available_cluster_governors(self, cluster):
return self.list_available_cpu_governors(self.get_cluster_cpu(cluster))
def get_cluster_governor(self, cluster):
return self.get_cpu_governor(self.get_cluster_cpu(cluster))
def set_cluster_governor(self, cluster, governor, **tunables):
return self.set_cpu_governor(self.get_cluster_cpu(cluster), governor, **tunables)
def list_available_cluster_governor_tunables(self, cluster):
return self.list_available_cpu_governor_tunables(self.get_cluster_cpu(cluster))
def get_cluster_governor_tunables(self, cluster):
return self.get_cpu_governor_tunables(self.get_cluster_cpu(cluster))
def set_cluster_governor_tunables(self, cluster, governor, **tunables):
return self.set_cpu_governor_tunables(self.get_cluster_cpu(cluster), governor, **tunables)
def get_cluster_min_frequency(self, cluster):
return self.get_cpu_min_frequency(self.get_cluster_cpu(cluster))
def set_cluster_min_frequency(self, cluster, freq):
return self.set_cpu_min_frequency(self.get_cluster_cpu(cluster), freq)
def get_cluster_max_frequency(self, cluster):
return self.get_cpu_max_frequency(self.get_cluster_cpu(cluster))
def set_cluster_max_frequency(self, cluster, freq):
return self.set_cpu_max_frequency(self.get_cluster_cpu(cluster), freq)
def get_core_cpu(self, core):
for cluster in self.get_core_clusters(core):
try:
return self.get_cluster_cpu(cluster)
except ValueError:
pass
raise ValueError('No active CPUs found for core {}'.format(core))
def list_available_core_governors(self, core):
return self.list_available_cpu_governors(self.get_core_cpu(core))
def get_core_governor(self, core):
return self.get_cpu_governor(self.get_core_cpu(core))
def set_core_governor(self, core, governor, **tunables):
for cluster in self.get_core_clusters(core):
self.set_cluster_governor(cluster, governor, **tunables)
def list_available_core_governor_tunables(self, core):
return self.list_available_cpu_governor_tunables(self.get_core_cpu(core))
def get_core_governor_tunables(self, core):
return self.get_cpu_governor_tunables(self.get_core_cpu(core))
def set_core_governor_tunables(self, core, tunables):
for cluster in self.get_core_clusters(core):
governor = self.get_cluster_governor(cluster)
self.set_cluster_governor_tunables(cluster, governor, **tunables)
def get_core_min_frequency(self, core):
return self.get_cpu_min_frequency(self.get_core_cpu(core))
def set_core_min_frequency(self, core, freq):
for cluster in self.get_core_clusters(core):
self.set_cluster_min_frequency(cluster, freq)
def get_core_max_frequency(self, core):
return self.get_cpu_max_frequency(self.get_core_cpu(core))
def set_core_max_frequency(self, core, freq):
for cluster in self.get_core_clusters(core):
self.set_cluster_max_frequency(cluster, freq)
def get_number_of_active_cores(self, core):
if core not in self.core_names:
raise ValueError('Unexpected core: {}; must be in {}'.format(core, list(set(self.core_names))))
active_cpus = self.active_cpus
num_active_cores = 0
for i, c in enumerate(self.core_names):
if c == core and i in active_cpus:
num_active_cores += 1
return num_active_cores
def set_number_of_active_cores(self, core, number):
if core not in self.core_names:
raise ValueError('Unexpected core: {}; must be in {}'.format(core, list(set(self.core_names))))
core_ids = [i for i, c in enumerate(self.core_names) if c == core]
max_cores = len(core_ids)
if number > max_cores:
message = 'Attempting to set the number of active {} to {}; maximum is {}'
raise ValueError(message.format(core, number, max_cores))
for i in xrange(0, number):
self.enable_cpu(core_ids[i])
for i in xrange(number, max_cores):
self.disable_cpu(core_ids[i])
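# A hypothetical usage sketch; with core_names ['a7', 'a7', 'a7', 'a15', 'a15'],
# the call below leaves one a15 online and hotplugs the other one off:
#
#     device.set_number_of_active_cores('a15', 1)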
# internal methods
def _check_ready(self):
if not self._is_ready:
raise AttributeError('Device not ready.')
def _get_core_cluster(self, core):
"""Returns the first cluster that has cores of the specified type. Raises
value error if no cluster for the specified type has been found"""
core_indexes = [i for i, c in enumerate(self.core_names) if c == core]
core_clusters = set(self.core_clusters[i] for i in core_indexes)
if not core_clusters:
raise ValueError('No cluster found for core {}'.format(core))
return sorted(list(core_clusters))[0]
class LinuxDevice(BaseLinuxDevice):
platform = 'linux'
default_timeout = 30
delay = 2
long_delay = 3 * delay
ready_timeout = 60
parameters = [
Parameter('host', mandatory=True, description='Host name or IP address for the device.'),
Parameter('username', mandatory=True, description='User name for the account on the device.'),
Parameter('password', description='Password for the account on the device (for password-based auth).'),
Parameter('keyfile', description='Keyfile to be used for key-based authentication.'),
Parameter('port', kind=int, description='SSH port number on the device.'),
Parameter('use_telnet', kind=boolean, default=False,
description='Optionally, telnet may be used instead of ssh, though this is discouraged.'),
Parameter('working_directory', default=None,
description='''
Working directory to be used by WA. This must be in a location where the specified user
has write permissions. This will default to /home/<username>/wa (or to /root/wa, if
username is 'root').
'''),
Parameter('binaries_directory', default='/usr/local/bin',
description='Location of executable binaries on this device (must be in PATH).'),
Parameter('property_files', kind=list_of_strings,
default=['/proc/version', '/etc/debian_version', '/etc/lsb-release', '/etc/arch-release'],
description='''
A list of paths to files containing static OS properties. These will be pulled into the
__meta directory in output for each run in order to provide information about the platform.
These paths do not have to exist and will be ignored if the path is not present on a
particular device.
'''),
]
@property
def is_rooted(self):
if self._is_rooted is None:
try:
self.execute('ls /', as_root=True)
self._is_rooted = True
except DeviceError:
self._is_rooted = False
return self._is_rooted
def __init__(self, *args, **kwargs):
super(LinuxDevice, self).__init__(*args, **kwargs)
self.shell = None
self.local_binaries_directory = None
self._is_rooted = None
def validate(self):
if not self.password and not self.keyfile:
raise ConfigError('Either a password or a keyfile must be provided.')
if self.working_directory is None: # pylint: disable=access-member-before-definition
if self.username == 'root':
self.working_directory = '/root/wa' # pylint: disable=attribute-defined-outside-init
else:
self.working_directory = '/home/{}/wa'.format(self.username) # pylint: disable=attribute-defined-outside-init
self.local_binaries_directory = self.path.join(self.working_directory, 'bin')
def initialize(self, context, *args, **kwargs):
self.execute('mkdir -p {}'.format(self.local_binaries_directory))
self.execute('export PATH={}:$PATH'.format(self.local_binaries_directory))
super(LinuxDevice, self).initialize(context, *args, **kwargs)
# Power control
def reset(self):
self._is_ready = False
self.execute('reboot', as_root=True)
def hard_reset(self):
super(LinuxDevice, self).hard_reset()
self._is_ready = False
def boot(self, **kwargs):
self.reset()
def connect(self): # NOQA pylint: disable=R0912
self.shell = SshShell(timeout=self.default_timeout)
self.shell.login(self.host, self.username, self.password, self.keyfile, self.port, telnet=self.use_telnet)
self._is_ready = True
def disconnect(self): # NOQA pylint: disable=R0912
self.shell.logout()
self._is_ready = False
# Execution
def has_root(self):
try:
self.execute('ls /', as_root=True)
return True
except DeviceError as e:
if 'not in the sudoers file' not in e.message:
raise e
return False
def execute(self, command, timeout=default_timeout, check_exit_code=True, background=False,
as_root=False, strip_colors=True, **kwargs):
"""
Execute the specified command on the device using the active SSH (or telnet) session.
Parameters:
:param command: The command to be executed. It should appear exactly
as if you were typing it into a shell.
:param timeout: Time, in seconds, to wait for the command to return before aborting
and raising an error. Defaults to ``LinuxDevice.default_timeout``.
:param check_exit_code: If ``True``, the return code of the command on the Device will
be checked, and an exception will be raised if it is not 0.
Defaults to ``True``.
:param background: If ``True``, will create a new ssh shell rather than using
the default session, and will return it immediately. If this is ``True``,
``timeout``, ``strip_colors`` and (obviously) ``check_exit_code`` will
be ignored; also, with this, ``as_root=True`` is only valid if ``username``
for the device was set to ``root``.
:param as_root: If ``True``, will attempt to execute command in privileged mode. The device
must be rooted, otherwise an error will be raised. Defaults to ``False``.
Added in version 2.1.3
:returns: If ``background`` parameter is set to ``True``, the subprocess object will
be returned; otherwise, the contents of STDOUT from the device will be returned.
"""
self._check_ready()
if background:
if as_root and self.username != 'root':
raise DeviceError('Cannot execute in background with as_root=True unless user is root.')
return self.shell.background(command)
else:
return self.shell.execute(command, timeout, check_exit_code, as_root, strip_colors)
def kick_off(self, command):
"""
Like execute(), but returns immediately, leaving the command running on the
device detached from the shell session (this is different from execute(background=True),
which keeps the session open and returns a subprocess object).
"""
self._check_ready()
command = 'sh -c "{}" 1>/dev/null 2>/dev/null &'.format(escape_double_quotes(command))
return self.shell.execute(command)
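# A hypothetical usage sketch contrasting the two detachment styles:
#
#     device.kick_off('dd if=/dev/zero of=/dev/null')  # detached; returns immediately
#     proc = device.execute('top', background=True)    # returns a live session object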
# File management
def push_file(self, source, dest, as_root=False, timeout=default_timeout): # pylint: disable=W0221
self._check_ready()
if not as_root or self.username == 'root':
self.shell.push_file(source, dest, timeout=timeout)
else:
tempfile = self.path.join(self.working_directory, self.path.basename(dest))
self.shell.push_file(source, tempfile, timeout=timeout)
self.shell.execute('cp -r {} {}'.format(tempfile, dest), timeout=timeout, as_root=True)
def pull_file(self, source, dest, as_root=False, timeout=default_timeout): # pylint: disable=W0221
self._check_ready()
if not as_root or self.username == 'root':
self.shell.pull_file(source, dest, timeout=timeout)
else:
tempfile = self.path.join(self.working_directory, self.path.basename(source))
self.shell.execute('cp -r {} {}'.format(source, tempfile), timeout=timeout, as_root=True)
self.shell.execute('chown -R {} {}'.format(self.username, tempfile), timeout=timeout, as_root=True)
self.shell.pull_file(tempfile, dest, timeout=timeout)
def delete_file(self, filepath, as_root=False): # pylint: disable=W0221
self.execute('rm -rf {}'.format(filepath), as_root=as_root)
def file_exists(self, filepath):
output = self.execute('if [ -e \'{}\' ]; then echo 1; else echo 0; fi'.format(filepath))
return boolean(output.strip()) # pylint: disable=maybe-no-member
def listdir(self, path, as_root=False, **kwargs):
contents = self.execute('ls -1 {}'.format(path), as_root=as_root)
return [x.strip() for x in contents.split('\n')] # pylint: disable=maybe-no-member
def install(self, filepath, timeout=default_timeout, with_name=None): # pylint: disable=W0221
if self.is_rooted:
destpath = self.path.join(self.binaries_directory,
with_name and with_name or self.path.basename(filepath))
self.push_file(filepath, destpath, as_root=True)
self.execute('chmod a+x {}'.format(destpath), timeout=timeout, as_root=True)
else:
destpath = self.path.join(self.local_binaries_directory,
with_name and with_name or self.path.basename(filepath))
self.push_file(filepath, destpath)
self.execute('chmod a+x {}'.format(destpath), timeout=timeout)
return destpath
install_executable = install # compatibility
def uninstall(self, name):
path = self.path.join(self.local_binaries_directory, name)
self.delete_file(path)
uninstall_executable = uninstall # compatibility
def is_installed(self, name):
try:
self.execute('which {}'.format(name))
return True
except DeviceError:
return False
# misc
def ping(self):
try:
# May be triggered inside initialize()
self.shell.execute('ls /', timeout=5)
except (TimeoutError, CalledProcessError):
raise DeviceNotRespondingError(self.host)
def capture_screen(self, filepath):
if not self.is_installed('scrot'):
self.logger.debug('Could not take screenshot as scrot is not installed.')
return
try:
tempfile = self.path.join(self.working_directory, os.path.basename(filepath))
self.execute('DISPLAY=:0.0 scrot {}'.format(tempfile))
self.pull_file(tempfile, filepath)
self.delete_file(tempfile)
except DeviceError as e:
if "Can't open X dispay." not in e.message:
raise e
message = e.message.split('OUTPUT:', 1)[1].strip()
self.logger.debug('Could not take screenshot: {}'.format(message))
def is_screen_on(self):
pass # TODO
def ensure_screen_is_on(self):
pass # TODO
def get_properties(self, context):
for propfile in self.property_files:
if not self.file_exists(propfile):
continue
normname = propfile.lstrip(self.path.sep).replace(self.path.sep, '.')
outfile = os.path.join(context.host_working_directory, normname)
self.pull_file(propfile, outfile)
return {}


@@ -0,0 +1,64 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from wlauto.core.resource import Resource
class FileResource(Resource):
"""
Base class for all resources that are a regular file in the
file system.
"""
def delete(self, instance):
os.remove(instance)
class File(FileResource):
name = 'file'
def __init__(self, owner, path, url=None):
super(File, self).__init__(owner)
self.path = path
self.url = url
def __str__(self):
return '<{}\'s {} {}>'.format(self.owner, self.name, self.path or self.url)
class ExtensionAsset(File):
name = 'extension_asset'
def __init__(self, owner, path):
super(ExtensionAsset, self).__init__(owner, os.path.join(owner.name, path))
class Executable(FileResource):
name = 'executable'
def __init__(self, owner, platform, filename):
super(Executable, self).__init__(owner)
self.platform = platform
self.filename = filename
def __str__(self):
return '<{}\'s {} {}>'.format(self.owner, self.platform, self.filename)
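# A hypothetical usage sketch; resources are resolved through the execution
# context's resolver, for example from a workload's init_resources():
#
#     binary = context.resolver.get(Executable(NO_ONE, self.device.abi, 'revent'))
#     tarball = context.resolver.get(ExtensionAsset(self, 'assets.tar.gz'))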

284
wlauto/config_example.py Normal file

@@ -0,0 +1,284 @@
"""
Default config for Workload Automation. DO NOT MODIFY this file. This file
gets copied to ~/.workload_automation/config.py on initial run of run_workloads.
Add your configuration to that file instead.
"""
# *** WARNING: ***
# Configuration listed in this file is NOT COMPLETE. This file sets the default
# configuration for WA and gives EXAMPLES of other configuration available. It
# is not supposed to be an exhaustive list.
# PLEASE REFER TO WA DOCUMENTATION FOR THE COMPLETE LIST OF AVAILABLE
# EXTENSIONS AND THEIR CONFIGURATION.
# This defines when the device will be rebooted during Workload Automation execution. #
# #
# Valid policies are: #
# never: The device will never be rebooted. #
# as_needed: The device will only be rebooted if the need arises (e.g. if it #
# becomes unresponsive). #
# initial: The device will be rebooted when the execution first starts, just before executing #
# the first workload spec. #
# each_spec: The device will be rebooted before running a new workload spec. #
# each_iteration: The device will be rebooted before each new iteration. #
# #
reboot_policy = 'as_needed'
# Defines the order in which the agenda spec will be executed. At the moment, #
# the following execution orders are supported: #
# #
# by_iteration: The first iteration of each workload spec is executed one after the other, #
# so all workloads are executed before proceeding on to the second iteration. #
# This is the default if no order is explicitly specified. #
# If multiple sections were specified, this will also split them up, so that specs #
# in the same section are further apart in the execution order. #
# by_section: Same as "by_iteration", but runs specs from the same section one after the other. #
# by_spec: All iterations of the first spec are executed before moving on to the next #
# spec. This may also be specified as ``"classic"``, as this was the way #
# workloads were executed in earlier versions of WA. #
# random: Randomises the order in which specs run. #
execution_order = 'by_iteration'
####################################################################################################
######################################### Device Settings ##########################################
####################################################################################################
# Specify the device you want to run workload automation on. This must be a #
# string with the ID of the device (e.g. 'generic_android'). #
# #
device = 'generic_android'
# Configuration options that will be passed onto the device. These are obviously device-specific, #
# so check the documentation for the particular device to find out which options and values are #
# valid. The settings listed below are common to all devices #
# #
device_config = dict(
# The name used by adb to identify the device. Use "adb devices" in bash to list
# the devices currently seen by adb.
#adb_name='10.109.173.2:5555',
# The directory on the device that WA will use to push files to
#working_directory='/sdcard/wa-working',
# This specifies the device's CPU cores. The order must match how they
# appear in cpufreq. The example below is for TC2.
# core_names = ['a7', 'a7', 'a7', 'a15', 'a15']
# Specifies cluster mapping for the device's cores.
# core_clusters = [0, 0, 0, 1, 1]
)
####################################################################################################
################################### Instrumention Configuration ####################################
####################################################################################################
# This defines the additional instrumentation that will be enabled during workload execution, #
# which in turn determines what additional data (such as /proc/interrupts content or Streamline #
# traces) will be available in the results directory. #
# #
instrumentation = [
# Records the time it took to run the workload
'execution_time',
# Collects /proc/interrupts before and after execution and does a diff.
'interrupts',
# Collects the contents of /sys/devices/system/cpu before and after execution and does a diff.
'cpufreq',
# Gets energy usage for the workload from HWMON devices.
# NOTE: the hardware needs to have the right sensors in order for this to work
#'hwmon',
# Run perf in the background during workload execution and then collect the results. perf is a
# standard Linux performance analysis tool.
#'perf',
# Collect Streamline traces during workload execution. Streamline is part of DS-5
#'streamline',
# Collects traces by interacting with Ftrace Linux kernel internal tracer
#'trace-cmd',
# Obtains the power consumption of the target device's cores as measured by a National
# Instruments Data Acquisition (DAQ) device.
#'daq',
# Collects CCI counter data.
#'cci_pmu_logger',
# Collects FPS (Frames Per Second) and related metrics (such as jank) from
# the View of the workload (Note: only a single View per workload is
# supported at the moment, so this is mainly useful for games).
#'fps',
]
####################################################################################################
################################# Result Processors Configuration ##################################
####################################################################################################
# Specifies how results will be processed and presented. #
# #
result_processors = [
# Creates a results.txt file for each iteration that lists all collected metrics
# in "name = value (units)" format
'standard',
# Creates a results.csv that contains metrics for all iterations of all workloads
# in the .csv format.
'csv',
# Creates a summary.csv that contains summary metrics for all iterations of all
# workloads in the .csv format. Summary metrics are defined on a per-workload
# basis and are typically things like overall scores. The contents of summary.csv are
# always a subset of the contents of results.csv (if it is generated).
'summary_csv',
# Creates a results.json that contains metrics for all iterations of all workloads
# in the JSON format
#'json',
# Write results to an sqlite3 database. By default, a new database will be
# generated for each run, however it is possible to specify a path to an
# existing DB file (see result processor configuration below), in which
# case results from multiple runs may be stored in the one file.
#'sqlite',
]
####################################################################################################
################################### Logging output Configuration ###################################
####################################################################################################
# Specify the format of logging messages. The format uses the old (%-style) formatting syntax: #
# #
# http://docs.python.org/2/library/stdtypes.html#string-formatting-operations #
# #
# The attributes that can be used in formats are listed here: #
# #
# http://docs.python.org/2/library/logging.html#logrecord-attributes #
# #
logging = {
# Log file format
'file format': '%(asctime)s %(levelname)-8s %(name)s: %(message)s',
# Verbose console output format
'verbose format': '%(asctime)s %(levelname)-8s %(name)s: %(message)s',
# Regular console output format
'regular format': '%(levelname)-8s %(message)s',
# Colouring the console output
'colour_enabled': True,
}
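# As an illustration, given the 'regular format' above, a hypothetical log
# record with level INFO and message 'Initializing run' would be rendered as:
#   INFO     Initializing run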
####################################################################################################
#################################### Instruments Configuration #####################################
####################################################################################################
# Instrumentation configuration covers settings specific to individual instruments. Some #
# instruments require specific settings in order for them to work. These settings are #
# specified here. #
# Note that these settings only take effect if the corresponding instrument is
# enabled above.
####################################################################################################
######################################## perf configuration ########################################
# The hardware events (such as instructions executed, cache misses suffered, or branches
# mispredicted) to be reported by perf. Available events can be listed by typing
# 'perf list' on the device.
#perf_events = ['migrations', 'cs']
# The perf options; these can be obtained from the man page for perf-record
#perf_options = '-a -i'
####################################################################################################
####################################### hwmon configuration ########################################
# The kinds of sensors the hwmon instrument will look for
#hwmon_sensors = ['energy', 'temp']
####################################################################################################
##################################### streamline configuration #####################################
# The port number on which gatord will listen
#port = 8080
# Whether to run 'streamline -analyze' on the captured data.
#streamline_analyze = True
# Whether to generate a CSV report
#streamline_report_csv = True
####################################################################################################
###################################### trace-cmd configuration #####################################
# trace-cmd events to be traced. The available events can be listed by running
# 'trace-cmd list -e' on the device (requires root)
#trace_events = ['power*']
####################################################################################################
######################################### DAQ configuration ########################################
# The host address of the machine running the DAQ server with which the instrument communicates
#daq_server_host = '10.1.17.56'
# The port number on which the DAQ server listens
#daq_server_port = 56788
# The values of resistors 1 and 2 (in Ohms) across which the voltages are measured
#daq_resistor_values = [0.002, 0.002]
####################################################################################################
################################### cci_pmu_logger configuration ###################################
# The events to be counted by the PMU
# NOTE: The number of events must not exceed the number of counters available (which is 4 for CCI-400)
#cci_pmu_events = ['0x63', '0x83']
# The name of the events which will be used when reporting PMU counts
#cci_pmu_event_labels = ['event_0x63', 'event_0x83']
# The period (in jiffies) between counter reads
#cci_pmu_period = 15
####################################################################################################
################################### fps configuration ##############################################
# Data points below this FPS will be dropped as not constituting "real" gameplay. The assumption
# is that while actually running, the FPS in the game will not drop below this threshold,
# except on loading screens, menus, etc., which should not contribute to the FPS calculation.
#fps_drop_threshold=5
# If set to True, this will keep the raw dumpsys output in the results directory (this is mainly
# used for debugging). Note: frames.csv with collected frames data will always be generated
# regardless of this setting.
#fps_keep_raw=False
####################################################################################################
################################# Result Processor Configuration ###################################
####################################################################################################
# Specifies an alternative database to store results in. If the file does not
# exist, it will be created (the directory of the file must exist, however). If
# the file does exist, the results will be added to the existing data set (each
# run has a UUID, so results won't clash even if identical agendas were used).
# Note that in order for this to work, the version of the schema used to generate
# the DB file must match that of the schema used for the current run. Please
# see the "What's new" section in the WA docs to check if the schema has changed in
# recent releases of WA.
#sqlite_database = '/work/results/myresults.sqlite'
# If the file specified by sqlite_database exists, setting this to True will
# cause that file to be overwritten rather than updated -- existing results in
# the file will be lost.
#sqlite_overwrite = False
# distribution: internal
####################################################################################################
#################################### Resource Getter configuration #################################
####################################################################################################
# The location on your system where /arm/scratch is mounted. Used by
# Scratch resource getter.
#scratch_mount_point = '/arm/scratch'
# end distribution

16
wlauto/core/__init__.py Normal file
View File

@@ -0,0 +1,16 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

244
wlauto/core/agenda.py Normal file
View File

@@ -0,0 +1,244 @@
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from copy import copy
from collections import OrderedDict, defaultdict
from wlauto.exceptions import ConfigError
from wlauto.utils.misc import load_struct_from_yaml, LoadSyntaxError
from wlauto.utils.types import counter, reset_counter
import yaml
def get_aliased_param(d, aliases, default=None, pop=True):
alias_map = [i for i, a in enumerate(aliases) if a in d]
if len(alias_map) > 1:
message = 'Only one of {} may be specified in a single entry'
raise ConfigError(message.format(aliases))
elif alias_map:
if pop:
return d.pop(aliases[alias_map[0]])
else:
return d[aliases[alias_map[0]]]
else:
return default
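# A minimal illustration of the helper above (the dict below is hypothetical,
# not WA API):
#   d = {'params': {'frequency': 'max'}}
#   get_aliased_param(d, ['workload_parameters', 'workload_params', 'params'])
#   returns {'frequency': 'max'} and pops 'params' from d; naming two of the
#   aliases in the same entry raises ConfigError.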
class AgendaEntry(object):
def to_dict(self):
return copy(self.__dict__)
class AgendaWorkloadEntry(AgendaEntry):
"""
Specifies execution of a workload, including things like the number of
iterations, device runtime_parameters configuration, etc.
"""
def __init__(self, **kwargs):
super(AgendaWorkloadEntry, self).__init__()
self.id = kwargs.pop('id')
self.workload_name = get_aliased_param(kwargs, ['workload_name', 'name'])
if not self.workload_name:
raise ConfigError('No workload name specified in entry {}'.format(self.id))
self.label = kwargs.pop('label', self.workload_name)
self.number_of_iterations = kwargs.pop('iterations', None)
self.boot_parameters = get_aliased_param(kwargs,
['boot_parameters', 'boot_params'],
default=OrderedDict())
self.runtime_parameters = get_aliased_param(kwargs,
['runtime_parameters', 'runtime_params'],
default=OrderedDict())
self.workload_parameters = get_aliased_param(kwargs,
['workload_parameters', 'workload_params', 'params'],
default=OrderedDict())
self.instrumentation = kwargs.pop('instrumentation', [])
self.flash = kwargs.pop('flash', OrderedDict())
if kwargs:
raise ConfigError('Invalid entry(ies) in workload {}: {}'.format(self.id, ', '.join(kwargs.keys())))
class AgendaSectionEntry(AgendaEntry):
"""
Specifies an agenda section: common configuration (iterations, runtime
parameters, etc.) that will be applied to each workload within the section.
"""
def __init__(self, agenda, **kwargs):
super(AgendaSectionEntry, self).__init__()
self.id = kwargs.pop('id')
self.number_of_iterations = kwargs.pop('iterations', None)
self.boot_parameters = get_aliased_param(kwargs,
['boot_parameters', 'boot_params'],
default=OrderedDict())
self.runtime_parameters = get_aliased_param(kwargs,
['runtime_parameters', 'runtime_params', 'params'],
default=OrderedDict())
self.workload_parameters = get_aliased_param(kwargs,
['workload_parameters', 'workload_params'],
default=OrderedDict())
self.instrumentation = kwargs.pop('instrumentation', [])
self.flash = kwargs.pop('flash', OrderedDict())
self.workloads = []
for w in kwargs.pop('workloads', []):
self.workloads.append(agenda.get_workload_entry(w))
if kwargs:
raise ConfigError('Invalid entry(ies) in section {}: {}'.format(self.id, ', '.join(kwargs.keys())))
def to_dict(self):
d = copy(self.__dict__)
d['workloads'] = [w.to_dict() for w in self.workloads]
return d
class AgendaGlobalEntry(AgendaEntry):
"""
Workload configuration global to all workloads.
"""
def __init__(self, **kwargs):
super(AgendaGlobalEntry, self).__init__()
self.number_of_iterations = kwargs.pop('iterations', None)
self.boot_parameters = get_aliased_param(kwargs,
['boot_parameters', 'boot_params'],
default=OrderedDict())
self.runtime_parameters = get_aliased_param(kwargs,
['runtime_parameters', 'runtime_params', 'params'],
default=OrderedDict())
self.workload_parameters = get_aliased_param(kwargs,
['workload_parameters', 'workload_params'],
default=OrderedDict())
self.instrumentation = kwargs.pop('instrumentation', [])
self.flash = kwargs.pop('flash', OrderedDict())
if kwargs:
raise ConfigError('Invalid entries in global section: {}'.format(kwargs))
class Agenda(object):
def __init__(self, source=None):
self.filepath = None
self.config = None
self.global_ = None
self.sections = []
self.workloads = []
self._seen_ids = defaultdict(set)
if source:
try:
reset_counter('section')
reset_counter('workload')
self._load(source)
except (ConfigError, LoadSyntaxError, SyntaxError), e:
raise ConfigError(str(e))
def add_workload_entry(self, w):
entry = self.get_workload_entry(w)
self.workloads.append(entry)
def get_workload_entry(self, w):
if isinstance(w, basestring):
w = {'name': w}
if not isinstance(w, dict):
raise ConfigError('Invalid workload entry: "{}" in {}'.format(w, self.filepath))
self._assign_id_if_needed(w, 'workload')
return AgendaWorkloadEntry(**w)
def _load(self, source):
raw = self._load_raw_from_source(source)
if not isinstance(raw, dict):
message = '{} does not contain a valid agenda structure; top level must be a dict.'
raise ConfigError(message.format(self.filepath))
for k, v in raw.iteritems():
if k == 'config':
self.config = v
elif k == 'global':
self.global_ = AgendaGlobalEntry(**v)
elif k == 'sections':
self._collect_existing_ids(v, 'section')
for s in v:
if not isinstance(s, dict):
raise ConfigError('Invalid section entry: "{}" in {}'.format(s, self.filepath))
self._collect_existing_ids(s.get('workloads', []), 'workload')
for s in v:
self._assign_id_if_needed(s, 'section')
self.sections.append(AgendaSectionEntry(self, **s))
elif k == 'workloads':
self._collect_existing_ids(v, 'workload')
for w in v:
self.workloads.append(self.get_workload_entry(w))
else:
raise ConfigError('Unexpected agenda entry "{}" in {}'.format(k, self.filepath))
def _load_raw_from_source(self, source):
if hasattr(source, 'read') and hasattr(source, 'name'): # file-like object
self.filepath = source.name
raw = load_struct_from_yaml(text=source.read())
elif isinstance(source, basestring):
if os.path.isfile(source):
self.filepath = source
raw = load_struct_from_yaml(filepath=self.filepath)
else: # assume YAML text
raw = load_struct_from_yaml(text=source)
else:
raise ConfigError('Unknown agenda source: {}'.format(source))
return raw
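# For illustration, each of the following would be accepted by _load_raw_from_source
# (paths are hypothetical; the text must be valid YAML):
#   Agenda('/path/to/agenda.yaml')         # path to an agenda file
#   Agenda(open('/path/to/agenda.yaml'))   # file-like object
#   Agenda('workloads:\n- dhrystone')      # YAML text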
def _collect_existing_ids(self, ds, pool):
# Collection needs to take place first so that auto IDs can be
# correctly assigned, e.g. if someone explicitly specified an ID
# of '1' for one of the workloads.
for d in ds:
if isinstance(d, dict) and 'id' in d:
did = str(d['id'])
if did in self._seen_ids[pool]:
raise ConfigError('Duplicate {} ID: {}'.format(pool, did))
self._seen_ids[pool].add(did)
def _assign_id_if_needed(self, d, pool):
# Also enforces string IDs
if d.get('id') is None:
did = str(counter(pool))
while did in self._seen_ids[pool]:
did = str(counter(pool))
d['id'] = did
self._seen_ids[pool].add(did)
else:
d['id'] = str(d['id'])
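# Illustration of ID assignment (hypothetical entries): given the workloads
# [{'name': 'dhrystone', 'id': '2'}, {'name': 'memcpy'}], the explicit ID '2'
# is collected first; the second entry is then auto-assigned id '1', and a
# third would receive '3', as the counter skips over IDs already seen
# (note that IDs are always coerced to strings).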
# Modifying the yaml parser to use an OrderedDict, rather than a regular Python
# dict for mappings. This preserves the order in which the items are
# specified. See
# http://stackoverflow.com/a/21048064
_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
def dict_representer(dumper, data):
return dumper.represent_mapping(_mapping_tag, data.iteritems())
def dict_constructor(loader, node):
return OrderedDict(loader.construct_pairs(node))
yaml.add_representer(OrderedDict, dict_representer)
yaml.add_constructor(_mapping_tag, dict_constructor)
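# With the above in place, mappings loaded from YAML preserve their order,
# e.g. (illustrative):
#   yaml.load('a: 1\nb: 2')   # -> OrderedDict([('a', 1), ('b', 2)])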

195
wlauto/core/bootstrap.py Normal file
View File

@@ -0,0 +1,195 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import imp
import sys
import re
from collections import namedtuple, OrderedDict
from wlauto.exceptions import ConfigError
from wlauto.utils.misc import merge_dicts, normalize, unique
from wlauto.utils.types import identifier
_this_dir = os.path.dirname(__file__)
_user_home = os.path.expanduser('~')
# loading our external packages over those from the environment
sys.path.insert(0, os.path.join(_this_dir, '..', 'external'))
# Defines extension points for the WA framework. This table is used by the
# ExtensionLoader (among other places) to identify extensions it should look
# for.
# Parameters that need to be specified in a tuple for each extension type:
# name: The name of the extension type. This will be used to resolve get_
# and list_methods in the extension loader.
# class: The base class for the extension type. Extension loader will check
# whether classes it discovers are subclassed from this.
# default package: This is the package that will be searched for extensions
# of that type by default (if not other packages are
# specified when creating the extension loader). This
# package *must* exist.
# default path: This is the subdirectory under the environment_root which
# will be searched for extensions of this type by default (if
# no other paths are specified when creating the extension
# loader). This directory will be automatically created if it
# does not exist.
#pylint: disable=C0326
_EXTENSION_TYPE_TABLE = [
# name, class, default package, default path
('command', 'wlauto.core.command.Command', 'wlauto.commands', 'commands'),
('device', 'wlauto.core.device.Device', 'wlauto.devices', 'devices'),
('instrument', 'wlauto.core.instrumentation.Instrument', 'wlauto.instrumentation', 'instruments'),
('module', 'wlauto.core.extension.Module', 'wlauto.modules', 'modules'),
('resource_getter', 'wlauto.core.resource.ResourceGetter', 'wlauto.resource_getters', 'resource_getters'),
('result_processor', 'wlauto.core.result.ResultProcessor', 'wlauto.result_processors', 'result_processors'),
('workload', 'wlauto.core.workload.Workload', 'wlauto.workloads', 'workloads'),
]
_Extension = namedtuple('_Extension', 'name, cls, default_package, default_path')
_extensions = [_Extension._make(ext) for ext in _EXTENSION_TYPE_TABLE] # pylint: disable=W0212
class ConfigLoader(object):
"""
This class is responsible for loading and validating config files.
"""
def __init__(self):
self._loaded = False
self._config = {}
self.config_count = 0
self._loaded_files = []
self.environment_root = None
self.output_directory = 'wa_output'
self.reboot_after_each_iteration = True
self.dependencies_directory = None
self.agenda = None
self.extension_packages = []
self.extension_paths = []
self.extensions = []
self.verbosity = 0
self.debug = False
self.package_directory = os.path.dirname(_this_dir)
self.commands = {}
@property
def meta_directory(self):
return os.path.join(self.output_directory, '__meta')
@property
def log_file(self):
return os.path.join(self.output_directory, 'run.log')
def update(self, source):
if isinstance(source, dict):
self.update_from_dict(source)
else:
self.config_count += 1
self.update_from_file(source)
def update_from_file(self, source):
try:
new_config = imp.load_source('config_{}'.format(self.config_count), source)
except SyntaxError, e:
message = 'Syntax error in config: {}'.format(str(e))
raise ConfigError(message)
self._config = merge_dicts(self._config, vars(new_config),
list_duplicates='first', match_types=False, dict_type=OrderedDict)
self._loaded_files.append(source)
self._loaded = True
def update_from_dict(self, source):
normalized_source = dict((identifier(k), v) for k, v in source.iteritems())
self._config = merge_dicts(self._config, normalized_source, list_duplicates='first',
match_types=False, dict_type=OrderedDict)
self._loaded = True
def get_config_paths(self):
return [lf.rstrip('c') for lf in self._loaded_files]
def _check_loaded(self):
if not self._loaded:
raise ConfigError('Config file not loaded.')
def __getattr__(self, name):
self._check_loaded()
return self._config.get(normalize(name))
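# A short illustration of the loader above (paths and values are hypothetical):
#   settings.update('/path/to/config.py')   # merge settings from a config module
#   settings.update({'device': 'tc2'})      # merge settings from a dict; keys are
#                                           # normalised to identifiers first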
def init_environment(env_root, dep_dir, extension_paths, overwrite_existing=False): # pylint: disable=R0914
"""Initialise a fresh user environment creating the workload automation"""
if os.path.exists(env_root):
if not overwrite_existing:
raise ConfigError('Environment {} already exists.'.format(env_root))
shutil.rmtree(env_root)
os.makedirs(env_root)
with open(os.path.join(_this_dir, '..', 'config_example.py')) as rf:
text = re.sub(r'""".*?"""', '', rf.read(), 1, re.DOTALL)
with open(os.path.join(env_root, 'config.py'), 'w') as wf:
wf.write(text)
os.makedirs(dep_dir)
for path in extension_paths:
os.makedirs(path)
# If running with sudo on POSIX, change the ownership to the real user.
real_user = os.getenv('SUDO_USER')
if real_user:
import pwd # done here as module won't import on win32
user_entry = pwd.getpwnam(real_user)
uid, gid = user_entry.pw_uid, user_entry.pw_gid
os.chown(env_root, uid, gid)
# why, oh why isn't there a recursive=True option for os.chown?
for root, dirs, files in os.walk(env_root):
for d in dirs:
os.chown(os.path.join(root, d), uid, gid)
for f in files: # pylint: disable=W0621
os.chown(os.path.join(root, f), uid, gid)
_env_root = os.getenv('WA_USER_DIRECTORY', os.path.join(_user_home, '.workload_automation'))
_dep_dir = os.path.join(_env_root, 'dependencies')
_extension_paths = [os.path.join(_env_root, ext.default_path) for ext in _extensions]
_extension_paths.extend(os.getenv('WA_EXTENSION_PATHS', '').split(os.pathsep))
if not os.path.isdir(_env_root):
init_environment(_env_root, _dep_dir, _extension_paths)
elif not os.path.isfile(os.path.join(_env_root, 'config.py')):
with open(os.path.join(_this_dir, '..', 'config_example.py')) as f:
f_text = re.sub(r'""".*?"""', '', f.read(), 1, re.DOTALL)
with open(os.path.join(_env_root, 'config.py'), 'w') as f:
f.write(f_text)
settings = ConfigLoader()
settings.environment_root = _env_root
settings.dependencies_directory = _dep_dir
settings.extension_paths = _extension_paths
settings.extensions = _extensions
_packages_file = os.path.join(_env_root, 'packages')
if os.path.isfile(_packages_file):
with open(_packages_file) as fh:
settings.extension_packages = unique(fh.read().split())
_env_config = os.path.join(settings.environment_root, 'config.py')
settings.update(_env_config)

67
wlauto/core/command.py Normal file
View File

@@ -0,0 +1,67 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import textwrap
from wlauto.core.extension import Extension
from wlauto.core.entry_point import init_argument_parser
from wlauto.utils.doc import format_body
class Command(Extension):
"""
Defines a Workload Automation command. This will be executed from the command line as
``wa <command> [args ...]``. This defines the name to be used when invoking wa, the
code that will actually be executed on invocation and the argument parser to be used
to parse the rest of the command line arguments.
"""
help = None
usage = None
description = None
epilog = None
formatter_class = None
def __init__(self, subparsers):
super(Command, self).__init__()
self.group = subparsers
parser_params = dict(help=(self.help or self.description), usage=self.usage,
description=format_body(textwrap.dedent(self.description), 80),
epilog=self.epilog)
if self.formatter_class:
parser_params['formatter_class'] = self.formatter_class
self.parser = subparsers.add_parser(self.name, **parser_params)
init_argument_parser(self.parser) # propagate top-level options
self.initialize()
def initialize(self):
"""
Perform command-specific initialisation (e.g. adding command-specific options to the command's
parser).
"""
pass
def execute(self, args):
"""
Execute this command.
:args: An ``argparse.Namespace`` containing command line arguments (as returned by
``argparse.ArgumentParser.parse_args()``). This would usually be the result of
invoking ``self.parser``.
"""
raise NotImplementedError()
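# A minimal sketch of a concrete command (hypothetical; not part of WA):
#
#   class HelloCommand(Command):
#
#       name = 'hello'
#       description = 'Prints a greeting. Example only.'
#
#       def initialize(self):
#           self.parser.add_argument('--greeting', default='Hello')
#
#       def execute(self, args):
#           print '{}, world!'.format(args.greeting)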

756
wlauto/core/configuration.py Normal file
View File

@@ -0,0 +1,756 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import json
from copy import copy
from collections import OrderedDict
from wlauto.exceptions import ConfigError
from wlauto.utils.misc import merge_dicts, merge_lists, load_struct_from_file
from wlauto.utils.types import regex_type, identifier
class SharedConfiguration(object):
def __init__(self):
self.number_of_iterations = None
self.workload_name = None
self.label = None
self.boot_parameters = OrderedDict()
self.runtime_parameters = OrderedDict()
self.workload_parameters = OrderedDict()
self.instrumentation = []
class ConfigurationJSONEncoder(json.JSONEncoder):
def default(self, obj): # pylint: disable=E0202
if isinstance(obj, WorkloadRunSpec):
return obj.to_dict()
elif isinstance(obj, RunConfiguration):
return obj.to_dict()
elif isinstance(obj, RebootPolicy):
return obj.policy
elif isinstance(obj, regex_type):
return obj.pattern
else:
return json.JSONEncoder.default(self, obj)
class WorkloadRunSpec(object):
"""
Specifies execution of a workload, including things like the number of
iterations, device runtime_parameters configuration, etc.
"""
# These should be handled by the framework if not explicitly specified
# so it's a programming error if they're not
framework_mandatory_parameters = ['id', 'number_of_iterations']
# These *must* be specified by the user (through one mechanism or another)
# and it is a configuration error if they're not.
mandatory_parameters = ['workload_name']
def __init__(self,
id=None, # pylint: disable=W0622
number_of_iterations=None,
workload_name=None,
boot_parameters=None,
label=None,
section_id=None,
workload_parameters=None,
runtime_parameters=None,
instrumentation=None,
flash=None,
): # pylint: disable=W0622
self.id = id
self.number_of_iterations = number_of_iterations
self.workload_name = workload_name
self.label = label or self.workload_name
self.section_id = section_id
self.boot_parameters = boot_parameters or OrderedDict()
self.runtime_parameters = runtime_parameters or OrderedDict()
self.workload_parameters = workload_parameters or OrderedDict()
self.instrumentation = instrumentation or []
self.flash = flash or OrderedDict()
self._workload = None
self._section = None
self.enabled = True
def set(self, param, value):
if param in ['id', 'section_id', 'number_of_iterations', 'workload_name', 'label']:
if value is not None:
setattr(self, param, value)
elif param in ['boot_parameters', 'runtime_parameters', 'workload_parameters', 'flash']:
setattr(self, param, merge_dicts(getattr(self, param), value, list_duplicates='last',
dict_type=OrderedDict, should_normalize=False))
elif param in ['instrumentation']:
setattr(self, param, merge_lists(getattr(self, param), value, duplicates='last'))
else:
raise ValueError('Unexpected workload spec parameter: {}'.format(param))
def validate(self):
for param_name in self.framework_mandatory_parameters:
param = getattr(self, param_name)
if param is None:
msg = '{} not set for workload spec.'
raise RuntimeError(msg.format(param_name))
for param_name in self.mandatory_parameters:
param = getattr(self, param_name)
if param is None:
msg = '{} not set for workload spec for workload {}'
raise ConfigError(msg.format(param_name, self.id))
def match_selectors(self, selectors):
"""
Returns ``True`` if this spec matches the specified selectors, and
``False`` otherwise. ``selectors`` must be a dict-like object with
attribute names mapping onto selector values. At the moment, only equality
selection is supported; i.e. the value of the attribute of the spec must
match exactly the corresponding value specified in the ``selectors`` dict.
"""
if not selectors:
return True
for k, v in selectors.iteritems():
if getattr(self, k, None) != v:
return False
return True
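# Illustration (hypothetical values): a spec whose id is 'b01' matches
# selectors {'id': 'b01'} but not {'id': 'b02'}; empty or None selectors
# match every spec.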
@property
def workload(self):
if self._workload is None:
raise RuntimeError("Workload for {} has not been loaded".format(self))
return self._workload
@property
def section(self):
if self.section_id and self._section is None:
raise RuntimeError("Section for {} has not been loaded".format(self))
return self._section
def load(self, device, ext_loader):
"""Loads the workload for the specified device using the specified loader.
This must be done before attempting to execute the spec."""
self._workload = ext_loader.get_workload(self.workload_name, device, **self.workload_parameters)
def to_dict(self):
d = copy(self.__dict__)
del d['_workload']
del d['_section']
return d
def __str__(self):
return '{} {}'.format(self.id, self.label)
def __cmp__(self, other):
if not isinstance(other, WorkloadRunSpec):
return cmp('WorkloadRunSpec', other.__class__.__name__)
return cmp(self.id, other.id)
class _SpecConfig(object):
# TODO: This is a bit of HACK for alias resolution. This formats Alias
# params as if they came from config.
def __init__(self, name, params=None):
setattr(self, name, params or {})
class RebootPolicy(object):
"""
Represents the reboot policy for the execution -- at what points the device
should be rebooted. This, in turn, is controlled by the policy value that is
passed in on construction and would typically be read from the user's settings.
Valid policy values are:
:never: The device will never be rebooted.
:as_needed: Only reboot the device if it becomes unresponsive, or needs to be flashed, etc.
:initial: The device will be rebooted when the execution first starts, just before
executing the first workload spec.
:each_spec: The device will be rebooted before running a new workload spec.
:each_iteration: The device will be rebooted before each new iteration.
"""
valid_policies = ['never', 'as_needed', 'initial', 'each_spec', 'each_iteration']
def __init__(self, policy):
policy = policy.strip().lower().replace(' ', '_')
if policy not in self.valid_policies:
message = 'Invalid reboot policy {}; must be one of {}'.format(policy, ', '.join(self.valid_policies))
raise ConfigError(message)
self.policy = policy
@property
def can_reboot(self):
return self.policy != 'never'
@property
def perform_initial_boot(self):
return self.policy not in ['never', 'as_needed']
@property
def reboot_on_each_spec(self):
return self.policy in ['each_spec', 'each_iteration']
@property
def reboot_on_each_iteration(self):
return self.policy == 'each_iteration'
def __str__(self):
return self.policy
__repr__ = __str__
def __cmp__(self, other):
if isinstance(other, RebootPolicy):
return cmp(self.policy, other.policy)
else:
return cmp(self.policy, other)
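# A short illustration of the policy semantics described above:
#   RebootPolicy('each spec').policy                    # 'each_spec' (normalised)
#   RebootPolicy('never').can_reboot                    # False
#   RebootPolicy('initial').perform_initial_boot        # True
#   RebootPolicy('each_spec').reboot_on_each_iteration  # False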
class RunConfigurationItem(object):
"""
This represents a predetermined "configuration point" (an individual setting)
and describes how it must be handled when encountered.
"""
# Also defines the NULL value for each category
valid_categories = {
'scalar': None,
'list': [],
'dict': {},
}
# A callable that takes an arbitrary number of positional arguments
# is also valid.
valid_methods = ['keep', 'replace', 'merge']
def __init__(self, name, category, method):
if category not in self.valid_categories:
raise ValueError('Invalid category: {}'.format(category))
if not callable(method) and method not in self.valid_methods:
raise ValueError('Invalid method: {}'.format(method))
if category == 'scalar' and method == 'merge':
raise ValueError('Method cannot be "merge" for a scalar')
self.name = name
self.category = category
self.method = method
def combine(self, *args):
"""
Combine the provided values according to the method for this
configuration item. Order matters -- values are assumed to be
in the order they were specified by the user. The resulting value
is also checked to match the specified type.
"""
args = [a for a in args if a is not None]
if not args:
return self.valid_categories[self.category]
if self.method == 'keep' or len(args) == 1:
value = args[0]
elif self.method == 'replace':
value = args[-1]
elif self.method == 'merge':
if self.category == 'list':
value = merge_lists(*args, duplicates='last', dict_type=OrderedDict)
elif self.category == 'dict':
value = merge_dicts(*args,
should_merge_lists=True,
should_normalize=False,
list_duplicates='last',
dict_type=OrderedDict)
else:
raise ValueError('Unexpected category for merge : "{}"'.format(self.category))
elif callable(self.method):
value = self.method(*args)
else:
raise ValueError('Unexpected method: "{}"'.format(self.method))
return value
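# Illustration of combine() for each method (the items mirror entries in the
# general_config/workload_config tables below):
#   RunConfigurationItem('project', 'scalar', 'replace').combine('p1', 'p2')
#       # -> 'p2' (last value wins)
#   RunConfigurationItem('instrumentation', 'list', 'merge').combine(['a'], ['b'])
#       # -> ['a', 'b']
#   RunConfigurationItem('id', 'scalar', _combine_ids).combine('s1', 'w1')
#       # -> 's1_w1' (see _combine_ids below)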
def _combine_ids(*args):
return '_'.join(args)
class RunConfiguration(object):
"""
Loads and maintains the unified configuration for this run. This includes configuration
for WA execution as a whole, and parameters for specific specs.
WA configuration mechanism aims to be flexible and easy to use, while at the same
time providing strong validation and early failure on error. To meet these requirements,
the implementation gets rather complicated. This is going to be a quick overview of
the underlying mechanics.
.. note:: You don't need to know this to use WA, or to write extensions for it. From
the point of view of extension writers, configuration from various sources
"magically" appears as attributes of their classes. This explanation peels
back the curtain and is intended for those who, for one reason or another,
need to understand how the magic works.
**terminology**
run
A single execution of a WA agenda.
run config(uration) (object)
An instance of this class. There is one per run.
config(uration) item
A single configuration entry or "setting", e.g. the device interface to use. These
can be for the run as a whole, or for a specific extension.
(workload) spec
A specification of a single workload execution. This combines workload configuration
with things like the number of iterations to run, which instruments to enable, etc.
More concretely, this is an instance of :class:`WorkloadRunSpec`.
**overview**
There are three types of WA configuration:
1. "Meta" configuration that determines how the rest of the configuration is
processed (e.g. where extensions get loaded from). Since this does not pertain
to *run* configuration, it will not be covered further.
2. Global run configuration, e.g. which workloads, result processors and instruments
will be enabled for a run.
3. Per-workload specification configuration that determines how a particular workload
instance will get executed (e.g. what workload parameters will be used, how many
iterations, etc.).
**run configuration**
Run configuration may appear in a config file (usually ``~/.workload_automation/config.py``),
or in the ``config`` section of an agenda. Configuration is specified as a nested structure
of dictionaries (associative arrays, or maps) and lists in the syntax following the format
implied by the file extension (currently, YAML and Python are supported). If the same
configuration item appears in more than one source, they are merged with conflicting entries
taking the value from the last source that specified them.
In addition to a fixed set of global configuration items, configuration for any WA
Extension (instrument, result processor, etc) may also be specified, namespaced under
the extension's name (i.e. the extensions name is a key in the global config with value
being a dict of parameters and their values). Some Extension parameters also specify a
"global alias" that may appear at the top-level of the config rather than under the
Extension's name. It is *not* an error to specify configuration for an Extension that has
not been enabled for a particular run; such configuration will be ignored.
**per-workload configuration**
Per-workload configuration can be specified in three places in the agenda: the
workload entry in the ``workloads`` list, the ``global`` entry (configuration there
will be applied to every workload entry), and in a section entry in ``sections`` list
(configuration in every section will be applied to every workload entry separately,
creating a "cross-product" of section and workload configurations; additionally,
sections may specify their own workload lists).
If the same configuration item appears in more than one of the above places, they will
be merged in the following order: ``global``, ``section``, ``workload``, with conflicting
scalar values in later entries overriding those from earlier locations.
**Global parameter aliases**
As mentioned above, an Extension's parameter may define a global alias, which will be
specified and picked up from the top-level config, rather than config for that specific
extension. It is an error to specify the value for a parameter both through a global
alias and through extension config dict in the same configuration file. It is, however,
possible to use a global alias in one file, and specify extension configuration for the
same parameter in another file, in which case, the usual merging rules would apply.
**Loading and validation of configuration**
Validation of user-specified configuration happens at several stages of run initialisation,
to ensure that appropriate context for that particular type of validation is available and
that meaningful errors can be reported, as early as is feasible.
- Syntactic validation is performed when configuration is first loaded.
This is done by the loading mechanism (e.g. YAML parser), rather than WA itself. WA
propagates any errors encountered as ``ConfigError``\ s.
- Once a config file is loaded into a Python structure, it is scanned to
extract settings. Static configuration is validated and added to the config. Extension
configuration is collected into a collection of "raw" config, and merged as appropriate, but
is not processed further at this stage.
- Once all configuration sources have been processed, the configuration as a whole
is validated (to make sure there are no missing settings, etc).
- Extensions are loaded through the run config object, which instantiates
them with appropriate parameters based on the "raw" config collected earlier. When an
Extension is instantiated in such a way, its config is "officially" added to the run configuration
tracked by the run config object. Raw config is discarded at the end of the run, so
that any config that wasn't loaded in this way is not recorded (as it was not actually used).
- Extension parameters are validated individually (for type, value ranges, etc.) as they are
loaded in the Extension's __init__.
- An extension's ``validate()`` method is invoked before it is used (exactly when this
happens depends on the extension's type) to perform any final validation *that does not
rely on the target being present* (i.e. this would happen before WA connects to the target).
This can be used to perform inter-parameter validation for an extension (e.g. when the valid range for
one parameter depends on another), and more general WA state assumptions (e.g. a result
processor can check that an instrument it depends on has been installed).
- Finally, it is the responsibility of individual extensions to validate any assumptions
they make about the target device (usually as part of their ``setup()``).
**Handling of Extension aliases.**
WA extensions can have zero or more aliases (not to be confused with global aliases for extension
*parameters*). An extension allows associating an alternative name for the extension with a set
of parameter values. In other words, aliases associate common configurations for an extension with
a name, providing a shorthand for it. For example, "t-rex_offscreen" is an alias for "glbenchmark"
workload that specifies that "use_case" should be "t-rex" and "variant" should be "offscreen".
**special loading rules**
Note that as a consequence of being able to specify configuration for *any* Extension namespaced
under the Extension's name in the top-level config, two distinct mechanisms exist form configuring
devices and workloads. This is valid, however due to their nature, they are handled in a special way.
This may be counter intuitive, so configuration of devices and workloads creating entries for their
names in the config is discouraged in favour of using the "normal" mechanisms of configuring them
(``device_config`` for devices and workload specs in the agenda for workloads).
In both cases (devices and workloads), "normal" config will always override named extension config
*irrespective of which file it was specified in*. So an ``adb_name`` specified in ``device_config``
inside ``~/.workload_automation/config.py`` will override ``adb_name`` specified for ``juno`` in the
agenda (even when device is set to "juno").
Again, this ignores normal loading rules, so the use of named extension configuration for devices
and workloads is discouraged. There may be some situations where this behaviour is useful, however
(e.g. maintaining configuration for different devices in one config file).
"""
default_reboot_policy = 'as_needed'
default_execution_order = 'by_iteration'
# This is generic top-level configuration.
general_config = [
RunConfigurationItem('run_name', 'scalar', 'replace'),
RunConfigurationItem('project', 'scalar', 'replace'),
RunConfigurationItem('project_stage', 'dict', 'replace'),
RunConfigurationItem('execution_order', 'scalar', 'replace'),
RunConfigurationItem('reboot_policy', 'scalar', 'replace'),
RunConfigurationItem('device', 'scalar', 'replace'),
RunConfigurationItem('flashing_config', 'dict', 'replace'),
]
# Configuration specified for each workload spec. "workload_parameters"
# aren't listed because they are handled separately.
workload_config = [
RunConfigurationItem('id', 'scalar', _combine_ids),
RunConfigurationItem('number_of_iterations', 'scalar', 'replace'),
RunConfigurationItem('workload_name', 'scalar', 'replace'),
RunConfigurationItem('label', 'scalar', 'replace'),
RunConfigurationItem('section_id', 'scalar', 'replace'),
RunConfigurationItem('boot_parameters', 'dict', 'merge'),
RunConfigurationItem('runtime_parameters', 'dict', 'merge'),
RunConfigurationItem('instrumentation', 'list', 'merge'),
RunConfigurationItem('flash', 'dict', 'merge'),
]
# List of names that may be present in configuration (and it is valid for
# them to be there) but are not handled by RunConfiguration.
ignore_names = ['logging']
def get_reboot_policy(self):
if not self._reboot_policy:
self._reboot_policy = RebootPolicy(self.default_reboot_policy)
return self._reboot_policy
def set_reboot_policy(self, value):
if isinstance(value, RebootPolicy):
self._reboot_policy = value
else:
self._reboot_policy = RebootPolicy(value)
reboot_policy = property(get_reboot_policy, set_reboot_policy)
@property
def all_instrumentation(self):
result = set()
for spec in self.workload_specs:
result = result.union(set(spec.instrumentation))
return result
def __init__(self, ext_loader):
self.ext_loader = ext_loader
self.device = None
self.device_config = None
self.execution_order = None
self.project = None
self.project_stage = None
self.run_name = None
self.instrumentation = {}
self.result_processors = {}
self.workload_specs = []
self.flashing_config = {}
self.other_config = {} # keeps track of used config for extensions other than of the four main kinds.
self._used_config_items = []
self._global_instrumentation = []
self._reboot_policy = None
self._agenda = None
self._finalized = False
self._general_config_map = {i.name: i for i in self.general_config}
self._workload_config_map = {i.name: i for i in self.workload_config}
# Config files may contain static configuration for extensions that
# would not be part of this run (e.g. DB connection settings
# for a result processor that has not been enabled). Such settings
# should not be part of configuration for this run (as they will not
# be affecting it), but we still need to keep track of it in case a later
# config (e.g. from the agenda) enables the extension.
# For this reason, all extension config is first loaded into the
# following dict and when an extension is identified as needed for the
# run, its config is picked up from this "raw" dict and it becomes part
# of the run configuration.
self._raw_config = {'instrumentation': [], 'result_processors': []}
def get_extension(self, ext_name, *args):
self._check_finalized()
self._load_default_config_if_necessary(ext_name)
ext_config = self._raw_config[ext_name]
ext_cls = self.ext_loader.get_extension_class(ext_name)
if ext_cls.kind not in ['workload', 'device', 'instrument', 'result_processor']:
self.other_config[ext_name] = ext_config
return self.ext_loader.get_extension(ext_name, *args, **ext_config)
def to_dict(self):
d = copy(self.__dict__)
to_remove = ['ext_loader', 'workload_specs'] + [k for k in d.keys() if k.startswith('_')]
for attr in to_remove:
del d[attr]
d['workload_specs'] = [s.to_dict() for s in self.workload_specs]
d['reboot_policy'] = self.reboot_policy # this is a property so not in __dict__
return d
def load_config(self, source):
"""Load configuration from the specified source. The source must be
either a path to a valid config file or a dict-like object. Currently,
config files can be either python modules (.py extension) or YAML documents
(.yaml extension)."""
if self._finalized:
raise ValueError('Attempting to load a config file after run configuration has been finalized.')
try:
config_struct = _load_raw_struct(source)
self._merge_config(config_struct)
except ConfigError as e:
message = 'Error in {}:\n\t{}'
raise ConfigError(message.format(getattr(source, 'name', None), e.message))
def set_agenda(self, agenda, selectors=None):
"""Set the agenda for this run; Unlike with config files, there can only be one agenda."""
if self._agenda:
# note: this also guards against loading an agenda after finalized() has been called,
# as that would have required an agenda to be set.
message = 'Attempting to set a second agenda {};\n\talready have agenda {} set'
raise ValueError(message.format(agenda.filepath, self._agenda.filepath))
try:
self._merge_config(agenda.config or {})
self._load_specs_from_agenda(agenda, selectors)
self._agenda = agenda
except ConfigError as e:
message = 'Error in {}:\n\t{}'
raise ConfigError(message.format(agenda.filepath, e.message))
def finalize(self):
"""This must be invoked once all configuration sources have been loaded. This will
do the final processing, setting instrumentation and result processor configuration
for the run, and making sure that all the mandatory config has been specified."""
if self._finalized:
return
if not self._agenda:
raise ValueError('Attempting to finalize run configuration before an agenda is loaded.')
self._finalize_config_list('instrumentation')
self._finalize_config_list('result_processors')
if not self.device:
raise ConfigError('Device not specified in the config.')
self._finalize_device_config()
if not self.reboot_policy.reboot_on_each_spec:
for spec in self.workload_specs:
if spec.boot_parameters:
message = 'spec {} specifies boot_parameters; reboot policy must be at least "each_spec"'
raise ConfigError(message.format(spec.id))
for spec in self.workload_specs:
for globinst in self._global_instrumentation:
if globinst not in spec.instrumentation:
spec.instrumentation.append(globinst)
spec.validate()
self._finalized = True
def serialize(self, wfh):
json.dump(self, wfh, cls=ConfigurationJSONEncoder, indent=4)
def _merge_config(self, config):
"""
Merge the settings specified by the ``config`` dict-like object into current
configuration.
"""
if not isinstance(config, dict):
raise ValueError('config must be a dict; found {}'.format(config.__class__.__name__))
for k, v in config.iteritems():
k = identifier(k)
if k in self.ext_loader.global_param_aliases:
self._resolve_global_alias(k, v)
elif k in self._general_config_map:
self._set_run_config_item(k, v)
elif self.ext_loader.has_extension(k):
self._set_extension_config(k, v)
elif k == 'device_config':
self._set_raw_dict(k, v)
elif k in ['instrumentation', 'result_processors']:
# Instrumentation can be enabled and disabled by individual
# workloads, so we need to track it in two places: a list of
# all instruments for the run (as they will all need to be
# initialized and installed), and a list of only the "global"
# instruments which can then be merged into instrumentation
# lists of individual workload specs.
self._set_raw_list('_global_{}'.format(k), v)
self._set_raw_list(k, v)
elif k in self.ignore_names:
pass
else:
raise ConfigError('Unknown configuration option: {}'.format(k))
def _resolve_global_alias(self, name, value):
ga = self.ext_loader.global_param_aliases[name]
for param, ext in ga.iteritems():
for name in [ext.name] + [a.name for a in ext.aliases]:
self._load_default_config_if_necessary(name)
self._raw_config[name][param.name] = value
def _set_run_config_item(self, name, value):
item = self._general_config_map[name]
combined_value = item.combine(getattr(self, name, None), value)
setattr(self, name, combined_value)
def _set_extension_config(self, name, value):
default_config = self.ext_loader.get_default_config(name)
self._set_raw_dict(name, value, default_config)
def _set_raw_dict(self, name, value, default_config=None):
existing_config = self._raw_config.get(name, default_config or {})
new_config = _merge_config_dicts(existing_config, value)
self._raw_config[name] = new_config
def _set_raw_list(self, name, value):
old_value = self._raw_config.get(name, [])
new_value = merge_lists(old_value, value, duplicates='last')
self._raw_config[name] = new_value
def _finalize_config_list(self, attr_name):
"""Note: the name is somewhat misleading. This finalizes a list
from the specified configuration (e.g. "instrumentation"); the internal
representation is actually a dict, not a list..."""
ext_config = {}
raw_list = self._raw_config.get(attr_name, [])
for extname in raw_list:
default_config = self.ext_loader.get_default_config(extname)
ext_config[extname] = self._raw_config.get(extname, default_config)
list_name = '_global_{}'.format(attr_name)
setattr(self, list_name, raw_list)
setattr(self, attr_name, ext_config)
def _finalize_device_config(self):
self._load_default_config_if_necessary(self.device)
config = _merge_config_dicts(self._raw_config.get(self.device),
self._raw_config.get('device_config', {}))
self.device_config = config
def _load_default_config_if_necessary(self, name):
if name not in self._raw_config:
self._raw_config[name] = self.ext_loader.get_default_config(name)
def _load_specs_from_agenda(self, agenda, selectors):
global_dict = agenda.global_.to_dict() if agenda.global_ else {}
if agenda.sections:
for section_entry in agenda.sections:
section_dict = section_entry.to_dict()
for workload_entry in agenda.workloads + section_entry.workloads:
workload_dict = workload_entry.to_dict()
self._load_workload_spec(global_dict, section_dict, workload_dict, selectors)
else: # no sections were specified
for workload_entry in agenda.workloads:
workload_dict = workload_entry.to_dict()
self._load_workload_spec(global_dict, {}, workload_dict, selectors)
def _load_workload_spec(self, global_dict, section_dict, workload_dict, selectors):
spec = WorkloadRunSpec()
for name, config in self._workload_config_map.iteritems():
value = config.combine(global_dict.get(name), section_dict.get(name), workload_dict.get(name))
spec.set(name, value)
if section_dict:
spec.set('section_id', section_dict.get('id'))
realname, alias_config = self.ext_loader.resolve_alias(spec.workload_name)
if not spec.label:
spec.label = spec.workload_name
spec.workload_name = realname
dicts = [self.ext_loader.get_default_config(realname),
alias_config,
self._raw_config.get(spec.workload_name),
global_dict.get('workload_parameters'),
section_dict.get('workload_parameters'),
workload_dict.get('workload_parameters')]
dicts = [d for d in dicts if d is not None]
value = _merge_config_dicts(*dicts)
spec.set('workload_parameters', value)
if not spec.number_of_iterations:
spec.number_of_iterations = 1
if spec.match_selectors(selectors):
instrumentation_config = self._raw_config['instrumentation']
for instname in spec.instrumentation:
if instname not in instrumentation_config:
instrumentation_config.append(instname)
self.workload_specs.append(spec)
def _check_finalized(self):
if not self._finalized:
raise ValueError('Attempting to access configuration before it has been finalized.')
def _load_raw_struct(source):
"""Load a raw dict config structure from the specified source."""
if isinstance(source, basestring):
if os.path.isfile(source):
raw = load_struct_from_file(filepath=source)
else:
raise ConfigError('File "{}" does not exist'.format(source))
elif isinstance(source, dict):
raw = source
else:
raise ConfigError('Unknown config source: {}'.format(source))
return raw
def _merge_config_dicts(*args, **kwargs):
"""Provides a different set of default settings for ```merge_dicts`` """
return merge_dicts(*args,
should_merge_lists=kwargs.get('should_merge_lists', False),
should_normalize=kwargs.get('should_normalize', False),
list_duplicates=kwargs.get('list_duplicates', 'last'),
dict_type=kwargs.get('dict_type', OrderedDict))

418
wlauto/core/device.py Normal file
View File

@@ -0,0 +1,418 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Base classes for device interfaces.
:Device: The base class for all devices. This defines the interface that must be
implemented by all devices and therefore any workload and instrumentation
can always rely on.
:AndroidDevice: Implements most of the :class:`Device` interface, and extends it
with a number of Android-specific methods.
:BigLittleDevice: Subclasses :class:`AndroidDevice` to implement big.LITTLE-specific
runtime parameters.
:SimpleMulticoreDevice: Subclasses :class:`AndroidDevice` to implement homogeneous cores
device runtime parameters.
"""
import os
import imp
import string
from collections import OrderedDict
from contextlib import contextmanager
from wlauto.core.extension import Extension, ExtensionMeta, AttributeCollection, Parameter
from wlauto.exceptions import DeviceError, ConfigError
from wlauto.utils.types import list_of_strings, list_of_integers
__all__ = ['RuntimeParameter', 'CoreParameter', 'Device', 'DeviceMeta']
class RuntimeParameter(object):
"""
A runtime parameter which has its getter and setter methods associated
with it.
"""
def __init__(self, name, getter, setter,
getter_args=None, setter_args=None,
value_name='value', override=False):
"""
:param name: the name of the parameter.
:param getter: the getter method which returns the value of this parameter.
:param setter: the setter method which sets the value of this parameter. The setter
always expects to be passed one argument when it is called.
:param getter_args: keyword arguments to be used when invoking the getter.
:param setter_args: keyword arguments to be used when invoking the setter.
:param override: A ``bool`` that specifies whether a parameter of the same name further up the
hierarchy should be overridden. If this is ``False`` (the default), an exception
will be raised by the ``AttributeCollection`` instead.
"""
self.name = name
self.getter = getter
self.setter = setter
self.getter_args = getter_args or {}
self.setter_args = setter_args or {}
self.value_name = value_name
self.override = override
def __str__(self):
return self.name
__repr__ = __str__
class CoreParameter(RuntimeParameter):
"""A runtime parameter that will get expanded into a RuntimeParameter for each core type."""
def get_runtime_parameters(self, core_names):
params = []
for core in set(core_names):
name = string.Template(self.name).substitute(core=core)
getter = string.Template(self.getter).substitute(core=core)
setter = string.Template(self.setter).substitute(core=core)
getargs = dict(self.getter_args.items() + [('core', core)])
setargs = dict(self.setter_args.items() + [('core', core)])
params.append(RuntimeParameter(name, getter, setter, getargs, setargs, self.value_name, self.override))
return params
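# Illustrative expansion (the names here are hypothetical): a template
# parameter such as
#
#     CoreParameter('${core}_governor', getter='get_governor',
#                   setter='set_governor')
#
# expanded against core_names == ['a7', 'a7', 'a15'] yields two
# RuntimeParameters, 'a7_governor' and 'a15_governor', each with the
# matching 'core' value injected into its getter/setter keyword args.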
class DeviceMeta(ExtensionMeta):
to_propagate = ExtensionMeta.to_propagate + [
('runtime_parameters', RuntimeParameter, AttributeCollection),
]
class Device(Extension):
"""
Base class for all devices supported by Workload Automation. Defines
the interface the rest of WA uses to interact with devices.
:name: Unique name used to identify the device.
:platform: The name of the device's platform (e.g. ``Android``); this may
be used by workloads and instrumentation to assess whether they
can run on the device.
:working_directory: a string giving the path of the directory to be
used by workloads on the device.
:binaries_directory: a string giving the path of the binaries
directory on the device.
:has_gpu: Should be ``True`` if the device has a separate GPU, and
``False`` if graphics processing is done on a CPU.
.. note:: Pretty much all devices currently on the market
have GPUs, however this may not be the case for some
development boards.
:path_module: The name of one of the modules implementing the os.path
interface, e.g. ``posixpath`` or ``ntpath``. You can provide
your own implementation rather than relying on one of the
standard library modules, in which case you need to specify
the *full* path to your module, e.g. '/home/joebloggs/mypathimp.py'
:parameters: A list of RuntimeParameter objects. The order of the objects
is very important as the setters and getters will be called
in the order the RuntimeParameter objects were inserted.
:active_cores: This should be a list of all the currently active cpus on
the device, as reported by ``'/sys/devices/system/cpu/online'``. The
list should be read from the device at the time of the request.
"""
__metaclass__ = DeviceMeta
parameters = [
Parameter('core_names', kind=list_of_strings, mandatory=True, default=None,
description="""
This is a list of all cpu cores on the device with each
element being the core type, e.g. ``['a7', 'a7', 'a15']``. The
order of the cores must match the order they are listed in
``'/sys/devices/system/cpu'``. So in this case, ``'cpu0'`` must
be an A7 core, and ``'cpu2'`` an A15.
"""),
Parameter('core_clusters', kind=list_of_integers, mandatory=True, default=None,
description="""
This is a list indicating the cluster affinity of the CPU cores,
each element corresponding to the cluster ID of the core at the same
index. E.g. ``[0, 0, 1]`` indicates that cpu0 and cpu1 are on
cluster 0, while cpu2 is on cluster 1.
"""),
]
runtime_parameters = []
# These must be overwritten by subclasses.
name = None
platform = None
default_working_directory = None
has_gpu = None
path_module = None
active_cores = None
def __init__(self, **kwargs): # pylint: disable=W0613
super(Device, self).__init__(**kwargs)
if not self.path_module:
raise NotImplementedError('path_module must be specified by the deriving classes.')
libpath = os.path.dirname(os.__file__)
modpath = os.path.join(libpath, self.path_module)
if not modpath.lower().endswith('.py'):
modpath += '.py'
try:
self.path = imp.load_source('device_path', modpath)
except IOError:
raise DeviceError('Unsupported path module: {}'.format(self.path_module))
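# Sketch of a concrete device filling in the required class attributes
# (the class and values below are assumptions for illustration only):
#
#     class MyLinuxBoard(Device):
#         name = 'myboard'
#         platform = 'linux'
#         path_module = 'posixpath'  # resolved against the stdlib directory
#         has_gpu = False
#         active_cores = []
#
# With path_module set, self.path.join('/data', 'local') and friends then
# behave like os.path on the target rather than on the host.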
def reset(self):
"""
Initiate rebooting of the device.
Added in version 2.1.3.
"""
raise NotImplementedError()
def boot(self, *args, **kwargs):
"""
Perform the steps necessary to boot the device to the point where it is ready
to accept other commands.
Changed in version 2.1.3: no longer expected to wait until boot completes.
"""
raise NotImplementedError()
def connect(self, *args, **kwargs):
"""
Establish a connection to the device that will be used for subsequent commands.
Added in version 2.1.3.
"""
raise NotImplementedError()
def disconnect(self):
""" Close the established connection to the device. """
raise NotImplementedError()
def initialize(self, context, *args, **kwargs):
"""
Default implementation just calls through to init(). May be overridden by specialised
abstract sub-classes to implement platform-specific initialization without requiring
concrete implementations to explicitly invoke parent's init().
Added in version 2.1.3.
"""
self.init(context, *args, **kwargs)
def init(self, context, *args, **kwargs):
"""
Initialize the device. This method *must* be called after a device reboot before
any other commands can be issued, however it may also be called without rebooting.
It is up to device-specific implementations to identify what initialisation needs
to be performed on a particular invocation. Bear in mind that no assumptions can be
made about the state of the device prior to the initiation of workload execution,
so full initialisation must be performed at least once, even if no reboot has occurred.
After that, the device-specific implementation may choose to skip initialization if
the device has not been rebooted; it is up to the implementation to keep track of
that, however.
All arguments are device-specific (see the documentation for your device).
"""
pass
def ping(self):
"""
This must return successfully if the device is able to receive commands, or must
raise :class:`wlauto.exceptions.DeviceUnresponsiveError` if the device cannot respond.
"""
raise NotImplementedError()
def get_runtime_parameter_names(self):
return [p.name for p in self._expand_runtime_parameters()]
def get_runtime_parameters(self):
""" returns the runtime parameters that have been set. """
# pylint: disable=cell-var-from-loop
runtime_parameters = OrderedDict()
for rtp in self._expand_runtime_parameters():
if not rtp.getter:
continue
getter = getattr(self, rtp.getter)
rtp_value = getter(**rtp.getter_args)
runtime_parameters[rtp.name] = rtp_value
return runtime_parameters
def set_runtime_parameters(self, params):
"""
The parameters are taken from the ``params`` dict and are specific to
a particular device. See the device documentation.
"""
runtime_parameters = self._expand_runtime_parameters()
rtp_map = {rtp.name.lower(): rtp for rtp in runtime_parameters}
params = OrderedDict((k.lower(), v) for k, v in params.iteritems())
expected_keys = rtp_map.keys()
if not set(params.keys()) <= set(expected_keys):
unknown_params = list(set(params.keys()).difference(set(expected_keys)))
raise ConfigError('Unknown runtime parameter(s): {}'.format(unknown_params))
for param in params:
rtp = rtp_map[param]
setter = getattr(self, rtp.setter)
args = dict(rtp.setter_args.items() + [(rtp.value_name, params[rtp.name.lower()])])
setter(**args)
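# Illustrative call (parameter names are hypothetical and device-specific):
#
#     device.set_runtime_parameters({'a15_governor': 'interactive',
#                                    'a7_min_frequency': 500000})
#
# Keys are matched case-insensitively against the expanded runtime
# parameters; any unrecognised key raises ConfigError before any setter
# is invoked.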
def capture_screen(self, filepath):
"""Captures the current device screen into the specified file in a PNG format."""
raise NotImplementedError()
def get_properties(self, output_path):
"""Captures and saves the device configuration properties version and
any other relevant information. Return them in a dict"""
raise NotImplementedError()
def listdir(self, path, **kwargs):
""" List the contents of the specified directory. """
raise NotImplementedError()
def push_file(self, source, dest):
""" Push a file from the host file system onto the device. """
raise NotImplementedError()
def pull_file(self, source, dest):
""" Pull a file from device system onto the host file system. """
raise NotImplementedError()
def delete_file(self, filepath):
""" Delete the specified file on the device. """
raise NotImplementedError()
def file_exists(self, filepath):
""" Check if the specified file or directory exist on the device. """
raise NotImplementedError()
def get_pids_of(self, process_name):
""" Returns a list of PIDs of the specified process name. """
raise NotImplementedError()
def kill(self, pid, as_root=False):
""" Kill the process with the specified PID. """
raise NotImplementedError()
def killall(self, process_name, as_root=False):
""" Kill all running processes with the specified name. """
raise NotImplementedError()
def install(self, filepath, **kwargs):
""" Install the specified file on the device. What "install" means is device-specific
and may possibly also depend on the type of file."""
raise NotImplementedError()
def uninstall(self, filepath):
""" Uninstall the specified file on the device. What "uninstall" means is device-specific
and may possibly also depend on the type of file."""
raise NotImplementedError()
def execute(self, command, timeout=None, **kwargs):
"""
Execute the specified command on the device and return the output.
:param command: Command to be executed on the device.
:param timeout: If the command does not return after the specified time,
execute() will abort with an error. If there is no timeout for
the command, this should be set to 0 or None.
Other device-specific keyword arguments may also be specified.
:returns: The stdout output from the command.
"""
raise NotImplementedError()
def set_sysfile_value(self, filepath, value, verify=True):
"""
Write the specified value to the specified file on the device
and verify that the value has actually been written.
:param filepath: The file to be modified.
:param value: The value to be written to the file. Must be
an int or a string convertible to an int.
:param verify: Specifies whether the value should be verified, once written.
Should raise DeviceError if the value could not be written.
"""
raise NotImplementedError()
def get_sysfile_value(self, sysfile, kind=None):
"""
Get the contents of the specified sysfile.
:param sysfile: The file whose contents will be returned.
:param kind: The type of value to be expected in the sysfile. This can
be any Python callable that takes a single str argument.
If not specified or is None, the contents will be returned
as a string.
"""
raise NotImplementedError()
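# Illustrative usage, assuming a sysfs path that exists on the target:
#
#     online = device.get_sysfile_value(
#         '/sys/devices/system/cpu/cpu1/online', kind=int)
#     device.set_sysfile_value(
#         '/sys/devices/system/cpu/cpu1/online', 1 - online)
#
# With no kind specified, the raw string contents are returned instead.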
def start(self):
"""
This gets invoked before an iteration is started and is intended to help the
device manage any internal supporting functions.
"""
pass
def stop(self):
"""
This gets invoked after iteration execution has completed and is intended to help the
device manage any internal supporting functions.
"""
pass
def __str__(self):
return 'Device<{}>'.format(self.name)
__repr__ = __str__
def _expand_runtime_parameters(self):
expanded_params = []
for param in self.runtime_parameters:
if isinstance(param, CoreParameter):
expanded_params.extend(param.get_runtime_parameters(self.core_names)) # pylint: disable=no-member
else:
expanded_params.append(param)
return expanded_params
@contextmanager
def _check_alive(self):
try:
yield
except Exception as e:
self.ping()
raise e

View File

@@ -0,0 +1,75 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import argparse
import logging
from wlauto.core.bootstrap import settings
from wlauto.core.extension_loader import ExtensionLoader
from wlauto.exceptions import WAError
from wlauto.utils.misc import get_traceback
from wlauto.utils.log import init_logging
from wlauto.utils.cli import init_argument_parser
from wlauto.utils.doc import format_body
import warnings
warnings.filterwarnings(action='ignore', category=UserWarning, module='zope')
logger = logging.getLogger('command_line')
def load_commands(subparsers):
ext_loader = ExtensionLoader(paths=settings.extension_paths)
for command in ext_loader.list_commands():
settings.commands[command.name] = ext_loader.get_command(command.name, subparsers=subparsers)
def main():
try:
description = ("Execute automated workloads on a remote device and process "
"the resulting output.\n\nUse \"wa <subcommand> -h\" to see "
"help for individual subcommands.")
parser = argparse.ArgumentParser(description=format_body(description, 80),
prog='wa',
formatter_class=argparse.RawDescriptionHelpFormatter,
)
init_argument_parser(parser)
load_commands(parser.add_subparsers(dest='command')) # each command will add its own subparser
args = parser.parse_args()
settings.verbosity = args.verbose
settings.debug = args.debug
if args.config:
settings.update(args.config)
init_logging(settings.verbosity)
command = settings.commands[args.command]
sys.exit(command.execute(args))
except KeyboardInterrupt:
logging.info('Got CTRL-C. Aborting.')
sys.exit(3)
except WAError, e:
logging.critical(e)
sys.exit(1)
except Exception, e: # pylint: disable=broad-except
tb = get_traceback()
logging.critical(tb)
logging.critical('{}({})'.format(e.__class__.__name__, e))
sys.exit(2)
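# Sketch of how a subcommand plugs into the parser above. The command below
# is hypothetical; only the Command base class and its execute() hook come
# from this commit.
#
#     from wlauto import Command
#
#     class ShowVersionCommand(Command):
#         name = 'showversion'
#         description = 'Print the WA version and exit.'
#
#         def execute(self, args):
#             from wlauto.core.version import get_wa_version
#             print get_wa_version()
#
# Placed under one of settings.extension_paths, it is picked up by
# load_commands() and becomes invocable as "wa showversion".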

798
wlauto/core/execution.py Normal file
View File

@@ -0,0 +1,798 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=no-member
"""
This module contains the execution logic for Workload Automation. It defines the
following actors:
WorkloadSpec: Identifies the workload to be run and defines parameters under
which it should be executed.
Executor: Responsible for the overall execution process. It instantiates
and/or initialises the other actors, does any necessary validation
and kicks off the whole process.
Execution Context: Provides information about the current state of run
execution to instrumentation.
RunInfo: Information about the current run.
Runner: This executes workload specs that are passed to it. It goes through
stages of execution, emitting an appropriate signal at each step to
allow instrumentation to do its stuff.
"""
import os
import uuid
import logging
import subprocess
import random
from copy import copy
from datetime import datetime
from contextlib import contextmanager
from collections import Counter, defaultdict, OrderedDict
from itertools import izip_longest
import wlauto.core.signal as signal
from wlauto.core import instrumentation
from wlauto.core.bootstrap import settings
from wlauto.core.extension import Artifact
from wlauto.core.configuration import RunConfiguration
from wlauto.core.extension_loader import ExtensionLoader
from wlauto.core.resolver import ResourceResolver
from wlauto.core.result import ResultManager, IterationResult, RunResult
from wlauto.exceptions import (WAError, ConfigError, TimeoutError, InstrumentError,
DeviceError, DeviceNotRespondingError)
from wlauto.utils.misc import ensure_directory_exists as _d, get_traceback, merge_dicts, format_duration
# The maximum number of reboot attempts for an iteration.
MAX_REBOOT_ATTEMPTS = 3
# If something went wrong during device initialization, wait this
# long (in seconds) before retrying. This is necessary, as retrying
# immediately may not give the device enough time to recover to be able
# to reboot.
REBOOT_DELAY = 3
class RunInfo(object):
"""
Information about the current run, such as its unique ID, run
time, etc.
"""
def __init__(self, config):
self.config = config
self.uuid = uuid.uuid4()
self.start_time = None
self.end_time = None
self.duration = None
self.project = config.project
self.project_stage = config.project_stage
self.run_name = config.run_name
self.notes = None
self.device_properties = {}
def to_dict(self):
d = copy(self.__dict__)
d['uuid'] = str(self.uuid)
del d['config']
d = merge_dicts(d, self.config.to_dict())
return d
class ExecutionContext(object):
"""
Provides a context for instrumentation. Keeps track of things like
current workload and iteration.
This class also provides two status members that can be used by workloads
and instrumentation to keep track of arbitrary state. ``result``
is reset on each new iteration of a workload; ``run_result`` is maintained
throughout a Workload Automation run.
"""
# These are the artifacts generated by the core framework.
default_run_artifacts = [
Artifact('runlog', 'run.log', 'log', mandatory=True,
description='The log for the entire run.'),
]
@property
def current_iteration(self):
if self.current_job:
spec_id = self.current_job.spec.id
return self.job_iteration_counts[spec_id]
else:
return None
@property
def workload(self):
return getattr(self.spec, 'workload', None)
@property
def spec(self):
return getattr(self.current_job, 'spec', None)
@property
def result(self):
return getattr(self.current_job, 'result', None)
def __init__(self, device, config):
self.device = device
self.config = config
self.reboot_policy = config.reboot_policy
self.output_directory = None
self.current_job = None
self.resolver = None
self.last_error = None
self.run_info = None
self.run_result = None
self.run_output_directory = settings.output_directory
self.host_working_directory = settings.meta_directory
self.iteration_artifacts = None
self.run_artifacts = copy(self.default_run_artifacts)
self.job_iteration_counts = defaultdict(int)
self.aborted = False
if settings.agenda:
self.run_artifacts.append(Artifact('agenda',
os.path.join(self.host_working_directory,
os.path.basename(settings.agenda)),
'meta',
mandatory=True,
description='Agenda for this run.'))
for i in xrange(1, settings.config_count + 1):
self.run_artifacts.append(Artifact('config_{}'.format(i),
os.path.join(self.host_working_directory,
'config_{}.py'.format(i)),
kind='meta',
mandatory=True,
description='Config file used for the run.'))
def initialize(self):
if not os.path.isdir(self.run_output_directory):
os.makedirs(self.run_output_directory)
self.output_directory = self.run_output_directory
self.resolver = ResourceResolver(self.config)
self.run_info = RunInfo(self.config)
self.run_result = RunResult(self.run_info)
def next_job(self, job):
"""Invoked by the runner when starting a new iteration of workload execution."""
self.current_job = job
self.job_iteration_counts[self.spec.id] += 1
self.current_job.result.iteration = self.current_iteration
if not self.aborted:
outdir_name = '_'.join(map(str, [self.spec.label, self.spec.id, self.current_iteration]))
self.output_directory = _d(os.path.join(self.run_output_directory, outdir_name))
self.iteration_artifacts = [wa for wa in self.workload.artifacts]
def end_job(self):
if self.current_job.result.status == IterationResult.ABORTED:
self.aborted = True
self.current_job = None
self.output_directory = self.run_output_directory
def add_artifact(self, name, path, kind, *args, **kwargs):
if self.current_job is None:
self.add_run_artifact(name, path, kind, *args, **kwargs)
else:
self.add_iteration_artifact(name, path, kind, *args, **kwargs)
def add_run_artifact(self, name, path, kind, *args, **kwargs):
path = _check_artifact_path(path, self.run_output_directory)
self.run_artifacts.append(Artifact(name, path, kind, Artifact.ITERATION, *args, **kwargs))
def add_iteration_artifact(self, name, path, kind, *args, **kwargs):
path = _check_artifact_path(path, self.output_directory)
self.iteration_artifacts.append(Artifact(name, path, kind, Artifact.ITERATION, *args, **kwargs))
def get_artifact(self, name):
if self.iteration_artifacts:
for art in self.iteration_artifacts:
if art.name == name:
return art
for art in self.run_artifacts:
if art.name == name:
return art
return None
def _check_artifact_path(path, rootpath):
if path.startswith(rootpath):
return os.path.abspath(path)
rootpath = os.path.abspath(rootpath)
full_path = os.path.join(rootpath, path)
if not os.path.isfile(full_path):
raise ValueError('Cannot add artifact because {} does not exist.'.format(full_path))
return full_path
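# Illustrative use from instrumentation (the file name is hypothetical):
#
#     context.add_artifact('cpufreq_trace', 'cpufreq.csv', 'data')
#
# Inside a job this registers an iteration-level artifact relative to the
# iteration output directory; outside of a job the same call records a
# run-level artifact instead, as dispatched by add_artifact() above.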
class Executor(object):
"""
The ``Executor``'s job is to set up the execution context and pass to a ``Runner``
along with a loaded run specification. Once the ``Runner`` has done its thing,
the ``Executor`` performs some final reporting before returning.
The initial context set up involves combining configuration from various sources,
loading of required workloads, loading and installation of instruments and result
processors, etc. Static validation of the combined configuration is also performed.
"""
# pylint: disable=R0915
def __init__(self):
self.logger = logging.getLogger('Executor')
self.error_logged = False
self.warning_logged = False
self.config = None
self.ext_loader = None
self.device = None
self.context = None
def execute(self, agenda, selectors=None): # NOQA
"""
Execute the run specified by an agenda. Optionally, selectors may be used to only
execute a subset of the specified agenda.
Params::
:agenda: an ``Agenda`` instance to be executed.
:selectors: A dict mapping selector name to the corresponding values.
**Selectors**
Currently, the following selectors are supported:
ids
The value must be a sequence of workload specification IDs to be executed. Note
that if sections are specified in the agenda, the workload specification ID will
"""
signal.connect(self._error_signalled_callback, signal.ERROR_LOGGED)
signal.connect(self._warning_signalled_callback, signal.WARNING_LOGGED)
self.logger.info('Initializing')
self.ext_loader = ExtensionLoader(packages=settings.extension_packages,
paths=settings.extension_paths)
self.logger.debug('Loading run configuration.')
self.config = RunConfiguration(self.ext_loader)
for filepath in settings.get_config_paths():
self.config.load_config(filepath)
self.config.set_agenda(agenda, selectors)
self.config.finalize()
config_outfile = os.path.join(settings.meta_directory, 'run_config.json')
with open(config_outfile, 'w') as wfh:
self.config.serialize(wfh)
self.logger.debug('Initialising device configuration.')
if not self.config.device:
raise ConfigError('Make sure a device is specified in the config.')
self.device = self.ext_loader.get_device(self.config.device, **self.config.device_config)
self.device.validate()
self.context = ExecutionContext(self.device, self.config)
self.logger.debug('Loading resource discoverers.')
self.context.initialize()
self.context.resolver.load()
self.context.add_artifact('run_config', config_outfile, 'meta')
self.logger.debug('Installing instrumentation')
for name, params in self.config.instrumentation.iteritems():
instrument = self.ext_loader.get_instrument(name, self.device, **params)
instrumentation.install(instrument)
instrumentation.validate()
self.logger.debug('Installing result processors')
result_manager = ResultManager()
for name, params in self.config.result_processors.iteritems():
processor = self.ext_loader.get_result_processor(name, **params)
result_manager.install(processor)
result_manager.validate()
self.logger.debug('Loading workload specs')
for workload_spec in self.config.workload_specs:
workload_spec.load(self.device, self.ext_loader)
workload_spec.workload.init_resources(self.context)
workload_spec.workload.validate()
if self.config.flashing_config:
if not self.device.flasher:
msg = 'flashing_config specified for {} device that does not support flashing.'
raise ConfigError(msg.format(self.device.name))
self.logger.debug('Flashing the device')
self.device.flasher.flash(self.device)
self.logger.info('Running workloads')
runner = self._get_runner(result_manager)
runner.init_queue(self.config.workload_specs)
runner.run()
self.execute_postamble()
def execute_postamble(self):
"""
This happens after the run has completed. The overall results of the run are
summarised to the user.
"""
result = self.context.run_result
counter = Counter()
for ir in result.iteration_results:
counter[ir.status] += 1
self.logger.info('Done.')
self.logger.info('Run duration: {}'.format(format_duration(self.context.run_info.duration)))
status_summary = 'Ran a total of {} iterations: '.format(sum(self.context.job_iteration_counts.values()))
parts = []
for status in IterationResult.values:
if status in counter:
parts.append('{} {}'.format(counter[status], status))
self.logger.info(status_summary + ', '.join(parts))
self.logger.info('Results can be found in {}'.format(settings.output_directory))
if self.error_logged:
self.logger.warn('There were errors during execution.')
self.logger.warn('Please see {}'.format(settings.log_file))
elif self.warning_logged:
self.logger.warn('There were warnings during execution.')
self.logger.warn('Please see {}'.format(settings.log_file))
def _get_runner(self, result_manager):
if not self.config.execution_order or self.config.execution_order == 'by_iteration':
if self.config.reboot_policy == 'each_spec':
self.logger.info('each_spec reboot policy with the default by_iteration execution order is '
'equivalent to each_iteration policy.')
runnercls = ByIterationRunner
elif self.config.execution_order in ['classic', 'by_spec']:
runnercls = BySpecRunner
elif self.config.execution_order == 'by_section':
runnercls = BySectionRunner
elif self.config.execution_order == 'random':
runnercls = RandomRunner
else:
raise ConfigError('Unexpected execution order: {}'.format(self.config.execution_order))
return runnercls(self.device, self.context, result_manager)
def _error_signalled_callback(self):
self.error_logged = True
signal.disconnect(self._error_signalled_callback, signal.ERROR_LOGGED)
def _warning_signalled_callback(self):
self.warning_logged = True
signal.disconnect(self._warning_signalled_callback, signal.WARNING_LOGGED)
class RunnerJob(object):
"""
Represents a single execution of a ``RunnerJobDescription``. There will be one created for each iteration
specified by ``RunnerJobDescription.number_of_iterations``.
"""
def __init__(self, spec):
self.spec = spec
self.iteration = None
self.result = IterationResult(self.spec)
class Runner(object):
"""
This class is responsible for actually performing a workload automation
run. The main responsibility of this class is to emit appropriate signals
at the various stages of the run to allow things like traces and other
instrumentation to hook into the process.
This is an abstract base class that defines each step of the run, but not
the order in which those steps are executed, which is left to the concrete
derived classes.
"""
class _RunnerError(Exception):
"""Internal runner error."""
pass
@property
def current_job(self):
if self.job_queue:
return self.job_queue[0]
return None
@property
def previous_job(self):
if self.completed_jobs:
return self.completed_jobs[-1]
return None
@property
def next_job(self):
if self.job_queue:
if len(self.job_queue) > 1:
return self.job_queue[1]
return None
@property
def spec_changed(self):
if self.previous_job is None and self.current_job is not None: # Start of run
return True
if self.previous_job is not None and self.current_job is None: # End of run
return True
return self.current_job.spec.id != self.previous_job.spec.id
@property
def spec_will_change(self):
if self.current_job is None and self.next_job is not None: # Start of run
return True
if self.current_job is not None and self.next_job is None: # End of run
return True
return self.current_job.spec.id != self.next_job.spec.id
def __init__(self, device, context, result_manager):
self.device = device
self.context = context
self.result_manager = result_manager
self.logger = logging.getLogger('Runner')
self.job_queue = []
self.completed_jobs = []
self._initial_reset = True
def init_queue(self, specs):
raise NotImplementedError()
def run(self): # pylint: disable=too-many-branches
self._send(signal.RUN_START)
self._initialize_run()
try:
while self.job_queue:
try:
self._init_job()
self._run_job()
except KeyboardInterrupt:
self.current_job.result.status = IterationResult.ABORTED
raise
except Exception, e: # pylint: disable=broad-except
self.current_job.result.status = IterationResult.FAILED
self.current_job.result.add_event(e.message)
if isinstance(e, DeviceNotRespondingError):
self.logger.info('Device appears to be unresponsive.')
if self.context.reboot_policy.can_reboot and self.device.can('reset_power'):
self.logger.info('Attempting to hard-reset the device...')
try:
self.device.hard_reset()
self.device.connect()
except DeviceError: # hard_reset not implemented for the device.
raise e
else:
raise e
else: # not a DeviceNotRespondingError
self.logger.error(e)
finally:
self._finalize_job()
except KeyboardInterrupt:
self.logger.info('Got CTRL-C. Finalizing run... (CTRL-C again to abort).')
# Skip through the remaining jobs.
while self.job_queue:
self.context.next_job(self.current_job)
self.current_job.result.status = IterationResult.ABORTED
self._finalize_job()
except DeviceNotRespondingError:
self.logger.info('Device unresponsive and recovery not possible. Skipping the rest of the run.')
self.context.aborted = True
while self.job_queue:
self.context.next_job(self.current_job)
self.current_job.result.status = IterationResult.SKIPPED
self._finalize_job()
instrumentation.enable_all()
self._finalize_run()
self._process_results()
self.result_manager.finalize(self.context)
self._send(signal.RUN_END)
def _initialize_run(self):
self.context.run_info.start_time = datetime.utcnow()
if self.context.reboot_policy.perform_initial_boot:
self.logger.info('\tBooting device')
with self._signal_wrap('INITIAL_BOOT'):
self._reboot_device()
else:
self.logger.info('Connecting to device')
self.device.connect()
self.logger.info('Initializing device')
self.device.initialize(self.context)
props = self.device.get_properties(self.context)
self.context.run_info.device_properties = props
self.result_manager.initialize(self.context)
self._send(signal.RUN_INIT)
if instrumentation.check_failures():
raise InstrumentError('Detected failure(s) during instrumentation initialization.')
def _init_job(self):
self.current_job.result.status = IterationResult.RUNNING
self.context.next_job(self.current_job)
def _run_job(self): # pylint: disable=too-many-branches
spec = self.current_job.spec
if not spec.enabled:
self.logger.info('Skipping workload %s (iteration %s)', spec, self.context.current_iteration)
self.current_job.result.status = IterationResult.SKIPPED
return
self.logger.info('Running workload %s (iteration %s)', spec, self.context.current_iteration)
if spec.flash:
if not self.context.reboot_policy.can_reboot:
raise ConfigError('Cannot flash as reboot_policy does not permit rebooting.')
if not self.device.can('flash'):
raise DeviceError('Device does not support flashing.')
self._flash_device(spec.flash)
elif not self.completed_jobs:
# Never reboot on the very first job of a run, as we would have done
# the initial reboot if a reboot was needed.
pass
elif self.context.reboot_policy.reboot_on_each_spec and self.spec_changed:
self.logger.debug('Rebooting on spec change.')
self._reboot_device()
elif self.context.reboot_policy.reboot_on_each_iteration:
self.logger.debug('Rebooting on iteration.')
self._reboot_device()
instrumentation.disable_all()
instrumentation.enable(spec.instrumentation)
self.device.start()
if self.spec_changed:
self._send(signal.WORKLOAD_SPEC_START)
self._send(signal.ITERATION_START)
try:
setup_ok = False
with self._handle_errors('Setting up device parameters'):
self.device.set_runtime_parameters(spec.runtime_parameters)
setup_ok = True
if setup_ok:
with self._handle_errors('running {}'.format(spec.workload.name)):
self.current_job.result.status = IterationResult.RUNNING
self._run_workload_iteration(spec.workload)
else:
self.logger.info('\tSkipping the rest of the iterations for this spec.')
spec.enabled = False
except KeyboardInterrupt:
self._send(signal.ITERATION_END)
self._send(signal.WORKLOAD_SPEC_END)
raise
else:
self._send(signal.ITERATION_END)
if self.spec_will_change or not spec.enabled:
self._send(signal.WORKLOAD_SPEC_END)
finally:
self.device.stop()
def _finalize_job(self):
self.context.run_result.iteration_results.append(self.current_job.result)
self.job_queue[0].iteration = self.context.current_iteration
self.completed_jobs.append(self.job_queue.pop(0))
self.context.end_job()
def _finalize_run(self):
self.logger.info('Finalizing.')
self._send(signal.RUN_FIN)
with self._handle_errors('Disconnecting from the device'):
self.device.disconnect()
info = self.context.run_info
info.end_time = datetime.utcnow()
info.duration = info.end_time - info.start_time
def _process_results(self):
self.logger.info('Processing overall results')
with self._signal_wrap('OVERALL_RESULTS_PROCESSING'):
if instrumentation.check_failures():
self.context.run_result.non_iteration_errors = True
self.result_manager.process_run_result(self.context.run_result, self.context)
def _run_workload_iteration(self, workload):
self.logger.info('\tSetting up')
with self._signal_wrap('WORKLOAD_SETUP'):
try:
workload.setup(self.context)
except:
self.logger.info('\tSkipping the rest of the iterations for this spec.')
self.current_job.spec.enabled = False
raise
try:
self.logger.info('\tExecuting')
with self._handle_errors('Running workload'):
with self._signal_wrap('WORKLOAD_EXECUTION'):
workload.run(self.context)
self.logger.info('\tProcessing result')
self._send(signal.BEFORE_WORKLOAD_RESULT_UPDATE)
try:
if self.current_job.result.status != IterationResult.FAILED:
with self._handle_errors('Processing workload result',
on_error_status=IterationResult.PARTIAL):
workload.update_result(self.context)
self._send(signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE)
if self.current_job.result.status == IterationResult.RUNNING:
self.current_job.result.status = IterationResult.OK
finally:
self._send(signal.AFTER_WORKLOAD_RESULT_UPDATE)
finally:
self.logger.info('\tTearing down')
with self._handle_errors('Tearing down workload',
on_error_status=IterationResult.NONCRITICAL):
with self._signal_wrap('WORKLOAD_TEARDOWN'):
workload.teardown(self.context)
self.result_manager.add_result(self.current_job.result, self.context)
def _flash_device(self, flashing_params):
with self._signal_wrap('FLASHING'):
self.device.flash(**flashing_params)
self.device.connect()
def _reboot_device(self):
with self._signal_wrap('BOOT'):
for reboot_attempts in xrange(MAX_REBOOT_ATTEMPTS):
if reboot_attempts:
self.logger.info('\tRetrying...')
with self._handle_errors('Rebooting device'):
self.device.boot(**self.current_job.spec.boot_parameters)
break
else:
raise DeviceError('Could not reboot device; max reboot attempts exceeded.')
self.device.connect()
def _send(self, s):
signal.send(s, self, self.context)
def _take_screenshot(self, filename):
if self.context.output_directory:
filepath = os.path.join(self.context.output_directory, filename)
else:
filepath = os.path.join(settings.output_directory, filename)
self.device.capture_screen(filepath)
@contextmanager
def _handle_errors(self, action, on_error_status=IterationResult.FAILED):
try:
if action is not None:
self.logger.debug(action)
yield
except (KeyboardInterrupt, DeviceNotRespondingError):
raise
except (WAError, TimeoutError), we:
self.device.ping()
if self.current_job:
self.current_job.result.status = on_error_status
self.current_job.result.add_event(str(we))
try:
self._take_screenshot('error.png')
except Exception, e: # pylint: disable=W0703
# We're already in error state, so the fact that taking a
# screenshot failed is not surprising...
pass
if action:
action = action[0].lower() + action[1:]
self.logger.error('Error while {}:\n\t{}'.format(action, we))
except Exception, e: # pylint: disable=W0703
error_text = '{}("{}")'.format(e.__class__.__name__, e)
if self.current_job:
self.current_job.result.status = on_error_status
self.current_job.result.add_event(error_text)
self.logger.error('Error while {}'.format(action))
self.logger.error(error_text)
if isinstance(e, subprocess.CalledProcessError):
self.logger.error('Got:')
self.logger.error(e.output)
tb = get_traceback()
self.logger.error(tb)
@contextmanager
def _signal_wrap(self, signal_name):
"""Wraps the suite in before/after signals, ensuring
that after signal is always sent."""
before_signal = getattr(signal, 'BEFORE_' + signal_name)
success_signal = getattr(signal, 'SUCCESSFUL_' + signal_name)
after_signal = getattr(signal, 'AFTER_' + signal_name)
try:
self._send(before_signal)
yield
self._send(success_signal)
finally:
self._send(after_signal)
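# Sketch of hooking the stages wrapped above, mirroring the connect()
# calls used by Executor earlier in this file (the handler itself is
# hypothetical):
#
#     import wlauto.core.signal as signal
#
#     def on_setup_start():
#         print 'workload setup about to begin'
#
#     signal.connect(on_setup_start, signal.BEFORE_WORKLOAD_SETUP)
#
# Each _signal_wrap('WORKLOAD_SETUP') block then fires BEFORE_, then
# SUCCESSFUL_ (on success), then AFTER_WORKLOAD_SETUP, with the AFTER
# signal guaranteed even if the stage raises.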
class BySpecRunner(Runner):
"""
This is that "classic" implementation that executes all iterations of a workload
spec before proceeding onto the next spec.
"""
def init_queue(self, specs):
jobs = [[RunnerJob(s) for _ in xrange(s.number_of_iterations)] for s in specs] # pylint: disable=unused-variable
self.job_queue = [j for spec_jobs in jobs for j in spec_jobs]
class BySectionRunner(Runner):
"""
Runs the first iteration for all benchmarks first, before proceeding to the next iteration,
i.e. A1, B1, C1, A2, B2, C2... instead of A1, A2, B1, B2, C1, C2...
If multiple sections were specified in the agenda, this will run all specs for the first section
followed by all specs for the second section, etc.
e.g. given sections X and Y, and global specs A and B, with 2 iterations, this will run
X.A1, X.B1, Y.A1, Y.B1, X.A2, X.B2, Y.A2, Y.B2
"""
def init_queue(self, specs):
jobs = [[RunnerJob(s) for _ in xrange(s.number_of_iterations)] for s in specs]
self.job_queue = [j for spec_jobs in izip_longest(*jobs) for j in spec_jobs if j]
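# The izip_longest flattening above is what produces the interleaving.
# Quick illustrative check with stand-in job labels:
#
#     from itertools import izip_longest
#     jobs = [['A1', 'A2'], ['B1', 'B2'], ['C1']]
#     flat = [j for spec_jobs in izip_longest(*jobs)
#             for j in spec_jobs if j]
#     # flat == ['A1', 'B1', 'C1', 'A2', 'B2']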
class ByIterationRunner(Runner):
"""
Runs the first iteration for all benchmarks first, before proceeding to the next iteration,
i.e. A1, B1, C1, A2, B2, C2... instead of A1, A2, B1, B2, C1, C2...
If multiple sections were specified in the agenda, this will run all sections for the first global
spec first, followed by all sections for the second spec, etc.
e.g. given sections X and Y, and global specs A and B, with 2 iterations, this will run
X.A1, Y.A1, X.B1, Y.B1, X.A2, Y.A2, X.B2, Y.B2
"""
def init_queue(self, specs):
sections = OrderedDict()
for s in specs:
if s.section_id not in sections:
sections[s.section_id] = []
sections[s.section_id].append(s)
specs = [s for section_specs in izip_longest(*sections.values()) for s in section_specs if s]
jobs = [[RunnerJob(s) for _ in xrange(s.number_of_iterations)] for s in specs]
self.job_queue = [j for spec_jobs in izip_longest(*jobs) for j in spec_jobs if j]
class RandomRunner(Runner):
"""
This will run specs in a random order.
"""
def init_queue(self, specs):
jobs = [[RunnerJob(s) for _ in xrange(s.number_of_iterations)] for s in specs] # pylint: disable=unused-variable
all_jobs = [j for spec_jobs in jobs for j in spec_jobs]
random.shuffle(all_jobs)
self.job_queue = all_jobs

652
wlauto/core/extension.py Normal file
View File

@@ -0,0 +1,652 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=E1101
import os
import logging
import inspect
from copy import copy
from collections import OrderedDict
from wlauto.core.bootstrap import settings
from wlauto.exceptions import ValidationError, ConfigError
from wlauto.utils.misc import isiterable, ensure_directory_exists as _d, get_article
from wlauto.utils.types import identifier
class AttributeCollection(object):
"""
Accumulator for extension attribute objects (such as Parameters or Artifacts). This will
replace any class member list accumulating such attributes through the magic of
metaprogramming\ [*]_.
.. [*] which is totally safe and not going to backfire in any way...
"""
@property
def values(self):
return self._attrs.values()
def __init__(self, attrcls):
self._attrcls = attrcls
self._attrs = OrderedDict()
def add(self, p):
p = self._to_attrcls(p)
if p.name in self._attrs:
if p.override:
newp = copy(self._attrs[p.name])
for a, v in p.__dict__.iteritems():
if v is not None:
setattr(newp, a, v)
self._attrs[p.name] = newp
else:
# Duplicate attribute condition is checked elsewhere.
pass
else:
self._attrs[p.name] = p
append = add
def __str__(self):
return 'AC({})'.format(map(str, self._attrs.values()))
__repr__ = __str__
def _to_attrcls(self, p):
if isinstance(p, basestring):
p = self._attrcls(p)
elif isinstance(p, tuple) or isinstance(p, list):
p = self._attrcls(*p)
elif isinstance(p, dict):
p = self._attrcls(**p)
elif not isinstance(p, self._attrcls):
raise ValueError('Invalid parameter value: {}'.format(p))
if (p.name in self._attrs and not p.override and
p.name != 'modules'): # TODO: HACK due to "diamond dependency" in workloads...
raise ValueError('Attribute {} has already been defined.'.format(p.name))
return p
def __iadd__(self, other):
for p in other:
self.add(p)
return self
def __iter__(self):
return iter(self.values)
def __contains__(self, p):
return p in self._attrs
def __getitem__(self, i):
return self._attrs[i]
def __len__(self):
return len(self._attrs)
class AliasCollection(AttributeCollection):
def __init__(self):
super(AliasCollection, self).__init__(Alias)
def _to_attrcls(self, p):
if isinstance(p, tuple) or isinstance(p, list):
# must be in the form (name, {param: value, ...})
p = self._attrcls(p[0], **p[1])
elif not isinstance(p, self._attrcls):
raise ValueError('Invalid parameter value: {}'.format(p))
if p.name in self._attrs:
raise ValueError('Attribute {} has already been defined.'.format(p.name))
return p
class ListCollection(list):
def __init__(self, attrcls): # pylint: disable=unused-argument
super(ListCollection, self).__init__()
class Param(object):
"""
This is a generic parameter for an extension. Extensions instantiate this to declare which parameters
are supported.
"""
def __init__(self, name, kind=None, mandatory=None, default=None, override=False,
allowed_values=None, description=None, constraint=None, global_alias=None):
"""
Create a new Parameter object.
:param name: The name of the parameter. This will become an instance member of the
extension object to which the parameter is applied, so it must be a valid
python identifier. This is the only mandatory parameter.
:param kind: The type of parameter this is. This must be a callable that takes an arbitrary
object and converts it to the expected type, or raises ``ValueError`` if such
conversion is not possible. Most Python standard types -- ``str``, ``int``, ``bool``, etc. --
can be used here (though for ``bool``, ``wlauto.utils.misc.as_bool`` is preferred
as it intuitively handles strings like ``'false'``). This defaults to ``str`` if
not specified.
:param mandatory: If set to ``True``, then a non-``None`` value for this parameter *must* be
provided on extension object construction, otherwise ``ConfigError`` will be
raised.
:param default: The default value for this parameter. If no value is specified on extension
construction, this value will be used instead. (Note: if this is specified and
is not ``None``, then ``mandatory`` parameter will be ignored).
:param override: A ``bool`` that specifies whether a parameter of the same name further up the
hierarchy should be overridden. If this is ``False`` (the default), an exception
will be raised by the ``AttributeCollection`` instead.
:param allowed_values: This should be the complete list of allowed values for this parameter.
Note: ``None`` value will always be allowed, even if it is not in this list.
If you want to disallow ``None``, set ``mandatory`` to ``True``.
:param constraint: If specified, this must be a callable that takes the parameter value
as an argument and return a boolean indicating whether the constraint
has been satisfied. Alternatively, can be a two-tuple with said callable as
the first element and a string describing the constraint as the second.
:param global_alias: This is an alternative alias for this parameter, unlike the name, this
alias will not be namespaced under the owning extension's name (hence the
global part). This is introduced primarily for backward compatibility -- so
that old extension settings names still work. This should not be used for
new parameters.
"""
self.name = identifier(name)
if kind is not None and not callable(kind):
raise ValueError('Kind must be callable.')
self.kind = kind
self.mandatory = mandatory
self.default = default
self.override = override
self.allowed_values = allowed_values
self.description = description
if self.kind is None and not self.override:
self.kind = str
if constraint is not None and not callable(constraint) and not isinstance(constraint, tuple):
raise ValueError('Constraint must be callable or a (callable, str) tuple.')
self.constraint = constraint
self.global_alias = global_alias
def set_value(self, obj, value=None):
if value is None:
if self.default is not None:
value = self.default
elif self.mandatory:
msg = 'No value specified for mandatory parameter {} in {}'
raise ConfigError(msg.format(self.name, obj.name))
else:
try:
value = self.kind(value)
except (ValueError, TypeError):
typename = self.get_type_name()
msg = 'Bad value "{}" for {}; must be {} {}'
article = get_article(typename)
raise ConfigError(msg.format(value, self.name, article, typename))
current_value = getattr(obj, self.name, None)
if current_value is None:
setattr(obj, self.name, value)
elif not isiterable(current_value):
setattr(obj, self.name, value)
else:
new_value = current_value + [value]
setattr(obj, self.name, new_value)
def validate(self, obj):
value = getattr(obj, self.name, None)
if value is not None:
if self.allowed_values:
self._validate_allowed_values(obj, value)
if self.constraint:
self._validate_constraint(obj, value)
else:
if self.mandatory:
msg = 'No value specified for mandatory parameter {} in {}.'
raise ConfigError(msg.format(self.name, obj.name))
def get_type_name(self):
typename = str(self.kind)
if '\'' in typename:
typename = typename.split('\'')[1]
elif typename.startswith('<function'):
typename = typename.split()[1]
return typename
def _validate_allowed_values(self, obj, value):
if 'list' in str(self.kind):
for v in value:
if v not in self.allowed_values:
msg = 'Invalid value {} for {} in {}; must be in {}'
raise ConfigError(msg.format(v, self.name, obj.name, self.allowed_values))
else:
if value not in self.allowed_values:
msg = 'Invalid value {} for {} in {}; must be in {}'
raise ConfigError(msg.format(value, self.name, obj.name, self.allowed_values))
def _validate_constraint(self, obj, value):
msg_vals = {'value': value, 'param': self.name, 'extension': obj.name}
if isinstance(self.constraint, tuple) and len(self.constraint) == 2:
constraint, msg = self.constraint # pylint: disable=unpacking-non-sequence
elif callable(self.constraint):
constraint = self.constraint
msg = '"{value}" failed constraint validation for {param} in {extension}.'
else:
raise ValueError('Invalid constraint for {}: must be callable or a 2-tuple'.format(self.name))
if not constraint(value):
raise ConfigError(msg.format(**msg_vals))
def __repr__(self):
d = copy(self.__dict__)
del d['description']
return 'Param({})'.format(d)
__str__ = __repr__
Parameter = Param
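# Illustrative declaration (a hypothetical extension) exercising the main
# knobs documented above:
#
#     parameters = [
#         Parameter('number_of_threads', kind=int, default=4,
#                   constraint=(lambda x: x > 0, 'must be positive'),
#                   description='Number of worker threads to spawn.'),
#         Parameter('mode', allowed_values=['fast', 'accurate'],
#                   default='fast'),
#     ]
#
# Since 'number_of_threads' has a non-None default, its mandatory flag
# would be ignored even if set.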
class Artifact(object):
"""
This is an artifact generated during execution/post-processing of a workload.
Unlike metrics, this represents an actual artifact, such as a file, generated.
This may be "result", such as trace, or it could be "meta data" such as logs.
These are distinguished using the ``kind`` attribute, which also helps WA decide
how it should be handled. Currently supported kinds are:
:log: A log file. Not part of "results" as such but contains information about the
run/workload execution that may be useful for diagnostics/meta analysis.
:meta: A file containing metadata. This is not part of "results", but contains
information that may be necessary to reproduce the results (contrast with
``log`` artifacts which are *not* necessary).
:data: This file contains new data, not available otherwise and should be considered
part of the "results" generated by WA. Most traces would fall into this category.
:export: Exported version of results or some other artifact. This signifies that
this artifact does not contain any new data that is not available
elsewhere and that it may be safely discarded without losing information.
:raw: Signifies that this is a raw dump/log that is normally processed to extract
useful information and is then discarded. In a sense, it is the opposite of
``export``, but in general may also be discarded.
.. note:: whether a file is marked as ``log``/``data`` or ``raw`` depends on
how important it is to preserve this file, e.g. when archiving, vs
how much space it takes up. Unlike ``export`` artifacts which are
(almost) always ignored by other exporters as that would never result
in data loss, ``raw`` files *may* be processed by exporters if they
decided that the risk of losing potentially (though unlikely) useful
data is greater than the time/space cost of handling the artifact (e.g.
a database uploader may choose to ignore ``raw`` artifacts, where as a
network filer archiver may choose to archive them).
.. note:: The kind parameter is intended to represent the logical function of a particular
artifact, not its intended means of processing -- this is left entirely up to the
result processors.
"""
RUN = 'run'
ITERATION = 'iteration'
valid_kinds = ['log', 'meta', 'data', 'export', 'raw']
def __init__(self, name, path, kind, level=RUN, mandatory=False, description=None):
""""
:param name: Name that uniquely identifies this artifact.
:param path: The *relative* path of the artifact. Depending on the ``level``
must be either relative to the run or iteration output directory.
Note: this path *must* be delimited using ``/`` irrespective of the
operating system.
:param kind: The type of the artifact this is (e.g. log file, result, etc.) this
will be used as a hint to result processors. This must be one of ``'log'``,
``'meta'``, ``'data'``, ``'export'``, ``'raw'``.
:param level: The level at which the artifact will be generated. Must be either
``'iteration'`` or ``'run'``.
:param mandatory: Boolean value indicating whether this artifact must be present
at the end of result processing for its level.
:param description: A free-form description of what this artifact is.
"""
if kind not in self.valid_kinds:
raise ValueError('Invalid Artifact kind: {}; must be in {}'.format(kind, self.valid_kinds))
self.name = name
self.path = path.replace('/', os.sep) if path is not None else path
self.kind = kind
self.level = level
self.mandatory = mandatory
self.description = description
def exists(self, context):
"""Returns ``True`` if artifact exists within the specified context, and
``False`` otherwise."""
fullpath = os.path.join(context.output_directory, self.path)
return os.path.exists(fullpath)
def to_dict(self):
return copy(self.__dict__)
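# Illustrative declaration (the path and description are hypothetical):
#
#     artifacts = [
#         Artifact('trace', 'trace.txt', 'raw', level=Artifact.ITERATION,
#                  mandatory=True,
#                  description='Raw trace capture for the iteration.'),
#     ]
#
# Being iteration-level and mandatory, its absence at the end of an
# iteration would be flagged by Extension.check_artifacts().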
class Alias(object):
"""
This represents a configuration alias for an extension, mapping an alternative name to
a set of parameter values, effectively providing an alternative set of default values.
"""
def __init__(self, name, **kwargs):
self.name = name
self.params = kwargs
self.extension_name = None # gets set by the MetaClass
def validate(self, ext):
ext_params = set(p.name for p in ext.parameters)
for param in self.params:
if param not in ext_params:
# Raising config error because aliases might have come through
# the config.
msg = 'Parameter {} (defined in alias {}) is invalid for {}'
raise ConfigError(msg.format(param, self.name, ext.name))
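# Illustrative alias (all names here are assumptions for the sketch):
#
#     class Andebench(Workload):
#         name = 'andebench'
#         parameters = [Parameter('number_of_threads', kind=int, default=4)]
#         aliases = [Alias('andebenchst', number_of_threads=1)]
#
# Naming "andebenchst" in an agenda then runs andebench with the
# single-threaded default baked in.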
class ExtensionMeta(type):
"""
This basically adds some magic to extensions to make implementing new extensions, such as
workloads, less complicated.
It ensures that certain class attributes (specified by the ``to_propagate``
attribute of the metaclass) get propagated down the inheritance hierarchy. The assumption
is that the values of the attributes specified in the class are iterable; if that is not met,
Bad Things (tm) will happen.
This also provides virtual method implementation, similar to those in C-derived OO languages,
and alias specifications.
"""
to_propagate = [
('parameters', Parameter, AttributeCollection),
('artifacts', Artifact, AttributeCollection),
('core_modules', str, ListCollection),
]
virtual_methods = ['validate']
def __new__(mcs, clsname, bases, attrs):
mcs._propagate_attributes(bases, attrs)
cls = type.__new__(mcs, clsname, bases, attrs)
mcs._setup_aliases(cls)
mcs._implement_virtual(cls, bases)
return cls
@classmethod
def _propagate_attributes(mcs, bases, attrs):
"""
For attributes specified by to_propagate, their values will be a union of
that specified for cls and its bases (cls values overriding those of bases
in case of conflicts).
"""
for prop_attr, attr_cls, attr_collector_cls in mcs.to_propagate:
should_propagate = False
propagated = attr_collector_cls(attr_cls)
for base in bases:
if hasattr(base, prop_attr):
propagated += getattr(base, prop_attr) or []
should_propagate = True
if prop_attr in attrs:
propagated += attrs[prop_attr] or []
should_propagate = True
if should_propagate:
attrs[prop_attr] = propagated
@classmethod
def _setup_aliases(mcs, cls):
if hasattr(cls, 'aliases'):
aliases, cls.aliases = cls.aliases, AliasCollection()
for alias in aliases:
if isinstance(alias, basestring):
alias = Alias(alias)
alias.validate(cls)
alias.extension_name = cls.name
cls.aliases.add(alias)
@classmethod
def _implement_virtual(mcs, cls, bases):
"""
This implements automatic method propagation to the bases, so
that you don't have to do something like
super(cls, self).vmname()
.. note:: current implementation imposes a restriction in that
parameters to the function *must* be passed as keyword
arguments. There *must not* be positional arguments on
virtual method invocation.
"""
methods = {}
for vmname in mcs.virtual_methods:
clsmethod = getattr(cls, vmname, None)
if clsmethod:
basemethods = [getattr(b, vmname) for b in bases if hasattr(b, vmname)]
methods[vmname] = [bm for bm in basemethods if bm != clsmethod]
methods[vmname].append(clsmethod)
def wrapper(self, __name=vmname, **kwargs):
for dm in methods[__name]:
dm(self, **kwargs)
setattr(cls, vmname, wrapper)
class Extension(object):
"""
Base class for all WA extensions. An extension is basically a plug-in.
It extends the functionality of WA in some way. Extensions are discovered
and loaded dynamically by the extension loader upon invocation of WA scripts.
Adding an extension is a matter of placing a class that implements an appropriate
interface somewhere it would be discovered by the loader. That "somewhere" is
typically one of the extension subdirectories under ``~/.workload_automation/``.
"""
__metaclass__ = ExtensionMeta
kind = None
name = None
parameters = [
Parameter('modules', kind=list,
description="""
Lists the modules to be loaded by this extension. A module is a plug-in that
further extends functionality of an extension.
"""),
]
artifacts = []
aliases = []
core_modules = []
@classmethod
def get_default_config(cls):
return {p.name: p.default for p in cls.parameters}
@property
def dependencies_directory(self):
return _d(os.path.join(settings.dependencies_directory, self.name))
@property
def _classname(self):
return self.__class__.__name__
def __init__(self, **kwargs):
self.__check_from_loader()
self.logger = logging.getLogger(self._classname)
self._modules = []
self.capabilities = getattr(self.__class__, 'capabilities', [])
for param in self.parameters:
param.set_value(self, kwargs.get(param.name))
for key in kwargs:
if key not in self.parameters:
message = 'Unexpected parameter "{}" for {}'
raise ConfigError(message.format(key, self.name))
def get_config(self):
"""
Returns current configuration (i.e. parameter values) of this extension.
"""
config = {}
for param in self.parameters:
config[param.name] = getattr(self, param.name, None)
return config
def validate(self):
"""
Perform basic validation to ensure that this extension is capable of running.
This is intended as an early check to ensure the extension has not been mis-configured,
rather than a comprehensive check (that may, e.g., require access to the execution
context).
This method may also be used to enforce (i.e. set as well as check) inter-parameter
constraints for the extension (e.g. if valid values for parameter A depend on the value
of parameter B -- something that is not possible to enforce using ``Parameter``\ 's
``constraint`` attribute).
"""
if self.name is None:
raise ValidationError('Name not set for {}'.format(self._classname))
for param in self.parameters:
param.validate(self)
def check_artifacts(self, context, level):
"""
Make sure that all mandatory artifacts have been generated.
"""
for artifact in self.artifacts:
if artifact.level != level or not artifact.mandatory:
continue
fullpath = os.path.join(context.output_directory, artifact.path)
if not os.path.exists(fullpath):
message = 'Mandatory "{}" has not been generated for {}.'
raise ValidationError(message.format(artifact.path, self.name))
def __getattr__(self, name):
if name == '_modules':
raise ValueError('_modules accessed too early!')
for module in self._modules:
if hasattr(module, name):
return getattr(module, name)
raise AttributeError(name)
def load_modules(self, loader):
"""
Load the modules specified by the "modules" Parameter using the provided loader. A loader
can be any object that has an attribute called "get_module" that implements the following
signature::
get_module(name, owner, **kwargs)
and returns an instance of :class:`wlauto.core.extension.Module`. If the module with the
specified name is not found, the loader must raise an appropriate exception.
"""
modules = list(reversed(self.core_modules)) + list(reversed(self.modules or []))
if not modules:
return
for module_spec in modules:
if not module_spec:
continue
if isinstance(module_spec, basestring):
name = module_spec
params = {}
elif isinstance(module_spec, dict):
if len(module_spec) != 1:
message = 'Invalid module spec: {}; dict must have exactly one key -- the module name.'
raise ValueError(message.format(module_spec))
name, params = module_spec.items()[0]
else:
message = 'Invalid module spec: {}; must be a string or a one-key dict.'
raise ValueError(message.format(module_spec))
if not isinstance(params, dict):
message = 'Invalid module spec: {}; dict value must also be a dict.'
raise ValueError(message.format(module_spec))
module = loader.get_module(name, owner=self, **params)
module.initialize()
for capability in module.capabilities:
if capability not in self.capabilities:
self.capabilities.append(capability)
self._modules.append(module)
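# Examples of valid values for the "modules" parameter (the module names
# here are illustrative, not modules shipped with WA):
#
#     modules = ['flashing']                          # name only
#     modules = [{'flashing': {'image': 'img.bin'}}]  # name with parameters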
def has(self, capability):
"""Check if this extension has the specified capability. The alternative method ``can`` is
identical to this. Which to use is up to the caller depending on what makes semantic sense
in the context of the capability, e.g. ``can('hard_reset')`` vs ``has('active_cooling')``."""
return capability in self.capabilities
can = has
def __check_from_loader(self):
"""
There are a few things that need to happen in order to get a valid extension instance.
Not all of them are currently done through standard Python initialisation mechanisms
(specifically, the loading of modules and alias resolution). In order to avoid potential
problems with not fully loaded extensions, make sure that an extension is *only* instantiated
by the loader.
"""
stack = inspect.stack()
stack.pop(0) # current frame
frame = stack.pop(0)
# skip through the init call chain
while stack and frame[3] == '__init__':
frame = stack.pop(0)
if frame[3] != '_instantiate':
message = 'Attempting to instantiate {} directly (must be done through an ExtensionLoader)'
raise RuntimeError(message.format(self.__class__.__name__))
class Module(Extension):
"""
This is a "plugin" for an extension this is intended to capture functionality that may be optional
for an extension, and so may or may not be present in a particular setup; or, conversely, functionality
that may be reusable between multiple devices, even if they are not with the same inheritance hierarchy.
In other words, a Module is roughly equivalent to a kernel module and its primary purpose is to
implement WA "drivers" for various peripherals that may or may not be present in a particular setup.
.. note:: A mudule is itself an Extension and can therefore have it's own modules.
"""
capabilities = []
@property
def root_owner(self):
owner = self.owner
while isinstance(owner, Module) and owner is not self:
owner = owner.owner
return owner
def __init__(self, owner, **kwargs):
super(Module, self).__init__(**kwargs)
self.owner = owner
while isinstance(owner, Module):
if owner.name == self.name:
raise ValueError('Circular module import for {}'.format(self.name))
owner = owner.owner  # advance up the ownership chain, otherwise this loops forever
def initialize(self):
pass
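# A minimal sketch (hypothetical names, not an extension shipped with WA) of
# what a concrete extension looks like; note that extensions are instantiated
# by an ExtensionLoader, never directly:
#
#     class MyInstrument(Instrument):
#         name = 'my_instrument'
#         parameters = [
#             Parameter('interval', kind=int, default=10,
#                       description='Sampling interval in seconds.'),
#         ]
#         aliases = [Alias('my_instrument_fast', interval=1)]
#
# Thanks to attribute propagation in ExtensionMeta, MyInstrument.parameters
# will also contain the inherited "modules" parameter, and
# get_default_config() will report defaults for both.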


@@ -0,0 +1,400 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import inspect
import imp
import string
import logging
from functools import partial
from collections import OrderedDict
from wlauto.core.bootstrap import settings
from wlauto.core.extension import Extension
from wlauto.exceptions import NotFoundError, LoaderError
from wlauto.utils.misc import walk_modules, load_class, merge_lists, merge_dicts, get_article
from wlauto.utils.types import identifier
MODNAME_TRANS = string.maketrans(':/\\.', '____')
class ExtensionLoaderItem(object):
def __init__(self, ext_tuple):
self.name = ext_tuple.name
self.default_package = ext_tuple.default_package
self.default_path = ext_tuple.default_path
self.cls = load_class(ext_tuple.cls)
class GlobalParameterAlias(object):
"""
Represents a "global alias" for an extension parameter. A global alias
is specified at the top level of the config, rather than being namespaced under
an extension name.
Multiple extensions may have parameters with the same global_alias if they are
part of the same inheritance hierarchy and one parameter is an override of the
other. This class keeps track of all such cases in its extensions dict.
"""
def __init__(self, name):
self.name = name
self.extensions = {}
def iteritems(self):
for ext in self.extensions.itervalues():
yield (self.get_param(ext), ext)
def get_param(self, ext):
for param in ext.parameters:
if param.global_alias == self.name:
return param
message = 'Extension {} does not have a parameter with global alias {}'
raise ValueError(message.format(ext.name, self.name))
def update(self, other_ext):
self._validate_ext(other_ext)
self.extensions[other_ext.name] = other_ext
def _validate_ext(self, other_ext):
other_param = self.get_param(other_ext)
for param, ext in self.iteritems():
if ((not (issubclass(ext, other_ext) or issubclass(other_ext, ext))) and
other_param.kind != param.kind):
message = 'Duplicate global alias {} declared in {} and {} extensions with different types'
raise LoaderError(message.format(self.name, ext.name, other_ext.name))
if not param.name == other_param.name:
message = 'Two params {} in {} and {} in {} both declare global alias {}'
raise LoaderError(message.format(param.name, ext.name,
other_param.name, other_ext.name, self.name))
def __str__(self):
text = 'GlobalAlias({} => {})'
extlist = ', '.join(['{}.{}'.format(e.name, p.name) for p, e in self.iteritems()])
return text.format(self.name, extlist)
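# Illustration (hypothetical): if two devices in the same inheritance
# hierarchy both declare
#
#     Parameter('core_names', kind=list, global_alias='core_names')
#
# then a user may set core_names at the top level of config.py and it will
# apply to whichever of these devices is in use, without namespacing it
# under a specific device's configuration.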
class ExtensionLoader(object):
"""
Discovers, enumerates and loads available devices, configs, etc.
The loader will attempt to discover things on construction by looking
in a predetermined set of locations defined by default_paths. Optionally,
additional locations may be specified through the ``paths`` parameter, which must
be a list of additional Python module paths (i.e. dot-delimited).
"""
_instance = None
# Singleton
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(ExtensionLoader, cls).__new__(cls, *args, **kwargs)
else:
for k, v in kwargs.iteritems():
if not hasattr(cls._instance, k):
raise ValueError('Invalid parameter for ExtensionLoader: {}'.format(k))
setattr(cls._instance, k, v)
return cls._instance
def set_load_defaults(self, value):
self._load_defaults = value
if value:
self.packages = merge_lists(self.default_packages, self.packages, duplicates='last')
def get_load_defaults(self):
return self._load_defaults
load_defaults = property(get_load_defaults, set_load_defaults)
def __init__(self, packages=None, paths=None, ignore_paths=None, keep_going=False, load_defaults=True):
"""
params::
:packages: List of packages to load extensions from.
:paths: List of paths to be searched for Python modules containing
WA extensions.
:ignore_paths: List of paths to ignore when searching for WA extensions (these would
typically be subdirectories of one or more locations listed in the
``paths`` parameter).
:keep_going: Specifies whether to keep going if an error occurs while loading
extensions.
:load_defaults: Specifies whether extensions should be loaded from default locations
(WA package, and user's WA directory) as well as the packages/paths
specified explicitly in ``packages`` and ``paths`` parameters.
"""
self._load_defaults = None
self.logger = logging.getLogger('ExtensionLoader')
self.keep_going = keep_going
self.extension_kinds = {ext_tuple.name: ExtensionLoaderItem(ext_tuple)
for ext_tuple in settings.extensions}
self.default_packages = [ext.default_package for ext in self.extension_kinds.values()]
self.packages = packages or []
self.load_defaults = load_defaults
self.paths = paths or []
self.ignore_paths = ignore_paths or []
self.extensions = {}
self.aliases = {}
self.global_param_aliases = {}
# create an empty dict for each extension type to store discovered
# extensions.
for ext in self.extension_kinds.values():
setattr(self, '_' + ext.name, {})
self._load_from_packages(self.packages)
self._load_from_paths(self.paths, self.ignore_paths)
def update(self, packages=None, paths=None, ignore_paths=None):
""" Load extensions from the specified paths/packages
without clearing or reloading existing extensions. """
if packages:
self.packages.extend(packages)
self._load_from_packages(packages)
if paths:
self.paths.extend(paths)
self.ignore_paths.extend(ignore_paths or [])
self._load_from_paths(paths, ignore_paths or [])
def clear(self):
""" Clear all discovered items. """
self.extensions.clear()
for ext in self.extension_kinds.values():
self._get_store(ext).clear()
def reload(self):
""" Clear all discovered items and re-run the discovery. """
self.clear()
self._load_from_packages(self.packages)
self._load_from_paths(self.paths, self.ignore_paths)
def get_extension_class(self, name, kind=None):
"""
Return the class for the specified extension if found or raises ``ValueError``.
"""
name, _ = self.resolve_alias(name)
if kind is None:
return self.extensions[name]
ext = self.extension_kinds.get(kind)
if ext is None:
raise ValueError('Unknown extension type: {}'.format(kind))
store = self._get_store(ext)
if name not in store:
raise NotFoundError('Extension {} is not {} {}.'.format(name, get_article(kind), kind))
return store[name]
def get_extension(self, name, *args, **kwargs):
"""
Return extension of the specified kind with the specified name. Any additional
parameters will be passed to the extension's __init__.
"""
name, base_kwargs = self.resolve_alias(name)
kind = kwargs.pop('kind', None)
kwargs = merge_dicts(base_kwargs, kwargs, list_duplicates='last', dict_type=OrderedDict)
cls = self.get_extension_class(name, kind)
extension = _instantiate(cls, args, kwargs)
extension.load_modules(self)
return extension
def get_default_config(self, ext_name):
"""
Returns the default configuration for the specified extension name. The name may be an alias,
in which case, the returned config will be augmented with appropriate alias overrides.
"""
real_name, alias_config = self.resolve_alias(ext_name)
base_default_config = self.get_extension_class(real_name).get_default_config()
return merge_dicts(base_default_config, alias_config, list_duplicates='last', dict_type=OrderedDict)
def list_extensions(self, kind=None):
"""
List discovered extension classes. Optionally, only list extensions of a
particular type.
"""
if kind is None:
return self.extensions.values()
if kind not in self.extension_kinds:
raise ValueError('Unknown extension type: {}'.format(kind))
return self._get_store(self.extension_kinds[kind]).values()
def has_extension(self, name, kind=None):
"""
Returns ``True`` if an extension with the specified ``name`` has been
discovered by the loader. If ``kind`` was specified, only returns ``True``
if the extension has been found, *and* it is of the specified kind.
"""
try:
self.get_extension_class(name, kind)
return True
except NotFoundError:
return False
def resolve_alias(self, alias_name):
"""
Try to resolve the specified name as an extension alias. Returns a
two-tuple, the first value of which is actual extension name, and the
second is a dict of parameter values for this alias. If the name passed
is already an extension name, then the result is ``(alias_name, {})``.
"""
alias_name = identifier(alias_name.lower())
if alias_name in self.extensions:
return (alias_name, {})
if alias_name in self.aliases:
alias = self.aliases[alias_name]
return (alias.extension_name, alias.params)
raise NotFoundError('Could not find extension or alias "{}"'.format(alias_name))
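# Illustration (hypothetical alias): if a workload 'foo' declares
# Alias('foo_fast', speed=10), then resolve_alias('foo_fast') returns
# ('foo', {'speed': 10}), while resolve_alias('foo') returns ('foo', {}).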
# Internal methods.
def __getattr__(self, name):
"""
This resolves methods for specific extension types based on corresponding
generic extension methods. So it's possible to say things like ::
loader.get_device('foo')
instead of ::
loader.get_extension('foo', kind='device')
"""
if name.startswith('get_'):
name = name.replace('get_', '', 1)
if name in self.extension_kinds:
return partial(self.get_extension, kind=name)
if name.startswith('list_'):
name = name.replace('list_', '', 1).rstrip('s')
if name in self.extension_kinds:
return partial(self.list_extensions, kind=name)
if name.startswith('has_'):
name = name.replace('has_', '', 1)
if name in self.extension_kinds:
return partial(self.has_extension, kind=name)
raise AttributeError(name)
def _get_store(self, ext):
name = getattr(ext, 'name', ext)
return getattr(self, '_' + name)
def _load_from_packages(self, packages):
try:
for package in packages:
for module in walk_modules(package):
self._load_module(module)
except ImportError as e:
message = 'Problem loading extensions from extra packages: {}'
raise LoaderError(message.format(e.message))
def _load_from_paths(self, paths, ignore_paths):
self.logger.debug('Loading from paths.')
for path in paths:
self.logger.debug('Checking path %s', path)
for root, _, files in os.walk(path):
should_skip = False
for igpath in ignore_paths:
if root.startswith(igpath):
should_skip = True
break
if should_skip:
continue
for fname in files:
if not os.path.splitext(fname)[1].lower() == '.py':
continue
filepath = os.path.join(root, fname)
try:
modname = os.path.splitext(filepath[1:])[0].translate(MODNAME_TRANS)
module = imp.load_source(modname, filepath)
self._load_module(module)
except (SystemExit, ImportError), e:
if self.keep_going:
self.logger.warn('Failed to load {}'.format(filepath))
self.logger.warn('Got: {}'.format(e))
else:
raise LoaderError('Failed to load {}'.format(filepath), sys.exc_info())
def _load_module(self, module): # NOQA pylint: disable=too-many-branches
self.logger.debug('Checking module %s', module.__name__)
for obj in vars(module).itervalues():
if inspect.isclass(obj):
if not issubclass(obj, Extension) or not hasattr(obj, 'name') or not obj.name:
continue
try:
for ext in self.extension_kinds.values():
if issubclass(obj, ext.cls):
self._add_found_extension(obj, ext)
break
else: # did not find a matching Extension type
message = 'Unknown extension type for {} (type: {})'
raise LoaderError(message.format(obj.name, obj.__name__))
except LoaderError as e:
if self.keep_going:
self.logger.warning(e)
else:
raise e
def _add_found_extension(self, obj, ext):
"""
:obj: Found extension class
:ext: matching extension item.
"""
self.logger.debug('\tAdding %s %s', ext.name, obj.name)
key = identifier(obj.name.lower())
obj.kind = ext.name
if key in self.extensions or key in self.aliases:
raise LoaderError('{} {} already exists.'.format(ext.name, obj.name))
# Extensions are tracked both in a common extensions
# dict and in a per-extension-kind dict (as retrieving
# extensions by kind is a common use case).
self.extensions[key] = obj
store = self._get_store(ext)
store[key] = obj
for alias in obj.aliases:
if alias.name in self.extensions or alias.name in self.aliases:
raise LoaderError('{} {} already exists.'.format(ext.name, alias.name))
self.aliases[alias.name] = alias
# Update global aliases list. If a global alias is already in the list,
# then make sure this extension is in the same parent/child hierarchy
# as the one already found.
for param in obj.parameters:
if param.global_alias:
if param.global_alias not in self.global_param_aliases:
ga = GlobalParameterAlias(param.global_alias)
ga.update(obj)
self.global_param_aliases[ga.name] = ga
else: # global alias already exists.
self.global_param_aliases[param.global_alias].update(obj)
# Utility functions.
def _instantiate(cls, args=None, kwargs=None):
args = [] if args is None else args
kwargs = {} if kwargs is None else kwargs
try:
return cls(*args, **kwargs)
except Exception:
raise LoaderError('Could not load {}'.format(cls), sys.exc_info())
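# A short usage sketch (the package and extension names, and the device
# instance, are hypothetical):
#
#     loader = ExtensionLoader(packages=['my_wa_exts'], keep_going=True)
#     workload_cls = loader.get_extension_class('my_workload', kind='workload')
#     # shorthand for get_extension(..., kind='workload'), via __getattr__:
#     workload = loader.get_workload('my_workload', device)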

wlauto/core/exttype.py Normal file

@@ -0,0 +1,35 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Separate module to avoid circular dependencies
from wlauto.core.bootstrap import settings
from wlauto.core.extension import Extension
from wlauto.utils.misc import load_class
_extension_bases = {ext.name: load_class(ext.cls) for ext in settings.extensions}
def get_extension_type(ext):
"""Given an instance of ``wlauto.core.Extension``, return a string representing
the type of the extension (e.g. ``'workload'`` for a Workload subclass instance)."""
if not isinstance(ext, Extension):
raise ValueError('{} is not an instance of Extension'.format(ext))
for name, cls in _extension_bases.iteritems():
if isinstance(ext, cls):
return name
raise ValueError('Unknown extension type: {}'.format(ext.__class__.__name__))
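# Illustration: for an instance w of a Workload subclass created through the
# extension loader, get_extension_type(w) returns 'workload' (assuming
# 'workload' is among the extension kinds listed in settings.extensions,
# as it is by default).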


@@ -0,0 +1,374 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Adding New Instrument
=====================
Any new instrument should be a subclass of Instrument and it must have a name.
When a new instrument is added to Workload Automation, the methods of the new
instrument will be found automatically and hooked up to the supported signals.
Once a signal is broadcast, the corresponding registered method is invoked.
Each method in Instrument must take two arguments, which are self and context.
Supported signals can be found in [... link to signals ...] To make
implementations easier and more consistent, the basic steps to add a new
instrument are similar to the steps to add a new workload.
Hence, implementing the following methods is sufficient to add a new instrument:
- setup: This method is invoked after the workload is set up. All the
necessary setup should go inside this method. Setup includes operations
like pushing files to the target device, installing them, clearing logs,
etc.
- start: It is invoked just before the workload starts execution. This is
where the instrument's measurements start being registered/taken.
- stop: It is invoked just after the workload execution stops. The measurements
should stop being taken/registered.
- update_result: It is invoked after the workload has updated its result.
update_result is where the taken measurements are added to the result so they
can be processed by Workload Automation.
- teardown: It is invoked after the workload is torn down. It is a good place
to clean up any logs generated by the instrument.
For example, to add an instrument which will trace device errors, we subclass
Instrument and override the name attribute::
BINARY_FILE = os.path.join(os.path.dirname(__file__), 'trace')
class TraceErrorsInstrument(Instrument):
name = 'trace-errors'
def __init__(self, device):
super(TraceErrorsInstrument, self).__init__(device)
self.trace_on_device = os.path.join(self.device.working_directory, 'trace')
We then declare and implement the aforementioned methods. For the setup method,
we want to push the file to the target device and then change the file mode to
755 ::
def setup(self, context):
self.device.push_file(BINARY_FILE, self.device.working_directory)
self.device.execute('chmod 755 {}'.format(self.trace_on_device))
Then we implement the start method, which will simply run the file to start
tracing. ::
def start(self, context):
self.device.execute('{} start'.format(self.trace_on_device))
Lastly, we need to stop tracing once the workload stops and this happens in the
stop method::
def stop(self, context):
self.device.execute('{} stop'.format(self.trace_on_device))
The generated result can be updated inside update_result, or, if it is a trace, we
just pull the file to the host. context has a result variable which
has an add_metric method. It can be used to add the instrumentation's metrics
to the final result for the workload. The method can be passed 4 params, which
are the metric key, value, unit and lower_is_better, which is a boolean. ::
def update_result(self, context):
# pull the trace file from the device to the host
result = os.path.join(self.device.working_directory, 'trace.txt')
self.device.pull_file(result, context.working_directory)
# parse the file if needs to be parsed, or add result to
# context.result
At the end, we might want to delete any files generated by the instrumentation
and the code to clear these files goes in the teardown method. ::
def teardown(self, context):
self.device.delete_file(os.path.join(self.device.working_directory, 'trace.txt'))
"""
import logging
import inspect
from collections import OrderedDict
import wlauto.core.signal as signal
from wlauto.core.extension import Extension
from wlauto.exceptions import WAError, DeviceNotRespondingError, TimeoutError
from wlauto.utils.misc import get_traceback, isiterable
logger = logging.getLogger('instrumentation')
# Maps method names onto signals they should be registered to.
# Note: the begin/end signals are paired -- if a begin_ signal is sent,
# then the corresponding end_ signal is guaranteed to also be sent.
# Note: using OrderedDict to preserve logical ordering for the table generated
# in the documentation
SIGNAL_MAP = OrderedDict([
# Below are "aliases" for some of the more common signals to allow
# instrumentation to have similar structure to workloads
('initialize', signal.RUN_INIT),
('setup', signal.SUCCESSFUL_WORKLOAD_SETUP),
('start', signal.BEFORE_WORKLOAD_EXECUTION),
('stop', signal.AFTER_WORKLOAD_EXECUTION),
('process_workload_result', signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE),
('update_result', signal.AFTER_WORKLOAD_RESULT_UPDATE),
('teardown', signal.AFTER_WORKLOAD_TEARDOWN),
('finalize', signal.RUN_FIN),
('on_run_start', signal.RUN_START),
('on_run_end', signal.RUN_END),
('on_workload_spec_start', signal.WORKLOAD_SPEC_START),
('on_workload_spec_end', signal.WORKLOAD_SPEC_END),
('on_iteration_start', signal.ITERATION_START),
('on_iteration_end', signal.ITERATION_END),
('before_initial_boot', signal.BEFORE_INITIAL_BOOT),
('on_successful_initial_boot', signal.SUCCESSFUL_INITIAL_BOOT),
('after_initial_boot', signal.AFTER_INITIAL_BOOT),
('before_first_iteration_boot', signal.BEFORE_FIRST_ITERATION_BOOT),
('on_successful_first_iteration_boot', signal.SUCCESSFUL_FIRST_ITERATION_BOOT),
('after_first_iteration_boot', signal.AFTER_FIRST_ITERATION_BOOT),
('before_boot', signal.BEFORE_BOOT),
('on_successful_boot', signal.SUCCESSFUL_BOOT),
('after_boot', signal.AFTER_BOOT),
('on_spec_init', signal.SPEC_INIT),
('on_run_init', signal.RUN_INIT),
('on_iteration_init', signal.ITERATION_INIT),
('before_workload_setup', signal.BEFORE_WORKLOAD_SETUP),
('on_successful_workload_setup', signal.SUCCESSFUL_WORKLOAD_SETUP),
('after_workload_setup', signal.AFTER_WORKLOAD_SETUP),
('before_workload_execution', signal.BEFORE_WORKLOAD_EXECUTION),
('on_successful_workload_execution', signal.SUCCESSFUL_WORKLOAD_EXECUTION),
('after_workload_execution', signal.AFTER_WORKLOAD_EXECUTION),
('before_workload_result_update', signal.BEFORE_WORKLOAD_RESULT_UPDATE),
('on_successful_workload_result_update', signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE),
('after_workload_result_update', signal.AFTER_WORKLOAD_RESULT_UPDATE),
('before_workload_teardown', signal.BEFORE_WORKLOAD_TEARDOWN),
('on_successful_workload_teardown', signal.SUCCESSFUL_WORKLOAD_TEARDOWN),
('after_workload_teardown', signal.AFTER_WORKLOAD_TEARDOWN),
('before_overall_results_processing', signal.BEFORE_OVERALL_RESULTS_PROCESSING),
('on_successful_overall_results_processing', signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING),
('after_overall_results_processing', signal.AFTER_OVERALL_RESULTS_PROCESSING),
('on_error', signal.ERROR_LOGGED),
('on_warning', signal.WARNING_LOGGED),
])
PRIORITY_MAP = OrderedDict([
('very_fast_', 20),
('fast_', 10),
('normal_', 0),
('slow_', -10),
('very_slow_', -20),
])
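# Illustration (hypothetical method): defining fast_start(self, context) on
# an instrument connects it to BEFORE_WORKLOAD_EXECUTION (the 'start' alias
# above) with priority 10. Since 'before' signals invert priorities, such a
# callback fires after normal-priority ones, i.e. as close as possible to
# the actual workload execution.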
installed = []
def is_installed(instrument):
if isinstance(instrument, Instrument):
if instrument in installed:
return True
if instrument.name in [i.name for i in installed]:
return True
elif isinstance(instrument, type):
if instrument in [i.__class__ for i in installed]:
return True
else: # assume string
if instrument in [i.name for i in installed]:
return True
return False
failures_detected = False
def reset_failures():
global failures_detected # pylint: disable=W0603
failures_detected = False
def check_failures():
result = failures_detected
reset_failures()
return result
class ManagedCallback(object):
"""
This wraps instruments' callbacks to ensure that errors do not interfere
with run execution.
"""
def __init__(self, instrument, callback):
self.instrument = instrument
self.callback = callback
def __call__(self, context):
if self.instrument.is_enabled:
try:
self.callback(context)
except (KeyboardInterrupt, DeviceNotRespondingError, TimeoutError): # pylint: disable=W0703
raise
except Exception as e: # pylint: disable=W0703
logger.error('Error in instrument {}'.format(self.instrument.name))
global failures_detected # pylint: disable=W0603
failures_detected = True
if isinstance(e, WAError):
logger.error(e)
else:
tb = get_traceback()
logger.error(tb)
logger.error('{}({})'.format(e.__class__.__name__, e))
if not context.current_iteration:
# Error occurred outside of an iteration (most likely
# during initial setup or teardown). Since this would affect
# the rest of the run, mark the instrument as broken so that
# it doesn't get re-enabled for subsequent iterations.
self.instrument.is_broken = True
disable(self.instrument)
# Need this to keep track of callbacks, because the dispatcher only keeps
# weak references, so if the callbacks aren't referenced elsewhere, they will
# be deallocated before they've had a chance to be invoked.
_callbacks = []
def install(instrument):
"""
This will look for methods (or any callable members) with specific names
in the instrument and hook them up to the corresponding signals.
:param instrument: Instrument instance to install.
"""
logger.debug('Installing instrument %s.', instrument)
if is_installed(instrument):
raise ValueError('Instrument {} is already installed.'.format(instrument.name))
for attr_name in dir(instrument):
priority = 0
stripped_attr_name = attr_name
for key, value in PRIORITY_MAP.iteritems():
if attr_name.startswith(key):
stripped_attr_name = attr_name[len(key):]
priority = value
break
if stripped_attr_name in SIGNAL_MAP:
attr = getattr(instrument, attr_name)
if not callable(attr):
raise ValueError('Attribute {} not callable in {}.'.format(attr_name, instrument))
arg_num = len(inspect.getargspec(attr).args)
if not arg_num == 2:
raise ValueError('{} must take exactly 2 arguments; {} given.'.format(attr_name, arg_num))
logger.debug('\tConnecting %s to %s', attr.__name__, SIGNAL_MAP[stripped_attr_name])
mc = ManagedCallback(instrument, attr)
_callbacks.append(mc)
signal.connect(mc, SIGNAL_MAP[stripped_attr_name], priority=priority)
installed.append(instrument)
def uninstall(instrument):
instrument = get_instrument(instrument)
installed.remove(instrument)
def validate():
for instrument in installed:
instrument.validate()
def get_instrument(inst):
if isinstance(inst, Instrument):
return inst
for installed_inst in installed:
if installed_inst.name == inst:
return installed_inst
raise ValueError('Instrument {} is not installed'.format(inst))
def disable_all():
for instrument in installed:
_disable_instrument(instrument)
def enable_all():
for instrument in installed:
_enable_instrument(instrument)
def enable(to_enable):
if isiterable(to_enable):
for inst in to_enable:
_enable_instrument(inst)
else:
_enable_instrument(to_enable)
def disable(to_disable):
if isiterable(to_disable):
for inst in to_disable:
_disable_instrument(inst)
else:
_disable_instrument(to_disable)
def _enable_instrument(inst):
inst = get_instrument(inst)
if not inst.is_broken:
logger.debug('Enabling instrument {}'.format(inst.name))
inst.is_enabled = True
else:
logger.debug('Not enabling broken instrument {}'.format(inst.name))
def _disable_instrument(inst):
inst = get_instrument(inst)
if inst.is_enabled:
logger.debug('Disabling instrument {}'.format(inst.name))
inst.is_enabled = False
def get_enabled():
return [i for i in installed if i.is_enabled]
def get_disabled():
return [i for i in installed if not i.is_enabled]
class Instrument(Extension):
"""
Base class for instrumentation implementations.
"""
def __init__(self, device, **kwargs):
super(Instrument, self).__init__(**kwargs)
self.device = device
self.is_enabled = True
self.is_broken = False
def __str__(self):
return self.name
def __repr__(self):
return 'Instrument({})'.format(self.name)
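# A short usage sketch (assuming a 'trace-errors' instrument like the one in
# the module docstring has been discovered by an ExtensionLoader named
# loader, and that device is an initialised Device instance):
#
#     import wlauto.core.instrumentation as instrumentation
#
#     instrument = loader.get_instrument('trace-errors', device)
#     instrumentation.install(instrument)
#     instrumentation.validate()
#     # ... the run executes; signals invoke the instrument's methods ...
#     instrumentation.uninstall(instrument)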

wlauto/core/resolver.py Normal file

@@ -0,0 +1,109 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Defines infrastructure for resource resolution. This is used to find
various dependencies/assets/etc that WA objects rely on in a flexible way.
"""
import logging
from collections import defaultdict
# Note: this is the modified louie library in wlauto/external.
# prioritylist does not exist in vanilla louie.
from louie.prioritylist import PriorityList # pylint: disable=E0611,F0401
from wlauto.exceptions import ResourceError
class ResourceResolver(object):
"""
Discovers and registers getters, and then handles requests for
resources using registered getters.
"""
def __init__(self, config):
self.logger = logging.getLogger(self.__class__.__name__)
self.getters = defaultdict(PriorityList)
self.config = config
def load(self):
"""
Discover getters under the specified source. The source could
be either a python package/module or a path.
"""
for rescls in self.config.ext_loader.list_resource_getters():
getter = self.config.get_extension(rescls.name, self)
getter.register()
def get(self, resource, strict=True, *args, **kwargs):
"""
Uses registered getters to attempt to discover a resource of the specified
kind and matching the specified criteria. Returns path to the resource that
has been discovered. If a resource has not been discovered, this will raise
a ``ResourceError`` or, if ``strict`` has been set to ``False``, will return
``None``.
"""
self.logger.debug('Resolving {}'.format(resource))
for getter in self.getters[resource.name]:
self.logger.debug('Trying {}'.format(getter))
result = getter.get(resource, *args, **kwargs)
if result is not None:
self.logger.debug('Resource {} found using {}'.format(resource, getter))
return result
if strict:
raise ResourceError('{} could not be found'.format(resource))
self.logger.debug('Resource {} not found.'.format(resource))
return None
def register(self, getter, kind, priority=0):
"""
Register the specified resource getter as being able to discover a resource
of the specified kind with the specified priority.
This method would typically be invoked by a getter inside its __init__.
The idea being that getters register themselves for resources they know
they can discover.
*priorities*
getters that are registered with the highest priority will be invoked first. If
multiple getters are registered under the same priority, they will be invoked
in the order they were registered (i.e. in the order they were discovered). This is
essentially non-deterministic.
Generally getters that are more likely to find a resource, or would find a
"better" version of the resource should register with higher (positive) priorities.
Fall-back getters that should only be invoked if a resource is not found by usual
means should register with lower (negative) priorities.
"""
self.logger.debug('Registering {}'.format(getter.name))
self.getters[kind].add(getter, priority)
def unregister(self, getter, kind):
"""
Unregister a getter that has been registered earlier.
"""
self.logger.debug('Unregistering {}'.format(getter.name))
try:
self.getters[kind].remove(getter)
except ValueError:
raise ValueError('Resource getter {} is not installed.'.format(getter.name))
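# A short usage sketch (assuming a configured "config" object, a workload
# instance, and that the File resource takes an owner and a path):
#
#     resolver = ResourceResolver(config)
#     resolver.load()
#     path = resolver.get(File(owner=workload, path='conf.json'), strict=False)
#     if path is None:
#         pass  # fall back to a default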

wlauto/core/resource.py Normal file

@@ -0,0 +1,182 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wlauto.core.extension import Extension
class GetterPriority(object):
"""
Enumerates standard ResourceGetter priorities. In general, getters should register
under one of these, rather than specifying other priority values.
:cached: The cached version of the resource. Look here first. This priority also implies
that the resource at this location is a "cache" and is not the only version of the
resource, so it may be cleared without losing access to the resource.
:preferred: Take this resource in favour of the environment resource.
:environment: Found somewhere under ~/.workload_automation/ or equivalent, or
from environment variables, external configuration files, etc.
These will override resource supplied with the package.
:external_package: Resource provided by another package.
:package: Resource provided with the package.
:remote: Resource will be downloaded from a remote location (such as an HTTP server
or a samba share). Try this only if no other getter was successful.
"""
cached = 20
preferred = 10
environment = 0
external_package = -5
package = -10
remote = -20
class Resource(object):
"""
Represents a resource that needs to be resolved. This can be pretty much
anything: a file, environment variable, a Python object, etc. The only thing
a resource *has* to have is an owner (which would normally be the
Workload/Instrument/Device/etc object that needs the resource). In addition,
a resource may have any number of attributes to identify it, but all of them are
resource type specific.
"""
name = None
def __init__(self, owner):
self.owner = owner
def delete(self, instance):
"""
Delete an instance of this resource type. This must be implemented by the concrete
subclasses based on what the resource looks like, e.g. deleting a file or a directory
tree, or removing an entry from a database.
:note: Implementation should *not* contain any logic for deciding whether or not
a resource should be deleted, only the actual deletion. The assumption is
that if this method is invoked, then the decision has already been made.
"""
raise NotImplementedError()
def __str__(self):
return '<{}\'s {}>'.format(self.owner, self.name)
class ResourceGetter(Extension):
"""
Base class for implementing resource getters. Defines the getter interface. Getters are
responsible for discovering resources (such as particular kinds of files) they know
about based on the parameters that are passed to them. Each getter also has a dict of
attributes that describe its operation, and that may be used to determine which getters
get invoked. There is no pre-defined set of attributes and getters may define their own.
Class attributes:
:name: Name that uniquely identifies this getter. Must be set by any concrete subclass.
:resource_type: Identifies resource type(s) that this getter can handle. This must
be either a string (for a single type) or a list of strings for
multiple resource types. This must be set by any concrete subclass.
:priority: Priority with which this getter will be invoked. This should be one of
the standard priorities specified in ``GetterPriority`` enumeration. If not
set, this will default to ``GetterPriority.environment``.
"""
name = None
resource_type = None
priority = GetterPriority.environment
def __init__(self, resolver, **kwargs):
super(ResourceGetter, self).__init__(**kwargs)
self.resolver = resolver
def register(self):
"""
Registers with a resource resolver. Concrete implementations must override this
to invoke ``self.resolver.register()`` method to register ``self`` for specific
resource types.
"""
if self.resource_type is None:
raise ValueError('No resource type specified for {}'.format(self.name))
elif isinstance(self.resource_type, list):
for rt in self.resource_type:
self.resolver.register(self, rt, self.priority)
else:
self.resolver.register(self, self.resource_type, self.priority)
def unregister(self):
"""Unregister from a resource resolver."""
if self.resource_type is None:
raise ValueError('No resource type specified for {}'.format(self.name))
elif isinstance(self.resource_type, list):
for rt in self.resource_type:
self.resolver.unregister(self, rt)
else:
self.resolver.unregister(self, self.resource_type)
def get(self, resource, **kwargs):
"""
This will get invoked by the resolver when attempting to resolve a resource, passing
in the resource to be resolved as the first parameter. Any additional parameters would
be specific to a particular resource type.
This method will only be invoked for resource types that the getter has registered for.
:param resource: an instance of :class:`wlauto.core.resource.Resource`.
:returns: Implementations of this method must return either the discovered resource or
``None`` if the resource could not be discovered.
"""
raise NotImplementedError()
def delete(self, resource, *args, **kwargs):
"""
Delete the resource if it is discovered. All arguments are passed to a call
to ``self.get()``. If that call returns a resource, it is deleted.
:returns: ``True`` if the specified resource has been discovered and deleted,
and ``False`` otherwise.
"""
discovered = self.get(resource, *args, **kwargs)
if discovered:
resource.delete(discovered)
return True
else:
return False
def __str__(self):
return '<ResourceGetter {}>'.format(self.name)
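# A minimal hypothetical getter sketch (the class name and the assumption
# that the resource carries a "path" attribute are illustrative, not part
# of WA):
#
#     class EnvironmentFileGetter(ResourceGetter):
#         name = 'environment_file_getter'
#         resource_type = 'file'
#         priority = GetterPriority.environment
#
#         def get(self, resource, **kwargs):
#             candidate = os.path.join(resource.owner.dependencies_directory,
#                                      resource.path)
#             return candidate if os.path.exists(candidate) else None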
class __NullOwner(object):
"""Represents an owner for a resource not owned by anyone."""
name = 'noone'
def __getattr__(self, name):
return None
def __str__(self):
return 'no-one'
__repr__ = __str__
NO_ONE = __NullOwner()

wlauto/core/result.py Normal file

@@ -0,0 +1,321 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=no-member
"""
This module defines the classes used to handle result
processing inside Workload Automation. There will be a
:class:`wlauto.core.workload.WorkloadResult` object generated for
every workload iteration executed. This object will have a list of
:class:`wlauto.core.workload.WorkloadMetric` objects. This list will be
populated by the workload itself and may also be updated by instrumentation
(e.g. to add power measurements). Once the result object has been fully
populated, it will be passed into the ``process_iteration_result`` method of
:class:`ResultProcessor`. Once the entire run has completed, a list containing
result objects from all iterations will be passed into ``process_results``
method of :class:`ResultProcessor`.
Which result processors will be active is defined by the ``result_processors``
list in ``~/.workload_automation/config.py``. Only the result processors
whose names appear in this list will be used.
A :class:`ResultsManager` keeps track of active results processors.
"""
import logging
import traceback
from copy import copy
from contextlib import contextmanager
from datetime import datetime
from wlauto.core.extension import Extension
from wlauto.exceptions import WAError
from wlauto.utils.types import numeric
from wlauto.utils.misc import enum_metaclass
class ResultManager(object):
"""
Keeps track of result processors and passes the results on to the individual processors.
"""
def __init__(self):
self.logger = logging.getLogger('ResultsManager')
self.processors = []
self._bad = []
def install(self, processor):
self.logger.debug('Installing results processor %s', processor.name)
self.processors.append(processor)
def uninstall(self, processor):
if processor in self.processors:
self.logger.debug('Uninstalling results processor %s', processor.name)
self.processors.remove(processor)
else:
self.logger.warning('Attempting to uninstall results processor %s, which is not installed.',
processor.name)
def initialize(self, context):
# Errors aren't handled at this stage, because this gets executed
# before workload execution starts and we just want to propagate them
# and terminate (so that the error can be corrected and WA restarted).
for processor in self.processors:
processor.initialize(context)
def add_result(self, result, context):
with self._manage_processors(context):
for processor in self.processors:
with self._handle_errors(processor):
processor.process_iteration_result(result, context)
for processor in self.processors:
with self._handle_errors(processor):
processor.export_iteration_result(result, context)
def process_run_result(self, result, context):
with self._manage_processors(context):
for processor in self.processors:
with self._handle_errors(processor):
processor.process_run_result(result, context)
for processor in self.processors:
with self._handle_errors(processor):
processor.export_run_result(result, context)
def finalize(self, context):
with self._manage_processors(context):
for processor in self.processors:
with self._handle_errors(processor):
processor.finalize(context)
def validate(self):
for processor in self.processors:
processor.validate()
@contextmanager
def _manage_processors(self, context, finalize_bad=True):
yield
for processor in self._bad:
if finalize_bad:
processor.finalize(context)
self.uninstall(processor)
self._bad = []
@contextmanager
def _handle_errors(self, processor):
try:
yield
except KeyboardInterrupt, e:
raise e
except WAError, we:
self.logger.error('"{}" result processor has encountered an error'.format(processor.name))
self.logger.error('{}("{}")'.format(we.__class__.__name__, we.message))
self._bad.append(processor)
except Exception, e: # pylint: disable=W0703
self.logger.error('"{}" result processor has encountered an error'.format(processor.name))
self.logger.error('{}("{}")'.format(e.__class__.__name__, e))
self.logger.error(traceback.format_exc())
self._bad.append(processor)
class ResultProcessor(Extension):
"""
Base class for result processors. Defines an interface that should be implemented
by the subclasses. A result processor can be used to do any kind of post-processing
of the results, from writing them out to a file, to uploading them to a database,
performing calculations, generating plots, etc.
"""
def initialize(self, context):
pass
def process_iteration_result(self, result, context):
pass
def export_iteration_result(self, result, context):
pass
def process_run_result(self, result, context):
pass
def export_run_result(self, result, context):
pass
def finalize(self, context):
pass
class RunResult(object):
"""
Contains overall results for a run.
"""
__metaclass__ = enum_metaclass('values', return_name=True)
values = [
'OK',
'OKISH',
'PARTIAL',
'FAILED',
'UNKNOWN',
]
@property
def status(self):
if not self.iteration_results or all([s.status == IterationResult.FAILED for s in self.iteration_results]):
return self.FAILED
elif any([s.status == IterationResult.FAILED for s in self.iteration_results]):
return self.PARTIAL
elif any([s.status == IterationResult.ABORTED for s in self.iteration_results]):
return self.PARTIAL
elif (any([s.status == IterationResult.PARTIAL for s in self.iteration_results]) or
self.non_iteration_errors):
return self.OKISH
elif all([s.status == IterationResult.OK for s in self.iteration_results]):
return self.OK
else:
return self.UNKNOWN # should never happen
def __init__(self, run_info):
self.info = run_info
self.iteration_results = []
self.artifacts = []
self.events = []
self.non_iteration_errors = False
class RunEvent(object):
"""
An event that occurred during a run.
"""
def __init__(self, message):
self.timestamp = datetime.utcnow()
self.message = message
def to_dict(self):
return copy(self.__dict__)
def __str__(self):
return '{} {}'.format(self.timestamp, self.message)
__repr__ = __str__
class IterationResult(object):
"""
Contains the result of running a single iteration of a workload. It is the
responsibility of a workload to instantiate an IterationResult, populate it,
and return it from its get_result() method.
Status explanations:
:NOT_STARTED: This iteration has not yet started.
:RUNNING: This iteration is currently running and no errors have been detected.
:OK: This iteration has completed and no errors have been detected
:PARTIAL: One or more instruments have failed (the iteration may still be running).
:FAILED: The workload itself has failed.
:ABORTED: The user interrupted the workload.
:SKIPPED: The iteration was skipped due to a previous failure.
"""
__metaclass__ = enum_metaclass('values', return_name=True)
values = [
'NOT_STARTED',
'RUNNING',
'OK',
'NONCRITICAL',
'PARTIAL',
'FAILED',
'ABORTED',
'SKIPPED',
]
def __init__(self, spec):
self.spec = spec
self.id = spec.id
self.workload = spec.workload
self.iteration = None
self.status = self.NOT_STARTED
self.events = []
self.metrics = []
self.artifacts = []
def add_metric(self, name, value, units=None, lower_is_better=False):
self.metrics.append(Metric(name, value, units, lower_is_better))
def has_metric(self, name):
for metric in self.metrics:
if metric.name == name:
return True
return False
def add_event(self, message):
self.events.append(RunEvent(message))
def to_dict(self):
d = copy(self.__dict__)
d['events'] = [e.to_dict() for e in self.events]
return d
def __iter__(self):
return iter(self.metrics)
def __getitem__(self, name):
for metric in self.metrics:
if metric.name == name:
return metric
raise KeyError('Metric {} not found.'.format(name))
class Metric(object):
"""
This is a single metric collected from executing a workload.
:param name: the name of the metric. Uniquely identifies the metric
within the results.
:param value: The numerical value of the metric for this execution of
a workload. This can be either an int or a float.
:param units: Units for the collected value. Can be None if the value
has no units (e.g. it's a count or a standardised score).
:param lower_is_better: Boolean flag indicating whether lower values are
better than higher ones. Defaults to False.
"""
def __init__(self, name, value, units=None, lower_is_better=False):
self.name = name
self.value = numeric(value)
self.units = units
self.lower_is_better = lower_is_better
def to_dict(self):
return self.__dict__
def __str__(self):
result = '{}: {}'.format(self.name, self.value)
if self.units:
result += ' ' + self.units
result += ' ({})'.format('-' if self.lower_is_better else '+')
return '<{}>'.format(result)
__repr__ = __str__
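# A short usage sketch (assuming an IterationResult instance named result):
#
#     result.add_metric('execution_time', 12.3, 'seconds', lower_is_better=True)
#     if result.has_metric('execution_time'):
#         print(result['execution_time'])  # <execution_time: 12.3 seconds (-)>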

wlauto/core/signal.py Normal file

@@ -0,0 +1,189 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module wraps the louie signalling mechanism. It relies on a modified version of louie
that has prioritization added to handler invocation.
"""
from louie import dispatcher # pylint: disable=F0401
class Signal(object):
"""
This class implements the signals to be used for notifying callbacks
registered to respond to different states and stages of the execution of workload
automation.
"""
def __init__(self, name, invert_priority=False):
"""
Instantiates a Signal.
:param name: name is the identifier of the Signal object. Signal instances with
the same name refer to the same execution state/stage.
:param invert_priority: boolean parameter that determines whether multiple
callbacks for the same signal should be ordered with
ascending or descending priorities. Typically this flag
should be set to True if the Signal is triggered BEFORE
a state/stage is reached. That way callbacks with high
priorities will be called right before the event occurs.
"""
self.name = name
self.invert_priority = invert_priority
def __str__(self):
return self.name
__repr__ = __str__
def __hash__(self):
return id(self.name)
# These are paired events -- if the before_event is sent, the after_ signal is
# guaranteed to also be sent. In particular, the after_ signals will be sent
# even if there is an error, so you cannot assume in the handler that the
# device has booted successfully. In most cases, you should instead use the
# non-paired signals below.
BEFORE_FLASHING = Signal('before-flashing-signal', invert_priority=True)
SUCCESSFUL_FLASHING = Signal('successful-flashing-signal')
AFTER_FLASHING = Signal('after-flashing-signal')
BEFORE_BOOT = Signal('before-boot-signal', invert_priority=True)
SUCCESSFUL_BOOT = Signal('successful-boot-signal')
AFTER_BOOT = Signal('after-boot-signal')
BEFORE_INITIAL_BOOT = Signal('before-initial-boot-signal', invert_priority=True)
SUCCESSFUL_INITIAL_BOOT = Signal('successful-initial-boot-signal')
AFTER_INITIAL_BOOT = Signal('after-initial-boot-signal')
BEFORE_FIRST_ITERATION_BOOT = Signal('before-first-iteration-boot-signal', invert_priority=True)
SUCCESSFUL_FIRST_ITERATION_BOOT = Signal('successful-first-iteration-boot-signal')
AFTER_FIRST_ITERATION_BOOT = Signal('after-first-iteration-boot-signal')
BEFORE_WORKLOAD_SETUP = Signal('before-workload-setup-signal', invert_priority=True)
SUCCESSFUL_WORKLOAD_SETUP = Signal('successful-workload-setup-signal')
AFTER_WORKLOAD_SETUP = Signal('after-workload-setup-signal')
BEFORE_WORKLOAD_EXECUTION = Signal('before-workload-execution-signal', invert_priority=True)
SUCCESSFUL_WORKLOAD_EXECUTION = Signal('successful-workload-execution-signal')
AFTER_WORKLOAD_EXECUTION = Signal('after-workload-execution-signal')
BEFORE_WORKLOAD_RESULT_UPDATE = Signal('before-iteration-result-update-signal', invert_priority=True)
SUCCESSFUL_WORKLOAD_RESULT_UPDATE = Signal('successful-iteration-result-update-signal')
AFTER_WORKLOAD_RESULT_UPDATE = Signal('after-iteration-result-update-signal')
BEFORE_WORKLOAD_TEARDOWN = Signal('before-workload-teardown-signal', invert_priority=True)
SUCCESSFUL_WORKLOAD_TEARDOWN = Signal('successful-workload-teardown-signal')
AFTER_WORKLOAD_TEARDOWN = Signal('after-workload-teardown-signal')
BEFORE_OVERALL_RESULTS_PROCESSING = Signal('before-overall-results-process-signal', invert_priority=True)
SUCCESSFUL_OVERALL_RESULTS_PROCESSING = Signal('successful-overall-results-process-signal')
AFTER_OVERALL_RESULTS_PROCESSING = Signal('after-overall-results-process-signal')
# These are the not-paired signals; they are emitted independently. E.g. the
# fact that RUN_START was emitted does not mean run end will be.
RUN_START = Signal('start-signal', invert_priority=True)
RUN_END = Signal('end-signal')
WORKLOAD_SPEC_START = Signal('workload-spec-start-signal', invert_priority=True)
WORKLOAD_SPEC_END = Signal('workload-spec-end-signal')
ITERATION_START = Signal('iteration-start-signal', invert_priority=True)
ITERATION_END = Signal('iteration-end-signal')
RUN_INIT = Signal('run-init-signal')
SPEC_INIT = Signal('spec-init-signal')
ITERATION_INIT = Signal('iteration-init-signal')
RUN_FIN = Signal('run-fin-signal')
# These signals are used by the LoggerFilter to tell about logging events
ERROR_LOGGED = Signal('error_logged')
WARNING_LOGGED = Signal('warning_logged')
def connect(handler, signal, sender=dispatcher.Any, priority=0):
"""
Connects a callback to a signal, so that the callback will be automatically invoked
when that signal is sent.
Parameters:
:handler: This can be any callable that takes the right arguments for
the signal. For most signals this means a single argument that
will be an ``ExecutionContext`` instance. But please see documentation
for individual signals in the :ref:`signals reference <instrumentation_method_map>`.
:signal: The signal to which the handler will be subscribed. Please see
:ref:`signals reference <instrumentation_method_map>` for the list of standard WA
signals.
.. note:: There is nothing that prevents instrumentation from sending their
own signals that are not part of the standard set. However the signal
must always be an :class:`wlauto.core.signal.Signal` instance.
:sender: The handler will be invoked only for the signals emitted by this sender. By
default, this is set to :class:`louie.dispatcher.Any`, so the handler will
be invoked for signals from any sender.
:priority: An integer (positive or negative) that specifies the priority of the handler.
Handlers with higher priority will be called before handlers with lower
priority. The call order of handlers with the same priority is not specified.
Defaults to 0.
.. note:: Priorities for some signals are inverted (so highest priority
handlers get executed last). Please see :ref:`signals reference <instrumentation_method_map>`
for details.
"""
if signal.invert_priority:
dispatcher.connect(handler, signal, sender, priority=-priority) # pylint: disable=E1123
else:
dispatcher.connect(handler, signal, sender, priority=priority) # pylint: disable=E1123
def disconnect(handler, signal, sender=dispatcher.Any):
"""
Disconnect a previously connected handler from the specified signal, optionally only
for the specified sender.
Parameters:
:handler: The callback to be disconnected.
:signal: The signal the handler is to be disconnected from. This will
be a :class:`wlauto.core.signal.Signal` instance.
:sender: If specified, the handler will only be disconnected from the signal
sent by this sender.
"""
dispatcher.disconnect(handler, signal, sender)
def send(signal, sender, *args, **kwargs):
"""
Sends a signal, causing connected handlers to be invoked.
Parameters:
:signal: Signal to be sent. This must be an instance of :class:`wlauto.core.signal.Signal`
or its subclasses.
:sender: The sender of the signal (typically, this would be ``self``). Some handlers may only
be subscribed to signals from a particular sender.
The rest of the parameters will be passed on as arguments to the handler.
"""
dispatcher.send(signal, sender, *args, **kwargs)
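# A minimal usage sketch (illustrative, not part of the original module);
# the handler and the context value below are hypothetical. In practice,
# handlers are connected by instrumentation and signals are sent by the
# execution core, typically with the emitting object as the sender.
if __name__ == '__main__':

    def on_run_start(context):
        print('Run is starting: {}'.format(context))

    # Note: RUN_START has invert_priority set, so for this signal
    # higher-priority handlers are executed last.
    connect(on_run_start, RUN_START, priority=10)
    send(RUN_START, sender=None, context='example-context')
    disconnect(on_run_start, RUN_START)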

26
wlauto/core/version.py Normal file
View File

@@ -0,0 +1,26 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import namedtuple
VersionTuple = namedtuple('Version', ['major', 'minor', 'revision'])
version = VersionTuple(2, 3, 0)
def get_wa_version():
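# Renders the version tuple as a dotted string, e.g. '2.3.0' for this release.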
version_string = '{}.{}.{}'.format(version.major, version.minor, version.revision)
return version_string

94
wlauto/core/workload.py Normal file
View File

@@ -0,0 +1,94 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A workload is the unit of execution. It represents a set of activities that are performed
and measured together, as well as the necessary setup and teardown procedures. A single
execution of a workload produces one :class:`wlauto.core.result.WorkloadResult` that is populated with zero or more
:class:`wlauto.core.result.WorkloadMetric`\ s and/or
:class:`wlauto.core.result.Artifact`\s by the workload and active instrumentation.
"""
from wlauto.core.extension import Extension
from wlauto.exceptions import WorkloadError
class Workload(Extension):
"""
This is the base class for the workloads executed by the framework.
Each of the methods throwing NotImplementedError *must* be implemented
by the derived classes.
"""
supported_devices = []
supported_platforms = []
summary_metrics = []
def __init__(self, device, **kwargs):
"""
Creates a new Workload.
:param device: the Device on which the workload will be executed.
"""
super(Workload, self).__init__(**kwargs)
if self.supported_devices and device.name not in self.supported_devices:
raise WorkloadError('Workload {} does not support device {}'.format(self.name, device.name))
if self.supported_platforms and device.platform not in self.supported_platforms:
raise WorkloadError('Workload {} does not support platform {}'.format(self.name, device.platform))
self.device = device
def init_resources(self, context):
"""
May be optionally overridden by concrete instances in order to discover and initialise
necessary resources. This method will be invoked at most once during the execution:
before running any workloads, and before invocation of ``validate()``, but after it is
clear that this workload will run (i.e. this method will not be invoked for workloads
that have been discovered but have not been scheduled to run in the agenda).
"""
pass
def setup(self, context):
"""
Perform the setup necessary to run the workload, such as copying the necessary files
to the device, configuring the environment, etc.
This is also the place to perform any on-device checks prior to attempting to execute
the workload.
"""
pass
def run(self, context):
"""Execute the workload. This is the method that performs the actual "work" of the"""
pass
def update_result(self, context):
"""
Update the result within the specified execution context with the metrics
from this workload iteration.
"""
pass
def teardown(self, context):
""" Perform any final clean up for the Workload. """
pass
def __str__(self):
return '<Workload {}>'.format(self.name)
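# A minimal sketch (illustrative, not part of the original module) of a
# concrete workload. The 'sleep' command and the metric recorded via
# context.result.add_metric() are assumptions made for this example.
class ExampleSleepWorkload(Workload):

    name = 'example_sleep'
    description = 'Sleeps on the device for five seconds.'

    def setup(self, context):
        self.command = 'sleep 5'  # any command available on the device

    def run(self, context):
        self.device.execute(self.command)

    def update_result(self, context):
        context.result.add_metric('sleep_duration', 5, 'seconds')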

View File

@@ -0,0 +1,16 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

View File

@@ -0,0 +1,16 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

View File

@@ -0,0 +1,37 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wlauto import AndroidDevice, Parameter
class GenericDevice(AndroidDevice):
name = 'generic_android'
description = """
Generic Android device. Use this if you do not have a device file for
your device.
This implements the minimum functionality that should be supported by
all Android devices.
"""
default_working_directory = '/storage/sdcard0/working'
has_gpu = True
parameters = [
Parameter('core_names', default=[], override=True),
Parameter('core_clusters', default=[], override=True),
]

View File

@@ -0,0 +1,173 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=E1101
import os
import re
import time
import pexpect
from wlauto import BigLittleDevice, Parameter
from wlauto.exceptions import DeviceError
from wlauto.utils.serial_port import open_serial_connection, pulse_dtr
from wlauto.utils.android import adb_connect, adb_disconnect, adb_list_devices
from wlauto.utils.uefi import UefiMenu
AUTOSTART_MESSAGE = 'Press Enter to stop auto boot...'
class Juno(BigLittleDevice):
name = 'juno'
description = """
ARM Juno next generation big.LITTLE development platform.
"""
capabilities = ['reset_power']
has_gpu = True
modules = [
'vexpress',
]
parameters = [
Parameter('retries', kind=int, default=2,
description="""Specifies the number of times the device will attempt to recover
(normally, with a hard reset) if it detects that something went wrong."""),
# VExpress flasher expects a device to have these:
Parameter('uefi_entry', default='WA',
description='The name of the entry to use (will be created if it does not exist).'),
Parameter('microsd_mount_point', default='/media/JUNO',
description='Location at which the device\'s MicroSD card will be mounted.'),
Parameter('port', default='/dev/ttyS0', description='Serial port on which the device is connected.'),
Parameter('baudrate', kind=int, default=115200, description='Serial connection baud.'),
Parameter('timeout', kind=int, default=300, description='Serial connection timeout.'),
Parameter('core_names', default=['a53', 'a53', 'a53', 'a53', 'a57', 'a57'], override=True),
Parameter('core_clusters', default=[0, 0, 0, 0, 1, 1], override=True),
]
short_delay = 1
firmware_prompt = 'Cmd>'
# this is only used if there is no UEFI entry and one has to be created.
kernel_arguments = 'console=ttyAMA0,115200 earlyprintk=pl011,0x7ff80000 verbose debug init=/init root=/dev/sda1 rw ip=dhcp rootwait'
def boot(self, **kwargs):
self.logger.debug('Resetting the device.')
self.reset()
with open_serial_connection(port=self.port,
baudrate=self.baudrate,
timeout=self.timeout,
init_dtr=0) as target:
menu = UefiMenu(target)
self.logger.debug('Waiting for UEFI menu...')
menu.open(timeout=120)
try:
menu.select(self.uefi_entry)
except LookupError:
self.logger.debug('{} UEFI entry not found.'.format(self.uefi_entry))
self.logger.debug('Attempting to create one using default flasher configuration.')
self.flasher.image_args = self.kernel_arguments
self.flasher.create_uefi_enty(self, menu)
menu.select(self.uefi_entry)
self.logger.debug('Waiting for the Android prompt.')
target.expect(self.android_prompt, timeout=self.timeout)
def connect(self):
if not self._is_ready:
if not self.adb_name: # pylint: disable=E0203
with open_serial_connection(timeout=self.timeout,
port=self.port,
baudrate=self.baudrate,
init_dtr=0) as target:
target.sendline('')
self.logger.debug('Waiting for android prompt.')
target.expect(self.android_prompt)
self.logger.debug('Waiting for IP address...')
wait_start_time = time.time()
while True:
target.sendline('ip addr list eth0')
time.sleep(1)
try:
target.expect('inet ([1-9]\d*\.\d+\.\d+\.\d+)', timeout=10)
self.adb_name = target.match.group(1) + ':5555' # pylint: disable=W0201
break
except pexpect.TIMEOUT:
pass # We have our own timeout -- see below.
if (time.time() - wait_start_time) > self.ready_timeout:
raise DeviceError('Could not acquire IP address.')
if self.adb_name in adb_list_devices():
adb_disconnect(self.adb_name)
adb_connect(self.adb_name, timeout=self.timeout)
super(Juno, self).connect() # wait for boot to complete etc.
self._is_ready = True
def disconnect(self):
if self._is_ready:
super(Juno, self).disconnect()
adb_disconnect(self.adb_name)
self._is_ready = False
def reset(self):
# Currently, reboot is not working in Android on Juno, so
# perform a hard reset instead.
self.hard_reset()
def get_cpuidle_states(self, cpu=0):
return {}
def hard_reset(self):
self.disconnect()
self.adb_name = None # Force re-acquire IP address on reboot. pylint: disable=attribute-defined-outside-init
with open_serial_connection(port=self.port,
baudrate=self.baudrate,
timeout=self.timeout,
init_dtr=0,
get_conn=True) as (target, conn):
pulse_dtr(conn, state=True, duration=0.1) # TRM specifies a pulse of >=100ms
i = target.expect([AUTOSTART_MESSAGE, self.firmware_prompt])
if i:
self.logger.debug('Saw firmware prompt.')
time.sleep(self.short_delay)
target.sendline('reboot')
else:
self.logger.debug('Saw auto boot message.')
def wait_for_microsd_mount_point(self, target, timeout=100):
attempts = 1 + self.retries
if os.path.exists(os.path.join(self.microsd_mount_point, 'config.txt')):
return
self.logger.debug('Waiting for VExpress MicroSD to mount...')
for i in xrange(attempts):
if i: # Do not reboot on the first attempt.
target.sendline('reboot')
for _ in xrange(timeout):
time.sleep(self.short_delay)
if os.path.exists(os.path.join(self.microsd_mount_point, 'config.txt')):
return
raise DeviceError('Did not detect MicroSD mount on {}'.format(self.microsd_mount_point))
def get_android_id(self):
# Android ID is currently not set properly in Juno Android builds.
return 'abad1deadeadbeef'

View File

@@ -0,0 +1,48 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from wlauto import AndroidDevice, Parameter
class Nexus10Device(AndroidDevice):
name = 'Nexus10'
description = """
Nexus 10 is a 10 inch tablet device with a dual-core A15.
To be able to use Nexus10 in WA, the following must be true:
- USB Debugging Mode is enabled.
- USB debugging authorisation has been generated for the host machine.
"""
default_working_directory = '/sdcard/working'
has_gpu = True
max_cores = 2
parameters = [
Parameter('core_names', default=['A15', 'A15'], override=True),
Parameter('core_clusters', default=[0, 0], override=True),
]
def init(self, context, *args, **kwargs):
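# Keep the screen on and dismiss the keyguard (keyevent 82 is
# KEYCODE_MENU, commonly used to unlock the lock screen).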
time.sleep(self.long_delay)
self.execute('svc power stayon true', check_exit_code=False)
time.sleep(self.long_delay)
self.execute('input keyevent 82')

View File

@@ -0,0 +1,40 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wlauto import AndroidDevice, Parameter
class Nexus5Device(AndroidDevice):
name = 'Nexus5'
description = """
Adapter for Nexus 5.
To be able to use Nexus5 in WA, the following must be true:
- USB Debugging Mode is enabled.
- USB debugging authorisation has been generated for the host machine.
"""
default_working_directory = '/storage/sdcard0/working'
has_gpu = True
max_cores = 4
parameters = [
Parameter('core_names', default=['krait400', 'krait400', 'krait400', 'krait400'], override=True),
Parameter('core_clusters', default=[0, 0, 0, 0], override=True),
]

View File

@@ -0,0 +1,76 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from wlauto import AndroidDevice, Parameter
from wlauto.exceptions import TimeoutError
from wlauto.utils.android import adb_shell
class Note3Device(AndroidDevice):
name = 'Note3'
description = """
Adapter for Galaxy Note 3.
To be able to use Note3 in WA, the following must be true:
- USB Debugging Mode is enabled.
- USB debugging authorisation has been generated for the host machine.
"""
parameters = [
Parameter('core_names', default=['A15', 'A15', 'A15', 'A15'], override=True),
Parameter('core_clusters', default=[0, 0, 0, 0], override=True),
Parameter('working_directory', default='/storage/sdcard0/wa-working', override=True),
]
def __init__(self, **kwargs):
super(Note3Device, self).__init__(**kwargs)
self._just_rebooted = False
def init(self, context):
self.execute('svc power stayon true', check_exit_code=False)
def reset(self):
super(Note3Device, self).reset()
self._just_rebooted = True
def hard_reset(self):
super(Note3Device, self).hard_reset()
self._just_rebooted = True
def connect(self): # NOQA pylint: disable=R0912
super(Note3Device, self).connect()
if self._just_rebooted:
self.logger.debug('Waiting for boot to complete...')
# On the Note 3, adb connection gets reset some time after booting.
# This causes errors during execution. To prevent this, open a shell
# session and wait for it to be killed. Once it's killed, give adb
# enough time to restart, and then the device should be ready.
try:
adb_shell(self.adb_name, '', timeout=20) # pylint: disable=no-member
time.sleep(5) # give adb time to re-initialize
except TimeoutError:
pass # timed out waiting for the session to be killed -- assume not going to be.
self.logger.debug('Boot completed.')
self._just_rebooted = False
# Swipe upwards to unlock the screen.
time.sleep(self.long_delay)
self.execute('input touchscreen swipe 540 1600 560 800 ')

View File

@@ -0,0 +1,38 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wlauto import AndroidDevice, Parameter
class OdroidXU3(AndroidDevice):
name = "odroidxu3"
description = 'HardKernel Odroid XU3 development board.'
core_modules = [
'odroidxu3-fan',
]
parameters = [
Parameter('adb_name', default='BABABEEFBABABEEF', override=True),
Parameter('working_directory', default='/data/local/wa-working', override=True),
Parameter('core_names', default=['a7', 'a7', 'a7', 'a7', 'a15', 'a15', 'a15', 'a15'], override=True),
Parameter('core_clusters', default=[0, 0, 0, 0, 1, 1, 1, 1], override=True),
Parameter('port', default='/dev/ttyUSB0', kind=str,
description='Serial port on which the device is connected'),
Parameter('baudrate', default=115200, kind=int, description='Serial connection baud rate'),
]

View File

@@ -0,0 +1,847 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import re
import string
import shutil
import time
from collections import Counter
import pexpect
from wlauto import BigLittleDevice, RuntimeParameter, Parameter, settings
from wlauto.exceptions import ConfigError, DeviceError
from wlauto.utils.android import adb_connect, adb_disconnect, adb_list_devices
from wlauto.utils.serial_port import open_serial_connection
from wlauto.utils.misc import merge_dicts
from wlauto.utils.types import boolean
BOOT_FIRMWARE = {
'uefi': {
'SCC_0x010': '0x000003E0',
'reboot_attempts': 0,
},
'bootmon': {
'SCC_0x010': '0x000003D0',
'reboot_attempts': 2,
},
}
MODES = {
'mp_a7_only': {
'images_file': 'images_mp.txt',
'dtb': 'mp_a7',
'initrd': 'init_mp',
'kernel': 'kern_mp',
'SCC_0x700': '0x1032F003',
'cpus': ['a7', 'a7', 'a7'],
},
'mp_a7_bootcluster': {
'images_file': 'images_mp.txt',
'dtb': 'mp_a7bc',
'initrd': 'init_mp',
'kernel': 'kern_mp',
'SCC_0x700': '0x1032F003',
'cpus': ['a7', 'a7', 'a7', 'a15', 'a15'],
},
'mp_a15_only': {
'images_file': 'images_mp.txt',
'dtb': 'mp_a15',
'initrd': 'init_mp',
'kernel': 'kern_mp',
'SCC_0x700': '0x0032F003',
'cpus': ['a15', 'a15'],
},
'mp_a15_bootcluster': {
'images_file': 'images_mp.txt',
'dtb': 'mp_a15bc',
'initrd': 'init_mp',
'kernel': 'kern_mp',
'SCC_0x700': '0x0032F003',
'cpus': ['a15', 'a15', 'a7', 'a7', 'a7'],
},
'iks_cpu': {
'images_file': 'images_iks.txt',
'dtb': 'iks',
'initrd': 'init_iks',
'kernel': 'kern_iks',
'SCC_0x700': '0x1032F003',
'cpus': ['a7', 'a7'],
},
'iks_a15': {
'images_file': 'images_iks.txt',
'dtb': 'iks',
'initrd': 'init_iks',
'kernel': 'kern_iks',
'SCC_0x700': '0x0032F003',
'cpus': ['a15', 'a15'],
},
'iks_a7': {
'images_file': 'images_iks.txt',
'dtb': 'iks',
'initrd': 'init_iks',
'kernel': 'kern_iks',
'SCC_0x700': '0x0032F003',
'cpus': ['a7', 'a7'],
},
'iks_ns_a15': {
'images_file': 'images_iks.txt',
'dtb': 'iks',
'initrd': 'init_iks',
'kernel': 'kern_iks',
'SCC_0x700': '0x0032F003',
'cpus': ['a7', 'a7', 'a7', 'a15', 'a15'],
},
'iks_ns_a7': {
'images_file': 'images_iks.txt',
'dtb': 'iks',
'initrd': 'init_iks',
'kernel': 'kern_iks',
'SCC_0x700': '0x0032F003',
'cpus': ['a7', 'a7', 'a7', 'a15', 'a15'],
},
}
A7_ONLY_MODES = ['mp_a7_only', 'iks_a7', 'iks_cpu']
A15_ONLY_MODES = ['mp_a15_only', 'iks_a15']
DEFAULT_A7_GOVERNOR_TUNABLES = {
'interactive': {
'above_hispeed_delay': 80000,
'go_hispeed_load': 85,
'hispeed_freq': 800000,
'min_sample_time': 80000,
'timer_rate': 20000,
},
'ondemand': {
'sampling_rate': 50000,
},
}
DEFAULT_A15_GOVERNOR_TUNABLES = {
'interactive': {
'above_hispeed_delay': 80000,
'go_hispeed_load': 85,
'hispeed_freq': 1000000,
'min_sample_time': 80000,
'timer_rate': 20000,
},
'ondemand': {
'sampling_rate': 50000,
},
}
ADB_SHELL_TIMEOUT = 30
class _TC2DeviceConfig(object):
name = 'TC2 Configuration'
device_name = 'TC2'
def __init__(self, # pylint: disable=R0914,W0613
root_mount='/media/VEMSD',
disable_boot_configuration=False,
boot_firmware=None,
mode=None,
fs_medium='usb',
device_working_directory='/data/local/usecase',
bm_image='bm_v519r.axf',
serial_device='/dev/ttyS0',
serial_baud=38400,
serial_max_timeout=600,
serial_log=sys.stdout,
init_timeout=120,
always_delete_uefi_entry=True,
psci_enable=True,
host_working_directory=None,
a7_governor_tunables=None,
a15_governor_tunables=None,
adb_name=None,
# Compatibility with other android devices.
enable_screen_check=None, # pylint: disable=W0613
**kwargs
):
self.root_mount = root_mount
self.disable_boot_configuration = disable_boot_configuration
if disable_boot_configuration and (boot_firmware or mode):
raise ConfigError('boot_firmware and/or mode cannot be specified when disable_boot_configuration is enabled.')
# Set the defaults unconditionally, so that later attribute look-ups
# (e.g. in validate()) do not fail when boot configuration is disabled.
self.boot_firmware = boot_firmware or 'uefi'
self.default_mode = mode or 'mp_a7_bootcluster'
self.mode = self.default_mode
self.working_directory = device_working_directory
self.serial_device = serial_device
self.serial_baud = serial_baud
self.serial_max_timeout = serial_max_timeout
self.serial_log = serial_log
self.bootmon_prompt = re.compile('^([KLM]:\\\)?>', re.MULTILINE)
self.fs_medium = fs_medium.lower()
self.bm_image = bm_image
self.init_timeout = init_timeout
self.always_delete_uefi_entry = always_delete_uefi_entry
self.psci_enable = psci_enable
self.resource_dir = os.path.join(os.path.dirname(__file__), 'resources')
self.board_dir = os.path.join(self.root_mount, 'SITE1', 'HBI0249A')
self.board_file = 'board.txt'
self.board_file_bak = 'board.bak'
self.images_file = 'images.txt'
self.host_working_directory = host_working_directory or settings.meta_directory
if not a7_governor_tunables:
self.a7_governor_tunables = DEFAULT_A7_GOVERNOR_TUNABLES
else:
self.a7_governor_tunables = merge_dicts(DEFAULT_A7_GOVERNOR_TUNABLES, a7_governor_tunables)
if not a15_governor_tunables:
self.a15_governor_tunables = DEFAULT_A15_GOVERNOR_TUNABLES
else:
self.a15_governor_tunables = merge_dicts(DEFAULT_A15_GOVERNOR_TUNABLES, a15_governor_tunables)
self.adb_name = adb_name
@property
def src_images_template_file(self):
return os.path.join(self.resource_dir, MODES[self.mode]['images_file'])
@property
def src_images_file(self):
return os.path.join(self.host_working_directory, 'images.txt')
@property
def src_board_template_file(self):
return os.path.join(self.resource_dir, 'board_template.txt')
@property
def src_board_file(self):
return os.path.join(self.host_working_directory, 'board.txt')
@property
def kernel_arguments(self):
kernel_args = ' console=ttyAMA0,38400 androidboot.console=ttyAMA0 selinux=0'
if self.fs_medium == 'usb':
kernel_args += ' androidboot.hardware=arm-versatileexpress-usb'
if 'iks' in self.mode:
kernel_args += ' no_bL_switcher=0'
return kernel_args
@property
def kernel(self):
return MODES[self.mode]['kernel']
@property
def initrd(self):
return MODES[self.mode]['initrd']
@property
def dtb(self):
return MODES[self.mode]['dtb']
@property
def SCC_0x700(self):
return MODES[self.mode]['SCC_0x700']
@property
def SCC_0x010(self):
return BOOT_FIRMWARE[self.boot_firmware]['SCC_0x010']
@property
def reboot_attempts(self):
return BOOT_FIRMWARE[self.boot_firmware]['reboot_attempts']
def validate(self):
valid_modes = MODES.keys()
if self.mode not in valid_modes:
message = 'Invalid mode: {}; must be in {}'.format(
self.mode, valid_modes)
raise ConfigError(message)
valid_boot_firmware = BOOT_FIRMWARE.keys()
if self.boot_firmware not in valid_boot_firmware:
message = 'Invalid boot_firmware: {}; must be in {}'.format(
self.boot_firmware,
valid_boot_firmware)
raise ConfigError(message)
if self.fs_medium not in ['usb', 'sdcard']:
message = 'Invalid filesystem medium: {}; allowed values: usb, sdcard'.format(self.fs_medium)
raise ConfigError(message)
class TC2Device(BigLittleDevice):
name = 'TC2'
description = """
TC2 is a development board, which has three A7 cores and two A15 cores.
TC2 has a number of boot parameters which are:
:root_mount: Defaults to '/media/VEMSD'
:boot_firmware: It has only two boot firmware options, which are
uefi and bootmon. Defaults to 'uefi'.
:fs_medium: Defaults to 'usb'.
:device_working_directory: The directory that WA will use to copy
files to. Defaults to '/data/local/usecase'.
:serial_device: The serial device which TC2 is connected to. Defaults to
'/dev/ttyS0'.
:serial_baud: Defaults to 38400.
:serial_max_timeout: Serial timeout value in seconds. Defaults to 600.
:serial_log: Defaults to standard output.
:init_timeout: The timeout, in seconds, for the device to initialise.
Defaults to 120.
:always_delete_uefi_entry: If ``True``, the existing WA UEFI entry will be
deleted and re-created when booting. Defaults to ``True``.
:psci_enable: Whether PSCI is enabled. Defaults to ``True``.
:host_working_directory: The host working directory. Defaults to None.
:disable_boot_configuration: Disables boot configuration through images.txt and board.txt. When
this is ``True``, those two files will not be overwritten in VEMSD.
This option may be necessary if the firmware version in the ``TC2``
is not compatible with the templates in WA. Please note that enabling
this will prevent you from being able to set ``boot_firmware`` and
``mode`` parameters. Defaults to ``False``.
TC2 also has a number of different boot modes, which are:
:mp_a7_only: Only the A7 cluster.
:mp_a7_bootcluster: Both A7 and A15 clusters, but it boots on the A7
cluster.
:mp_a15_only: Only the A15 cluster.
:mp_a15_bootcluster: Both A7 and A15 clusters, but it boots on the A15
cluster.
:iks_cpu: Only the A7 cluster, with only 2 cpus.
:iks_a15: Only the A15 cluster.
:iks_a7: Same as iks_cpu.
:iks_ns_a15: Both A7 and A15 clusters.
:iks_ns_a7: Both A7 and A15 clusters.
The difference between mp and iks is the scheduling policy.
TC2 takes the following runtime parameters:
:a7_cores: Number of active A7 cores.
:a15_cores: Number of active A15 cores.
:a7_governor: CPUFreq governor for the A7 cluster.
:a15_governor: CPUFreq governor for the A15 cluster.
:a7_min_frequency: Minimum CPU frequency for the A7 cluster.
:a15_min_frequency: Minimum CPU frequency for the A15 cluster.
:a7_max_frequency: Maximum CPU frequency for the A7 cluster.
:a15_max_frequency: Maximum CPU frequency for the A15 cluster.
:irq_affinity: Which cluster will receive IRQs.
:cpuidle: Whether idle states should be enabled.
:sysfile_values: A dict mapping a complete file path to the value that
should be echo'd into it. By default, the file will be
subsequently read to verify that the value was written
into it with DeviceError raised otherwise. For write-only
files, this check can be disabled by appending a ``!`` to
the end of the file path.
"""
has_gpu = False
a15_only_modes = A15_ONLY_MODES
a7_only_modes = A7_ONLY_MODES
not_configurable_modes = ['iks_a7', 'iks_cpu', 'iks_a15']
parameters = [
Parameter('core_names', mandatory=False, override=True,
description='This parameter will be ignored for TC2'),
Parameter('core_clusters', mandatory=False, override=True,
description='This parameter will be ignored for TC2'),
]
runtime_parameters = [
RuntimeParameter('irq_affinity', lambda d, x: d.set_irq_affinity(x.lower()), lambda: None),
RuntimeParameter('cpuidle', lambda d, x: d.enable_idle_states() if boolean(x) else d.disable_idle_states(),
lambda d: d.get_cpuidle())
]
def get_mode(self):
return self.config.mode
def set_mode(self, mode):
if self._has_booted:
raise DeviceError('Attempting to set boot mode when already booted.')
valid_modes = MODES.keys()
if mode is None:
mode = self.config.default_mode
if mode not in valid_modes:
message = 'Invalid mode: {}; must be in {}'.format(mode, valid_modes)
raise ConfigError(message)
self.config.mode = mode
mode = property(get_mode, set_mode)
def _get_core_names(self):
return MODES[self.mode]['cpus']
def _set_core_names(self, value):
pass
core_names = property(_get_core_names, _set_core_names)
def _get_core_clusters(self):
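# Assigns cluster ids in order of first appearance of each core type,
# e.g. ['a15', 'a15', 'a7', 'a7', 'a7'] -> [0, 0, 1, 1, 1].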
seen = set([])
core_clusters = []
cluster_id = -1
for core in MODES[self.mode]['cpus']:
if core not in seen:
seen.add(core)
cluster_id += 1
core_clusters.append(cluster_id)
return core_clusters
def _set_core_clusters(self, value):
pass
core_clusters = property(_get_core_clusters, _set_core_clusters)
@property
def cpu_cores(self):
return MODES[self.mode]['cpus']
@property
def max_a7_cores(self):
return Counter(MODES[self.mode]['cpus'])['a7']
@property
def max_a15_cores(self):
return Counter(MODES[self.mode]['cpus'])['a15']
@property
def a7_governor_tunables(self):
return self.config.a7_governor_tunables
@property
def a15_governor_tunables(self):
return self.config.a15_governor_tunables
def __init__(self, **kwargs):
super(TC2Device, self).__init__()
self.config = _TC2DeviceConfig(**kwargs)
self.working_directory = self.config.working_directory
self._serial = None
self._has_booted = None
def boot(self, **kwargs): # NOQA
mode = kwargs.get('os_mode', None)
self._is_ready = False
self._has_booted = False
self.mode = mode
self.logger.debug('Booting in {} mode'.format(self.mode))
with open_serial_connection(timeout=self.config.serial_max_timeout,
port=self.config.serial_device,
baudrate=self.config.serial_baud) as target:
if self.config.boot_firmware == 'bootmon':
self._boot_using_bootmon(target)
elif self.config.boot_firmware == 'uefi':
self._boot_using_uefi(target)
else:
message = 'Unexpected boot firmware: {}'.format(self.config.boot_firmware)
raise ConfigError(message)
try:
target.sendline('')
self.logger.debug('Waiting for the Android prompt.')
target.expect(self.android_prompt, timeout=40) # pylint: disable=E1101
except pexpect.TIMEOUT:
# Try a second time before giving up.
self.logger.debug('Did not get Android prompt, retrying...')
target.sendline('')
target.expect(self.android_prompt, timeout=10) # pylint: disable=E1101
self.logger.debug('Waiting for OS to initialize...')
started_waiting_time = time.time()
time.sleep(20) # we know it's not going to take less time than this.
boot_completed, got_ip_address = False, False
while True:
try:
if not boot_completed:
target.sendline('getprop sys.boot_completed')
boot_completed = target.expect(['0.*', '1.*'], timeout=10)
if not got_ip_address:
target.sendline('getprop dhcp.eth0.ipaddress')
# regexes are processed in order, so the ip regex has to
# come first (as we only want to match a new line if we
# don't match the IP). We apply a "not" to make the logic
# consistent with boot_completed.
got_ip_address = not target.expect(['[1-9]\d*\.\d+\.\d+\.\d+', '\n'], timeout=10)
except pexpect.TIMEOUT:
pass # We have our own timeout -- see below.
if boot_completed and got_ip_address:
break
time.sleep(5)
if (time.time() - started_waiting_time) > self.config.init_timeout:
raise DeviceError('Timed out waiting for the device to initialize.')
self._has_booted = True
def connect(self):
if not self._is_ready:
if self.config.adb_name:
self.adb_name = self.config.adb_name # pylint: disable=attribute-defined-outside-init
else:
with open_serial_connection(timeout=self.config.serial_max_timeout,
port=self.config.serial_device,
baudrate=self.config.serial_baud) as target:
# Get IP address and push the Gator and PMU logger.
target.sendline('su') # as of Android v5.0.2, Linux does not boot into a root shell
target.sendline('netcfg')
ipaddr_re = re.compile('eth0 +UP +(.+)/.+', re.MULTILINE)
target.expect(ipaddr_re)
output = target.after
match = re.search('eth0 +UP +(.+)/.+', output)
if not match:
raise DeviceError('Could not get adb IP address.')
ipaddr = match.group(1)
# Connect to device using adb.
target.expect(self.android_prompt) # pylint: disable=E1101
self.adb_name = ipaddr + ":5555" # pylint: disable=W0201
if self.adb_name in adb_list_devices():
adb_disconnect(self.adb_name)
adb_connect(self.adb_name)
self._is_ready = True
self.execute("input keyevent 82", timeout=ADB_SHELL_TIMEOUT)
self.execute("svc power stayon true", timeout=ADB_SHELL_TIMEOUT)
def disconnect(self):
adb_disconnect(self.adb_name)
self._is_ready = False
# TC2-specific methods. You should avoid calling these in
# Workloads/Instruments as that would tie them to TC2 (and if that is
# the case, then you should set the supported_devices parameter in the
# Workload/Instrument accordingly). Most of these can be replace with a
# call to set_runtime_parameters.
def get_cpuidle(self):
return self.get_sysfile_value('/sys/devices/system/cpu/cpu0/cpuidle/state1/disable')
def enable_idle_states(self):
"""
Fully enables idle states on TC2.
See http://wiki.arm.com/Research/TC2SetupAndUsage ("Enabling Idle Modes" section)
and http://wiki.arm.com/ASD/ControllingPowerManagementInLinaroKernels
"""
# Enable C1 (cluster shutdown).
self.set_sysfile_value('/sys/devices/system/cpu/cpu0/cpuidle/state1/disable', 0, verify=False)
# Enable C0 on A15 cluster.
self.set_sysfile_value('/sys/kernel/debug/idle_debug/enable_idle', 0, verify=False)
# Enable C0 on A7 cluster.
self.set_sysfile_value('/sys/kernel/debug/idle_debug/enable_idle', 1, verify=False)
def disable_idle_states(self):
"""
Disable idle states on TC2.
See http://wiki.arm.com/Research/TC2SetupAndUsage ("Enabling Idle Modes" section)
and http://wiki.arm.com/ASD/ControllingPowerManagementInLinaroKernels
"""
# Disable C1 (cluster shutdown).
self.set_sysfile_value('/sys/devices/system/cpu/cpu0/cpuidle/state1/disable', 1, verify=False)
# Disable C0.
self.set_sysfile_value('/sys/kernel/debug/idle_debug/enable_idle', 0xFF, verify=False)
def set_irq_affinity(self, cluster):
"""
Sets IRQ affinity to the specified cluster.
This method will only work if the device mode is mp_a7_bootcluster or
mp_a15_bootcluster. This operation does not make sense if there is only one
cluster active (all IRQs will obviously go to that), and it will not work for
IKS kernel because clusters are not exposed to sysfs.
:param cluster: must be either 'a15' or 'a7'.
"""
if self.config.mode not in ('mp_a7_bootcluster', 'mp_a15_bootcluster'):
raise ConfigError('Cannot set IRQ affinity with mode {}'.format(self.config.mode))
if cluster == 'a7':
self.execute('/sbin/set_irq_affinity.sh 0xc07', check_exit_code=False)
elif cluster == 'a15':
self.execute('/sbin/set_irq_affinity.sh 0xc0f', check_exit_code=False)
else:
raise ConfigError('cluster must be either "a15" or "a7"; got {}'.format(cluster))
def _boot_using_uefi(self, target):
self.logger.debug('Booting using UEFI.')
self._wait_for_vemsd_mount(target)
self._setup_before_reboot()
self._perform_uefi_reboot(target)
# Get to the UEFI menu.
self.logger.debug('Waiting for UEFI default selection.')
target.sendline('reboot')
target.expect('The default boot selection will start in')
time.sleep(1)
target.sendline('')
# If delete every time is specified, try to delete entry.
if self.config.always_delete_uefi_entry:
self._delete_uefi_entry(target, entry='workload_automation_MP')
self.config.always_delete_uefi_entry = False
# Specify argument to be passed specifying that psci is (or is not) enabled
if self.config.psci_enable:
psci_enable = ' psci=enable'
else:
psci_enable = ''
# Identify the workload automation entry.
selection_pattern = r'\[([0-9]*)\] '
try:
target.expect(re.compile(selection_pattern + 'workload_automation_MP'), timeout=5)
wl_menu_item = target.match.group(1)
except pexpect.TIMEOUT:
self._create_uefi_entry(target, psci_enable, entry_name='workload_automation_MP')
# At this point the board will have been rebooted, so we need to retry the boot.
self._boot_using_uefi(target)
else: # Did not time out.
try:
# Identify the Boot Manager menu item
target.expect(re.compile(selection_pattern + 'Boot Manager'))
boot_manager_menu_item = target.match.group(1)
# Update FDT
target.sendline(boot_manager_menu_item)
target.expect(re.compile(selection_pattern + 'Update FDT path'), timeout=15)
update_fdt_menu_item = target.match.group(1)
target.sendline(update_fdt_menu_item)
target.expect(re.compile(selection_pattern + 'NOR Flash .*'), timeout=15)
bootmonfs_menu_item = target.match.group(1)
target.sendline(bootmonfs_menu_item)
target.expect('File path of the FDT blob:')
target.sendline(self.config.dtb)
# Return to main menu and boot from the workload automation entry
target.expect(re.compile(selection_pattern + 'Return to main menu'), timeout=15)
return_to_main_menu_item = target.match.group(1)
target.sendline(return_to_main_menu_item)
target.sendline(wl_menu_item)
except pexpect.TIMEOUT:
raise DeviceError('Timed out')
def _setup_before_reboot(self):
if not self.config.disable_boot_configuration:
self.logger.debug('Performing pre-boot setup.')
substitution = {
'SCC_0x010': self.config.SCC_0x010,
'SCC_0x700': self.config.SCC_0x700,
}
with open(self.config.src_board_template_file, 'r') as fh:
template_board_txt = string.Template(fh.read())
with open(self.config.src_board_file, 'w') as wfh:
wfh.write(template_board_txt.substitute(substitution))
with open(self.config.src_images_template_file, 'r') as fh:
template_images_txt = string.Template(fh.read())
with open(self.config.src_images_file, 'w') as wfh:
wfh.write(template_images_txt.substitute({'bm_image': self.config.bm_image}))
shutil.copyfile(self.config.src_board_file,
os.path.join(self.config.board_dir, self.config.board_file))
shutil.copyfile(self.config.src_images_file,
os.path.join(self.config.board_dir, self.config.images_file))
os.system('sync') # make sure everything is flushed to microSD
else:
self.logger.debug('Boot configuration disabled; proceeding with existing board.txt and images.txt.')
def _delete_uefi_entry(self, target, entry): # pylint: disable=R0201
"""
This method deletes the entry specified as a parameter.
As a precondition, serial port input must have been parsed AT MOST up to
the point BEFORE this entry is recognized (i.e. neither the entry nor the
Boot Manager menu item has been parsed yet).
"""
try:
selection_pattern = r'\[([0-9]+)\] *'
try:
target.expect(re.compile(selection_pattern + entry), timeout=5)
wl_menu_item = target.match.group(1)
except pexpect.TIMEOUT:
return # Entry does not exist, nothing to delete here...
# Identify and select boot manager menu item
target.expect(selection_pattern + 'Boot Manager', timeout=15)
bootmanager_item = target.match.group(1)
target.sendline(bootmanager_item)
# Identify and select 'Remove entry'
target.expect(selection_pattern + 'Remove Boot Device Entry', timeout=15)
new_entry_item = target.match.group(1)
target.sendline(new_entry_item)
# Delete entry
target.expect(re.compile(selection_pattern + entry), timeout=5)
wl_menu_item = target.match.group(1)
target.sendline(wl_menu_item)
# Return to main menu
target.expect(re.compile(selection_pattern + 'Return to main menu'), timeout=15)
return_to_main_menu_item = target.match.group(1)
target.sendline(return_to_main_menu_item)
except pexpect.TIMEOUT:
raise DeviceError('Timed out while deleting UEFI entry.')
def _create_uefi_entry(self, target, psci_enable, entry_name):
"""
Creates the default boot entry that is expected when booting in uefi mode.
"""
self._wait_for_vemsd_mount(target)
try:
selection_pattern = '\[([0-9]+)\] *'
# Identify and select boot manager menu item.
target.expect(selection_pattern + 'Boot Manager', timeout=15)
bootmanager_item = target.match.group(1)
target.sendline(bootmanager_item)
# Identify and select 'add new entry'.
target.expect(selection_pattern + 'Add Boot Device Entry', timeout=15)
new_entry_item = target.match.group(1)
target.sendline(new_entry_item)
# Identify and select BootMonFs.
target.expect(selection_pattern + 'NOR Flash .*', timeout=15)
BootMonFs_item = target.match.group(1)
target.sendline(BootMonFs_item)
# Specify the parameters of the new entry.
target.expect('.+the kernel', timeout=5)
target.sendline(self.config.kernel) # kernel path
target.expect('Has FDT support\?.*\[y\/n\].*', timeout=5)
time.sleep(0.5)
target.sendline('y') # Has Fdt support? -> y
target.expect('Add an initrd.*\[y\/n\].*', timeout=5)
time.sleep(0.5)
target.sendline('y') # add an initrd? -> y
target.expect('.+the initrd.*', timeout=5)
time.sleep(0.5)
target.sendline(self.config.initrd) # initrd path
target.expect('.+to the binary.*', timeout=5)
time.sleep(0.5)
_slow_sendline(target, self.config.kernel_arguments + psci_enable) # arguments to pass to binary
time.sleep(0.5)
target.expect('.+new Entry.+', timeout=5)
_slow_sendline(target, entry_name) # Entry name
target.expect('Choice.+', timeout=15)
time.sleep(2)
except pexpect.TIMEOUT:
raise DeviceError('Timed out while creating UEFI entry.')
self._perform_uefi_reboot(target)
def _perform_uefi_reboot(self, target):
self._wait_for_vemsd_mount(target)
open(os.path.join(self.config.root_mount, 'reboot.txt'), 'a').close()
def _wait_for_vemsd_mount(self, target, timeout=100):
attempts = 1 + self.config.reboot_attempts
if os.path.exists(os.path.join(self.config.root_mount, 'config.txt')):
return
self.logger.debug('Waiting for VEMSD to mount...')
for i in xrange(attempts):
if i: # Do not reboot on the first attempt.
target.sendline('reboot')
target.sendline('usb_on')
for _ in xrange(timeout):
time.sleep(1)
if os.path.exists(os.path.join(self.config.root_mount, 'config.txt')):
return
raise DeviceError('Timed out waiting for VEMSD to mount.')
def _boot_using_bootmon(self, target):
"""
This method boots TC2 using the bootmon interface.
"""
self.logger.debug('Booting using bootmon.')
try:
self._wait_for_vemsd_mount(target, timeout=20)
except DeviceError:
# OK, something's wrong. Reboot the board and try again.
self.logger.debug('VEMSD not mounted, attempting to power cycle device.')
target.sendline(' ')
state = target.expect(['Cmd> ', self.config.bootmon_prompt, self.android_prompt]) # pylint: disable=E1101
if state == 0 or state == 1:
# Reboot - Bootmon
target.sendline('reboot')
target.expect('Powering up system...')
elif state == 2:
target.sendline('reboot -n')
target.expect('Powering up system...')
else:
raise DeviceError('Unexpected board state {}; should be 0, 1 or 2'.format(state))
self._wait_for_vemsd_mount(target)
self._setup_before_reboot()
# Reboot - Bootmon
self.logger.debug('Rebooting into bootloader...')
open(os.path.join(self.config.root_mount, 'reboot.txt'), 'a').close()
target.expect('Powering up system...')
target.expect(self.config.bootmon_prompt)
# Wait for VEMSD to mount
self._wait_for_vemsd_mount(target)
#Boot Linux - Bootmon
target.sendline('fl linux fdt ' + self.config.dtb)
target.expect(self.config.bootmon_prompt)
target.sendline('fl linux initrd ' + self.config.initrd)
target.expect(self.config.bootmon_prompt)
target.sendline('fl linux boot ' + self.config.kernel + self.config.kernel_arguments)
# Utility functions.
def _slow_sendline(target, line):
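# Sends the line one character at a time; the assumed rationale is that the
# UEFI serial console can drop characters if input arrives too quickly.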
for c in line:
target.send(c)
time.sleep(0.1)
target.sendline('')

View File

@@ -0,0 +1,96 @@
BOARD: HBI0249
TITLE: V2P-CA15_A7 Configuration File
[DCCS]
TOTALDCCS: 1 ;Total Number of DCCS
M0FILE: dbb_v110.ebf ;DCC0 Filename
M0MODE: MICRO ;DCC0 Programming Mode
[FPGAS]
TOTALFPGAS: 0 ;Total Number of FPGAs
[TAPS]
TOTALTAPS: 3 ;Total Number of TAPs
T0NAME: STM32TMC ;TAP0 Device Name
T0FILE: NONE ;TAP0 Filename
T0MODE: NONE ;TAP0 Programming Mode
T1NAME: STM32CM3 ;TAP1 Device Name
T1FILE: NONE ;TAP1 Filename
T1MODE: NONE ;TAP1 Programming Mode
T2NAME: CORTEXA15 ;TAP2 Device Name
T2FILE: NONE ;TAP2 Filename
T2MODE: NONE ;TAP2 Programming Mode
[OSCCLKS]
TOTALOSCCLKS: 9 ;Total Number of OSCCLKS
OSC0: 50.0 ;CPUREFCLK0 A15 CPU (20:1 - 1.0GHz)
OSC1: 50.0 ;CPUREFCLK1 A15 CPU (20:1 - 1.0GHz)
OSC2: 40.0 ;CPUREFCLK0 A7 CPU (20:1 - 800MHz)
OSC3: 40.0 ;CPUREFCLK1 A7 CPU (20:1 - 800MHz)
OSC4: 40.0 ;HSBM AXI (40MHz)
OSC5: 23.75 ;HDLCD (23.75MHz - TC PLL is in bypass)
OSC6: 50.0 ;SMB (50MHz)
OSC7: 50.0 ;SYSREFCLK (20:1 - 1.0GHz, ACLK - 500MHz)
OSC8: 50.0 ;DDR2 (8:1 - 400MHz)
[SCC REGISTERS]
TOTALSCCS: 33 ;Total Number of SCC registers
;SCC: 0x010 0x000003D0 ;Remap to NOR0
SCC: 0x010 $SCC_0x010 ;Switch between NOR0/NOR1
SCC: 0x01C 0xFF00FF00 ;CFGRW3 - SMC CS6/7 N/U
SCC: 0x118 0x01CD1011 ;CFGRW17 - HDLCD PLL external bypass
;SCC: 0x700 0x00320003 ;CFGRW48 - [25:24]Boot CPU [28]Boot Cluster (default CA7_0)
SCC: 0x700 $SCC_0x700 ;CFGRW48 - [25:24]Boot CPU [28]Boot Cluster (default CA7_0)
; Bootmon configuration:
; [15]: A7 Event stream generation (default: disabled)
; [14]: A15 Event stream generation (default: disabled)
; [13]: Power down the non-boot cluster (default: disabled)
; [12]: Use per-cpu mailboxes for power management (default: disabled)
; [11]: A15 executes WFEs as nops (default: disabled)
SCC: 0x400 0x33330c00 ;CFGREG41 - A15 configuration register 0 (Default 0x33330c80)
; [29:28] SPNIDEN
; [25:24] SPIDEN
; [21:20] NIDEN
; [17:16] DBGEN
; [13:12] CFGTE
; [9:8] VINITHI_CORE
; [7] IMINLN
; [3:0] CLUSTER_ID
;Set the CPU clock PLLs
SCC: 0x120 0x022F1010 ;CFGRW19 - CA15_0 PLL control - 20:1 (lock OFF)
SCC: 0x124 0x0011710D ;CFGRW20 - CA15_0 PLL value
SCC: 0x128 0x022F1010 ;CFGRW21 - CA15_1 PLL control - 20:1 (lock OFF)
SCC: 0x12C 0x0011710D ;CFGRW22 - CA15_1 PLL value
SCC: 0x130 0x022F1010 ;CFGRW23 - CA7_0 PLL control - 20:1 (lock OFF)
SCC: 0x134 0x0011710D ;CFGRW24 - CA7_0 PLL value
SCC: 0x138 0x022F1010 ;CFGRW25 - CA7_1 PLL control - 20:1 (lock OFF)
SCC: 0x13C 0x0011710D ;CFGRW26 - CA7_1 PLL value
;Power management interface
SCC: 0xC00 0x00000005 ;Control: [0]PMI_EN [1]DBG_EN [2]SPC_SYSCFG
SCC: 0xC04 0x060E0356 ;Latency in uS max: [15:0]DVFS [31:16]PWRUP
SCC: 0xC08 0x00000000 ;Reserved
SCC: 0xC0C 0x00000000 ;Reserved
;CA15 performance values: 0xVVVFFFFF
SCC: 0xC10 0x384061A8 ;CA15 PERFVAL0, 900mV, 20,000*20= 500MHz
SCC: 0xC14 0x38407530 ;CA15 PERFVAL1, 900mV, 25,000*20= 600MHz
SCC: 0xC18 0x384088B8 ;CA15 PERFVAL2, 900mV, 30,000*20= 700MHz
SCC: 0xC1C 0x38409C40 ;CA15 PERFVAL3, 900mV, 35,000*20= 800MHz
SCC: 0xC20 0x3840AFC8 ;CA15 PERFVAL4, 900mV, 40,000*20= 900MHz
SCC: 0xC24 0x3840C350 ;CA15 PERFVAL5, 900mV, 45,000*20=1000MHz
SCC: 0xC28 0x3CF0D6D8 ;CA15 PERFVAL6, 975mV, 50,000*20=1100MHz
SCC: 0xC2C 0x41A0EA60 ;CA15 PERFVAL7, 1050mV, 55,000*20=1200MHz
;CA7 performance values: 0xVVVFFFFF
SCC: 0xC30 0x3840445C ;CA7 PERFVAL0, 900mV, 10,000*20= 350MHz
SCC: 0xC34 0x38404E20 ;CA7 PERFVAL1, 900mV, 15,000*20= 400MHz
SCC: 0xC38 0x384061A8 ;CA7 PERFVAL2, 900mV, 20,000*20= 500MHz
SCC: 0xC3C 0x38407530 ;CA7 PERFVAL3, 900mV, 25,000*20= 600MHz
SCC: 0xC40 0x384088B8 ;CA7 PERFVAL4, 900mV, 30,000*20= 700MHz
SCC: 0xC44 0x38409C40 ;CA7 PERFVAL5, 900mV, 35,000*20= 800MHz
SCC: 0xC48 0x3CF0AFC8 ;CA7 PERFVAL6, 975mV, 40,000*20= 900MHz
SCC: 0xC4C 0x41A0C350 ;CA7 PERFVAL7, 1050mV, 45,000*20=1000MHz

View File

@@ -0,0 +1,25 @@
TITLE: Versatile Express Images Configuration File
[IMAGES]
TOTALIMAGES: 4 ;Number of Images (Max : 32)
NOR0UPDATE: AUTO ;Image Update:NONE/AUTO/FORCE
NOR0ADDRESS: BOOT ;Image Flash Address
NOR0FILE: \SOFTWARE\$bm_image ;Image File Name
NOR1UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
NOR1ADDRESS: 0x00000000 ;Image Flash Address
NOR1FILE: \SOFTWARE\kern_iks.bin ;Image File Name
NOR1LOAD: 0x80008000
NOR1ENTRY: 0x80008000
NOR2UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
NOR2ADDRESS: 0x00000000 ;Image Flash Address
NOR2FILE: \SOFTWARE\iks.dtb ;Image File Name for booting in A7 cluster
NOR2LOAD: 0x84000000
NOR2ENTRY: 0x84000000
NOR3UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
NOR3ADDRESS: 0x00000000 ;Image Flash Address
NOR3FILE: \SOFTWARE\init_iks.bin ;Image File Name
NOR3LOAD: 0x90100000
NOR3ENTRY: 0x90100000

View File

@@ -0,0 +1,55 @@
TITLE: Versatile Express Images Configuration File
[IMAGES]
TOTALIMAGES: 9 ;Number of Images (Max: 32)
NOR0UPDATE: AUTO ;Image Update:NONE/AUTO/FORCE
NOR0ADDRESS: BOOT ;Image Flash Address
NOR0FILE: \SOFTWARE\$bm_image ;Image File Name
NOR1UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
NOR1ADDRESS: 0x0E000000 ;Image Flash Address
NOR1FILE: \SOFTWARE\kern_mp.bin ;Image File Name
NOR1LOAD: 0x80008000
NOR1ENTRY: 0x80008000
NOR2UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
NOR2ADDRESS: 0x0E800000 ;Image Flash Address
NOR2FILE: \SOFTWARE\mp_a7.dtb ;Image File Name for booting in A7 cluster
NOR2LOAD: 0x84000000
NOR2ENTRY: 0x84000000
NOR3UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
NOR3ADDRESS: 0x0E900000 ;Image Flash Address
NOR3FILE: \SOFTWARE\mp_a15.dtb ;Image File Name
NOR3LOAD: 0x84000000
NOR3ENTRY: 0x84000000
NOR4UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
NOR4ADDRESS: 0x0EA00000 ;Image Flash Address
NOR4FILE: \SOFTWARE\mp_a7bc.dtb ;Image File Name
NOR4LOAD: 0x84000000
NOR4ENTRY: 0x84000000
NOR5UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
NOR5ADDRESS: 0x0EB00000 ;Image Flash Address
NOR5FILE: \SOFTWARE\mp_a15bc.dtb ;Image File Name
NOR5LOAD: 0x84000000
NOR5ENTRY: 0x84000000
NOR6UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
NOR6ADDRESS: 0x0EC00000 ;Image Flash Address
NOR6FILE: \SOFTWARE\init_mp.bin ;Image File Name
NOR6LOAD: 0x85000000
NOR6ENTRY: 0x85000000
NOR7UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
NOR7ADDRESS: 0x0C000000 ;Image Flash Address
NOR7FILE: \SOFTWARE\tc2_sec.bin ;Image File Name
NOR7LOAD: 0
NOR7ENTRY: 0
NOR8UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
NOR8ADDRESS: 0x0D000000 ;Image Flash Address
NOR8FILE: \SOFTWARE\tc2_uefi.bin ;Image File Name
NOR8LOAD: 0
NOR8ENTRY: 0

View File

@@ -0,0 +1,16 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

View File

@@ -0,0 +1,37 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wlauto import LinuxDevice, Parameter
class GenericDevice(LinuxDevice):
name = 'generic_linux'
description = """
Generic Linux device. Use this if you do not have a device file for
your device.
This implements the minimum functionality that should be supported by
all Linux devices.
"""
abi = 'armeabi'
has_gpu = True
parameters = [
Parameter('core_names', default=[], override=True),
Parameter('core_clusters', default=[], override=True),
]

View File

@@ -0,0 +1,35 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wlauto import LinuxDevice, Parameter
class OdroidXU3LinuxDevice(LinuxDevice):
name = "odroidxu3_linux"
description = 'HardKernel Odroid XU3 development board (Ubuntu image).'
core_modules = [
'odroidxu3-fan',
]
parameters = [
Parameter('core_names', default=['a7', 'a7', 'a7', 'a7', 'a15', 'a15', 'a15', 'a15'], override=True),
Parameter('core_clusters', default=[0, 0, 0, 0, 1, 1, 1, 1], override=True),
]
abi = 'armeabi'

143
wlauto/exceptions.py Normal file
View File

@@ -0,0 +1,143 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wlauto.utils.misc import get_traceback, TimeoutError # NOQA pylint: disable=W0611
class WAError(Exception):
"""Base class for all Workload Automation exceptions."""
pass
class NotFoundError(WAError):
"""Raised when the specified item is not found."""
pass
class ValidationError(WAError):
"""Raised on failure to validate an extension."""
pass
class DeviceError(WAError):
"""General Device error."""
pass
class DeviceNotRespondingError(WAError):
"""The device is not responding."""
def __init__(self, device):
super(DeviceNotRespondingError, self).__init__('Device {} is not responding.'.format(device))
class WorkloadError(WAError):
"""General Workload error."""
pass
class HostError(WAError):
"""Problem with the host on which WA is running."""
pass
class ModuleError(WAError):
"""
Problem with a module.
.. note:: Modules for specific extension types should raise exceptions
appropriate to that extension. E.g. a ``Device`` module should raise
``DeviceError``. This is intended for situations where a module is
unsure (and/or doesn't care) what its owner is.
"""
pass
class InstrumentError(WAError):
"""General Instrument error."""
pass
class ResultProcessorError(WAError):
"""General ResultProcessor error."""
pass
class ResourceError(WAError):
"""General Resolver error."""
pass
class CommandError(WAError):
"""Raised by commands when they have encountered an error condition
during execution."""
pass
class ToolError(WAError):
"""Raised by tools when they have encountered an error condition
during execution."""
pass
class LoaderError(WAError):
"""Raised when there is an error loading an extension or
an external resource. Apart from the usual message, the __init__
takes an exc_info parameter which should be the result of
sys.exc_info() for the original exception (if any) that
caused the error."""
def __init__(self, message, exc_info=None):
super(LoaderError, self).__init__(message)
self.exc_info = exc_info
def __str__(self):
if self.exc_info:
orig = self.exc_info[1]
orig_name = type(orig).__name__
if isinstance(orig, WAError):
reason = 'because of:\n{}: {}'.format(orig_name, orig)
else:
reason = 'because of:\n{}\n{}: {}'.format(get_traceback(self.exc_info), orig_name, orig)
return '\n'.join([self.message, reason])
else:
return self.message
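# Illustrative use of LoaderError (the extension name below is made up):
#
#     try:
#         import my_extension
#     except ImportError:
#         raise LoaderError('Could not load my_extension.', sys.exc_info())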
class ConfigError(WAError):
"""Raised when configuration provided is invalid. This error suggests that
the user should modify their config and try again."""
pass
class WorkerThreadError(WAError):
"""
This should get raised in the main thread if a non-WAError-derived exception occurs on
a worker/background thread. If a WAError-derived exception is raised in the worker, then
that exception should be re-raised on the main thread directly -- the main point of this is
to preserve the backtrace in the output, as a backtrace does not get output for WAErrors.
"""
def __init__(self, thread, exc_info):
self.thread = thread
self.exc_info = exc_info
orig = self.exc_info[1]
orig_name = type(orig).__name__
message = 'Exception of type {} occurred on thread {}:\n'.format(orig_name, thread)
message += '{}\n{}: {}'.format(get_traceback(self.exc_info), orig_name, orig)
super(WorkerThreadError, self).__init__(message)

74
wlauto/external/README vendored Normal file
View File

@@ -0,0 +1,74 @@
This directory contains external libraries and standalone utilities which have
been written/modified to work with Workload Automation (and thus need to be
included with WA rather than obtained from original sources).
bbench_server
=============
This is a small server that is used to detect when the ``bbench`` workload has completed.
``bbench`` navigates through a series of web pages in a browser using javascript.
At the end, it causes the browser to send a GET request to the port bbench_server is
listening on, indicating the end of the workload.
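For illustration, the completion signal can be reproduced from a host machine
with a plain GET request to the server's port (3030); in the real workload the
request is issued by the bbench javascript:

    # Python 2 sketch; assumes the server is reachable on localhost.
    import urllib2
    print urllib2.urlopen('http://localhost:3030/').read()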
daq_server
==========
Contains the DAQ server files that will run on a Windows machine. Please refer to
the daq instrument documentation.
louie (third party)
=====
Python package that is itself a fork of (and now a replacement for) pydispatcher.
This library provides a signal dispatching mechanism. It has been modified for
WA to add prioritization to callbacks.
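A minimal sketch of the WA-added priority parameter (the receiver and signal
names here are made up; see dispatcher.connect for the full API):

    import louie
    def on_event():
        print 'handled'
    louie.connect(on_event, signal='my-signal', priority=10)
    louie.send('my-signal')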
pmu_logger
==========
Source for the kernel driver that enables the logging of CCI counters to ftrace
on a periodic basis. This driver is required by the ``cci_pmu_logger`` instrument.
readenergy
==========
Outputs Juno internal energy/power/voltage/current measurements by reading APB
registers from memory. This is used by the ``juno_energy`` instrument.
revent
======
This is a tool that is used to both record and play back key press and screen tap
events. It is used to record UI manipulation for some workloads (such as games)
where it is not possible to use the Android UI Automator.
The tool is also included in binary form in wlauto/common/. In order to build
the tool from source, you will need to have the Android NDK in your PATH.
stacktracer.py (third party)
==============
A module based on an ActiveState recipe that allows tracing thread stacks during
execution of a Python program. This is used through the ``--debug`` flag in WA
to ease debugging multi-threaded parts of the code.
terminalsize.py (third party)
===============
Implements a platform-agnostic way of determining terminal window size. Taken
from a public Github gist.
uiauto
======
Contains the utilities library for UI automation.

31
wlauto/external/bbench_server/build.sh vendored Executable file
View File

@@ -0,0 +1,31 @@
#!/bin/bash
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
BUILD_COMMAND=ndk-build
if [[ $(which $BUILD_COMMAND) ]] ; then
$BUILD_COMMAND
if [[ $? -eq 0 ]]; then
echo Copying to ../../workloads/bbench/
cp libs/armeabi/bbench_server ../../workloads/bbench/bin/armeabi/bbench_server
fi
else
echo Please make sure you have Android NDK in your PATH.
exit 1
fi

9
wlauto/external/bbench_server/jni/Android.mk vendored Normal file
View File

@@ -0,0 +1,9 @@
LOCAL_PATH:= $(call my-dir)
include $(CLEAR_VARS)
LOCAL_SRC_FILES:= bbench_server.cpp
LOCAL_MODULE := bbench_server
LOCAL_MODULE_TAGS := optional
LOCAL_STATIC_LIBRARIES := libc
LOCAL_SHARED_LIBRARIES :=
include $(BUILD_EXECUTABLE)

151
wlauto/external/bbench_server/jni/bbench_server.cpp vendored Normal file
View File

@@ -0,0 +1,151 @@
/* Copyright 2012-2015 ARM Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**************************************************************************/
/* Simple HTTP server program that will return on accepting connection */
/**************************************************************************/
/* Tested on Android ICS browser and FireFox browser */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netdb.h>
#include <arpa/inet.h>
#include <sys/wait.h>
#define SERVERPORT "3030"
void ExitOnError(int condition, const char *msg)
{
if(condition) { printf("Server: %s\n", msg); exit(1);}
}
void *GetInetAddr(struct sockaddr *sa)
{
if (sa->sa_family == AF_INET)
{
return &(((struct sockaddr_in*)sa)->sin_addr);
}
else
{
return &(((struct sockaddr_in6*)sa)->sin6_addr);
}
}
int main(int argc, char *argv[])
{
socklen_t addr_size;
struct addrinfo hints, *res;
int server_fd, client_fd;
int retval;
int timeout_in_seconds;
// Get the timeout value in seconds
if(argc < 2)
{
printf("Usage %s <timeout in seconds>\n", argv[0]);
exit(1);
}
else
{
timeout_in_seconds = atoi(argv[1]);
printf("Server: Waiting for connection on port %s with timeout of %d seconds\n", SERVERPORT, timeout_in_seconds);
}
/**************************************************************************/
/* Listen to a socket */
/**************************************************************************/
memset(&hints, 0, sizeof hints);
hints.ai_family = AF_UNSPEC; // use IPv4 or IPv6, whichever
hints.ai_socktype = SOCK_STREAM;
hints.ai_flags = AI_PASSIVE; // fill in my IP for me
getaddrinfo(NULL, SERVERPORT, &hints, &res);
server_fd = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
ExitOnError(server_fd < 0, "Socket creation failed");
retval = bind(server_fd, res->ai_addr, res->ai_addrlen);
ExitOnError(retval < 0, "Bind failed");
retval = listen(server_fd, 10);
ExitOnError(retval < 0, "Listen failed");
/**************************************************************************/
/* Wait for connection to arrive or time out */
/**************************************************************************/
fd_set readfds;
FD_ZERO(&readfds);
FD_SET(server_fd, &readfds);
// Timeout parameter
timeval tv;
tv.tv_sec = timeout_in_seconds;
tv.tv_usec = 0;
int ret = select(server_fd+1, &readfds, NULL, NULL, &tv);
ExitOnError(ret <= 0, "No connection established, timed out");
ExitOnError(FD_ISSET(server_fd, &readfds) == 0, "Error occurred in select");
/**************************************************************************/
/* Accept connection and print the information */
/**************************************************************************/
{
struct sockaddr_storage client_addr;
char client_addr_string[INET6_ADDRSTRLEN];
addr_size = sizeof client_addr;
client_fd = accept(server_fd, (struct sockaddr *)&client_addr, &addr_size);
ExitOnError(client_fd < 0, "Accept failed");
inet_ntop(client_addr.ss_family,
GetInetAddr((struct sockaddr *)&client_addr),
client_addr_string,
sizeof client_addr_string);
printf("Server: Received connection from %s\n", client_addr_string);
}
/**************************************************************************/
/* Send an acceptable HTTP response */
/**************************************************************************/
{
char response[] = "HTTP/1.1 200 OK\r\n"
"Content-Type: text/html\r\n"
"Connection: close\r\n"
"\r\n"
"<html>"
"<head>Local Server: Connection Accepted</head>"
"<body></body>"
"</html>";
int bytes_sent;
bytes_sent = send(client_fd, response, strlen(response), 0);
ExitOnError(bytes_sent < 0, "Sending Response failed");
}
close(client_fd);
close(server_fd);
return 0;
}

Binary file not shown.

View File

0
wlauto/external/daq_server/src/README vendored Normal file
View File

25
wlauto/external/daq_server/src/build.sh vendored Executable file
View File

@@ -0,0 +1,25 @@
#!/bin/bash
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
python setup.py sdist
rm -rf build
rm -f MANIFEST
if [[ -d dist ]]; then
mv dist/*.tar.gz ..
rm -rf dist
fi
find . -iname \*.pyc -delete

17
wlauto/external/daq_server/src/daqpower/__init__.py vendored Normal file
View File

@@ -0,0 +1,17 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__version__ = '1.0.1'

380
wlauto/external/daq_server/src/daqpower/client.py vendored Normal file
View File

@@ -0,0 +1,380 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=E1101,E1103
import os
import sys
from twisted.internet import reactor
from twisted.internet.protocol import Protocol, ClientFactory, ReconnectingClientFactory
from twisted.internet.error import ConnectionLost, ConnectionDone
from twisted.protocols.basic import LineReceiver
if __name__ == '__main__': # for debugging
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from daqpower import log
from daqpower.common import DaqServerRequest, DaqServerResponse, Status
from daqpower.config import get_config_parser
__all__ = ['execute_command', 'run_send_command', 'Status']
class Command(object):
def __init__(self, name, **params):
self.name = name
self.params = params
class CommandResult(object):
def __init__(self):
self.status = None
self.message = None
self.data = None
def __str__(self):
return '{} {}'.format(self.status, self.message)
class CommandExecutorProtocol(Protocol):
def __init__(self, command, timeout=10, retries=1):
self.command = command
self.sent_request = None
self.waiting_for_response = False
self.keep_going = None
self.ports_to_pull = None
self.factory = None
self.timeoutCallback = None
self.timeout = timeout
self.retries = retries
self.retry_count = 0
def connectionMade(self):
if self.command.name == 'get_data':
self.sendRequest('list_port_files')
else:
self.sendRequest(self.command.name, **self.command.params)
def connectionLost(self, reason=ConnectionDone):
if isinstance(reason, ConnectionLost):
self.errorOut('connection lost: {}'.format(reason))
elif self.waiting_for_response:
self.errorOut('Server closed connection without sending a response.')
else:
log.debug('connection terminated.')
def sendRequest(self, command, **params):
self.sent_request = DaqServerRequest(command, params)
request_string = self.sent_request.serialize()
log.debug('sending request: {}'.format(request_string))
self.transport.write(''.join([request_string, '\r\n']))
self.timeoutCallback = reactor.callLater(self.timeout, self.requestTimedOut)
self.waiting_for_response = True
def dataReceived(self, data):
self.keep_going = False
if self.waiting_for_response:
self.waiting_for_response = False
self.timeoutCallback.cancel()
try:
response = DaqServerResponse.deserialize(data)
except Exception, e: # pylint: disable=W0703
self.errorOut('Invalid response: {} ({})'.format(data, e))
else:
if response.status != Status.ERROR:
self.processResponse(response) # may set self.keep_going
if not self.keep_going:
self.commandCompleted(response.status, response.message, response.data)
else:
self.errorOut(response.message)
else:
self.errorOut('unexpected data received: {}\n'.format(data))
def processResponse(self, response):
if self.sent_request.command in ['list_ports', 'list_port_files']:
self.processPortsResponse(response)
elif self.sent_request.command == 'list_devices':
self.processDevicesResponse(response)
elif self.sent_request.command == 'pull':
self.processPullResponse(response)
def processPortsResponse(self, response):
if 'ports' not in response.data:
self.errorOut('Response did not contain ports data: {} ({}).'.format(response, response.data))
ports = response.data['ports']
response.data = ports
if self.command.name == 'get_data':
if ports:
self.ports_to_pull = ports
self.sendPullRequest(self.ports_to_pull.pop())
else:
response.status = Status.OKISH
response.message = 'No ports were returned.'
def processDevicesResponse(self, response):
if 'devices' not in response.data:
self.errorOut('Response did not contain devices data: {} ({}).'.format(response, response.data))
ports = response.data['devices']
response.data = ports
def sendPullRequest(self, port_id):
self.sendRequest('pull', port_id=port_id)
self.keep_going = True
def processPullResponse(self, response):
if 'port_number' not in response.data:
self.errorOut('Response does not contain port number: {} ({}).'.format(response, response.data))
port_number = response.data.pop('port_number')
filename = self.sent_request.params['port_id'] + '.csv'
self.factory.initiateFileTransfer(filename, port_number)
if self.ports_to_pull:
self.sendPullRequest(self.ports_to_pull.pop())
def commandCompleted(self, status, message=None, data=None):
self.factory.result.status = status
self.factory.result.message = message
self.factory.result.data = data
self.transport.loseConnection()
def requestTimedOut(self):
self.retry_count += 1
if self.retry_count > self.retries:
self.errorOut("Request timed out; server failed to respond.")
else:
log.debug('Retrying...')
self.connectionMade()
def errorOut(self, message):
self.factory.errorOut(message)
class CommandExecutorFactory(ClientFactory):
protocol = CommandExecutorProtocol
wait_delay = 1
def __init__(self, config, command, timeout=10, retries=1):
self.config = config
self.command = command
self.timeout = timeout
self.retries = retries
self.result = CommandResult()
self.done = False
self.transfers_in_progress = {}
if command.name == 'get_data':
if 'output_directory' not in command.params:
self.errorOut('output_directory not specified for get_data command.')
self.output_directory = command.params['output_directory']
if not os.path.isdir(self.output_directory):
log.debug('Creating output directory {}'.format(self.output_directory))
os.makedirs(self.output_directory)
def buildProtocol(self, addr):
protocol = CommandExecutorProtocol(self.command, self.timeout, self.retries)
protocol.factory = self
return protocol
def initiateFileTransfer(self, filename, port):
log.debug('Downloading {} from port {}'.format(filename, port))
filepath = os.path.join(self.output_directory, filename)
session = FileReceiverFactory(filepath, self)
connector = reactor.connectTCP(self.config.host, port, session)
self.transfers_in_progress[session] = connector
def transferComplete(self, session):
connector = self.transfers_in_progress[session]
log.debug('Transfer on port {} complete.'.format(connector.port))
del self.transfers_in_progress[session]
def clientConnectionLost(self, connector, reason):
if self.transfers_in_progress:
log.debug('Waiting for the transfer(s) to complete.')
self.waitForTransfersToCompleteAndExit()
def clientConnectionFailed(self, connector, reason):
self.result.status = Status.ERROR
self.result.message = 'Could not connect to server.'
self.waitForTransfersToCompleteAndExit()
def waitForTransfersToCompleteAndExit(self):
if self.transfers_in_progress:
reactor.callLater(self.wait_delay, self.waitForTransfersToCompleteAndExit)
else:
log.debug('Stopping the reactor.')
reactor.stop()
def errorOut(self, message):
self.result.status = Status.ERROR
self.result.message = message
reactor.crash()
def __str__(self):
return '<CommandExecutorProtocol {}>'.format(self.command.name)
__repr__ = __str__
class FileReceiver(LineReceiver): # pylint: disable=W0223
def __init__(self, path):
self.path = path
self.fh = None
self.factory = None
def connectionMade(self):
if os.path.isfile(self.path):
log.warning('overwriting existing file.')
os.remove(self.path)
self.fh = open(self.path, 'w')
def connectionLost(self, reason=ConnectionDone):
if self.fh:
self.fh.close()
def lineReceived(self, line):
line = line.rstrip('\r\n') + '\n'
self.fh.write(line)
class FileReceiverFactory(ReconnectingClientFactory):
def __init__(self, path, owner):
self.path = path
self.owner = owner
def buildProtocol(self, addr):
protocol = FileReceiver(self.path)
protocol.factory = self
self.resetDelay()
return protocol
def clientConnectionLost(self, connector, reason):
if isinstance(reason, ConnectionLost):
log.error('Connection lost: {}'.format(reason))
ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
else:
self.owner.transferComplete(self)
def clientConnectionFailed(self, connector, reason):
if isinstance(reason, ConnectionLost):
log.error('Connection failed: {}'.format(reason))
ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
def __str__(self):
return '<FileReceiver {}>'.format(self.path)
__repr__ = __str__
def execute_command(server_config, command, **kwargs):
before_fds = _get_open_fds() # see the comment in the finally clause below
if isinstance(command, basestring):
command = Command(command, **kwargs)
timeout = 300 if command.name in ['stop', 'pull'] else 10
factory = CommandExecutorFactory(server_config, command, timeout)
# reactors aren't designed to be re-startable. In order to be
# able to call execute_command multiple times, we need to force
# re-installation of the reactor; hence this hackery.
# TODO: look into implementing restartable reactors. According to the
# Twisted FAQ, there is no good reason why there isn't one:
# http://twistedmatrix.com/trac/wiki/FrequentlyAskedQuestions#WhycanttheTwistedsreactorberestarted
from twisted.internet import default
del sys.modules['twisted.internet.reactor']
default.install()
global reactor # pylint: disable=W0603
reactor = sys.modules['twisted.internet.reactor']
try:
reactor.connectTCP(server_config.host, server_config.port, factory)
reactor.run()
return factory.result
finally:
# re-startable reactor hack part 2.
# twisted hijacks SIGINT and doesn't bother to un-hijack it when the reactor
# stops. So we have to do it for it *rolls eye*.
import signal
signal.signal(signal.SIGINT, signal.default_int_handler)
# OK, the reactor is also leaking file descriptors. Tracking down all
# of them is non-trivial, so instead we're just comparing the before
# and after lists of open FDs for the current process, and closing all
# new ones, as execute_command should never leave anything open after
# it exits (even when downloading data files from the server).
# TODO: This is way too hacky even compared to the rest of this function.
# Additionally, the current implementation ties this to UNIX,
# so in the long run, we need to do this properly and get the FDs
# from the reactor.
after_fds = _get_open_fds()
for fd in (after_fds - before_fds):
try:
os.close(int(fd[1:]))
except OSError:
pass
# Below is the alternative code that gets FDs from the reactor, however
# at the moment it doesn't seem to get everything, which is why code
# above is used instead.
#for fd in reactor._selectables:
# os.close(fd)
#reactor._poller.close()
def _get_open_fds():
if os.name == 'posix':
import subprocess
pid = os.getpid()
procs = subprocess.check_output(['lsof', '-w', '-Ff', '-p', str(pid)])
return set(procs.split())
else:
# TODO: Implement the Windows equivalent.
return []
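# Illustrative programmatic use of execute_command (host address made up):
#
#     from daqpower.config import ServerConfiguration
#     result = execute_command(ServerConfiguration(host='192.168.0.10'), 'list_ports')
#     print result.status, result.message, result.data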
def run_send_command():
"""Main entry point when running as a script -- should not be invoked form another module."""
parser = get_config_parser()
parser.add_argument('command')
parser.add_argument('-o', '--output-directory', metavar='DIR', default='.',
help='Directory used to output data files (defaults to the current directory).')
parser.add_argument('--verbose', help='Produce verbose output.', action='store_true', default=False)
args = parser.parse_args()
if not args.device_config.labels:
args.device_config.labels = ['PORT_{}'.format(i) for i in xrange(len(args.device_config.resistor_values))]
if args.verbose:
log.start_logging('DEBUG')
else:
log.start_logging('INFO', fmt='%(levelname)-8s %(message)s')
if args.command == 'configure':
args.device_config.validate()
command = Command(args.command, config=args.device_config)
elif args.command == 'get_data':
command = Command(args.command, output_directory=args.output_directory)
else:
command = Command(args.command)
result = execute_command(args.server_config, command)
print result
if result.data:
print result.data
if __name__ == '__main__':
run_send_command()

99
wlauto/external/daq_server/src/daqpower/common.py vendored Normal file
View File

@@ -0,0 +1,99 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=E1101
import json
class Serializer(json.JSONEncoder):
def default(self, o): # pylint: disable=E0202
if isinstance(o, Serializable):
return o.serialize()
if isinstance(o, Enum.EnumEntry):
return o.name
return json.JSONEncoder.default(self, o)
class Serializable(object):
@classmethod
def deserialize(cls, text):
return cls(**json.loads(text))
def serialize(self, d=None):
if d is None:
d = self.__dict__
return json.dumps(d, cls=Serializer)
class DaqServerRequest(Serializable):
def __init__(self, command, params=None): # pylint: disable=W0231
self.command = command
self.params = params or {}
class DaqServerResponse(Serializable):
def __init__(self, status, message=None, data=None): # pylint: disable=W0231
self.status = status
self.message = message.strip().replace('\r\n', ' ') if message else ''
self.data = data or {}
def __str__(self):
return '{} {}'.format(self.status, self.message or '')
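# Illustrative round-trip of the wire format (values made up):
#
#     request = DaqServerRequest('pull', {'port_id': 'PORT_0'})
#     text = request.serialize()                    # JSON string sent over the wire
#     request2 = DaqServerRequest.deserialize(text)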
class Enum(object):
"""
Assuming MyEnum = Enum('A', 'B'),
MyEnum.A and MyEnum.B are valid values.
a = MyEnum.A
(a == MyEnum.A) == True
(a in MyEnum) == True
MyEnum('A') == MyEnum.A
str(MyEnum.A) == 'A'
"""
class EnumEntry(object):
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
def __cmp__(self, other):
return cmp(self.name, str(other))
def __init__(self, *args):
for a in args:
setattr(self, a, self.EnumEntry(a))
def __call__(self, value):
if value not in self.__dict__:
raise ValueError('Not enum value: {}'.format(value))
return self.__dict__[value]
def __iter__(self):
for e in self.__dict__:
yield self.__dict__[e]
Status = Enum('OK', 'OKISH', 'ERROR')

154
wlauto/external/daq_server/src/daqpower/config.py vendored Normal file
View File

@@ -0,0 +1,154 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from daqpower.common import Serializable
class ConfigurationError(Exception):
"""Raised when configuration passed into DaqServer is invaid."""
pass
class DeviceConfiguration(Serializable):
"""Encapulates configuration for the DAQ, typically, passed from
the client."""
valid_settings = ['device_id', 'v_range', 'dv_range', 'sampling_rate', 'resistor_values', 'labels']
default_device_id = 'Dev1'
default_v_range = 2.5
default_dv_range = 0.2
default_sampling_rate = 10000
# Channel map used in DAQ 6363 and similar.
default_channel_map = (0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23)
@property
def number_of_ports(self):
return len(self.resistor_values)
def __init__(self, **kwargs): # pylint: disable=W0231
try:
self.device_id = kwargs.pop('device_id') or self.default_device_id
self.v_range = float(kwargs.pop('v_range') or self.default_v_range)
self.dv_range = float(kwargs.pop('dv_range') or self.default_dv_range)
self.sampling_rate = int(kwargs.pop('sampling_rate') or self.default_sampling_rate)
self.resistor_values = kwargs.pop('resistor_values') or []
self.channel_map = kwargs.pop('channel_map') or self.default_channel_map
self.labels = (kwargs.pop('labels') or
['PORT_{}.csv'.format(i) for i in xrange(len(self.resistor_values))])
except KeyError, e:
raise ConfigurationError('Missing config: {}'.format(e.message))
if kwargs:
raise ConfigurationError('Unexpected config: {}'.format(kwargs))
def validate(self):
if not self.number_of_ports:
raise ConfigurationError('No resistor values were specified.')
if not len(self.resistor_values) == len(self.labels):
message = 'The number of resistors ({}) does not match the number of labels ({})'
raise ConfigurationError(message.format(len(self.resistor_values), len(self.labels)))
def __str__(self):
return self.serialize()
__repr__ = __str__
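# Illustrative single-port configuration (values made up; every setting must be
# supplied as a keyword, with None selecting the class default):
#
#     config = DeviceConfiguration(device_id=None, v_range=None, dv_range=None,
#                                  sampling_rate=None, resistor_values=[0.005],
#                                  channel_map=None, labels=['PORT_0'])
#     config.validate()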
class ServerConfiguration(object):
"""Client-side server configuration."""
valid_settings = ['host', 'port']
default_host = '127.0.0.1'
default_port = 45677
def __init__(self, **kwargs):
self.host = kwargs.pop('host', None) or self.default_host
self.port = kwargs.pop('port', None) or self.default_port
if kwargs:
raise ConfigurationError('Unexpected config: {}'.format(kwargs))
def validate(self):
if not self.host:
raise ConfigurationError('Server host not specified.')
if not self.port:
raise ConfigurationError('Server port not specified.')
elif not isinstance(self.port, int):
raise ConfigurationError('Server port must be an integer.')
class UpdateDeviceConfig(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
setting = option_string.strip('-').replace('-', '_')
if setting not in DeviceConfiguration.valid_settings:
raise ConfigurationError('Unknown option: {}'.format(option_string))
setattr(namespace._device_config, setting, values)
class UpdateServerConfig(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
setting = option_string.strip('-').replace('-', '_')
if setting not in namespace.server_config.valid_settings:
raise ConfigurationError('Unknown option: {}'.format(option_string))
setattr(namespace.server_config, setting, values)
class ConfigNamespace(object):
class _N(object):
def __init__(self):
self.device_id = None
self.v_range = None
self.dv_range = None
self.sampling_rate = None
self.resistor_values = None
self.labels = None
self.channel_map = None
@property
def device_config(self):
return DeviceConfiguration(**self._device_config.__dict__)
def __init__(self):
self._device_config = self._N()
self.server_config = ServerConfiguration()
class ConfigArgumentParser(argparse.ArgumentParser):
def parse_args(self, *args, **kwargs):
kwargs['namespace'] = ConfigNamespace()
return super(ConfigArgumentParser, self).parse_args(*args, **kwargs)
def get_config_parser(server=True, device=True):
parser = ConfigArgumentParser()
if device:
parser.add_argument('--device-id', action=UpdateDeviceConfig)
parser.add_argument('--v-range', action=UpdateDeviceConfig, type=float)
parser.add_argument('--dv-range', action=UpdateDeviceConfig, type=float)
parser.add_argument('--sampling-rate', action=UpdateDeviceConfig, type=int)
parser.add_argument('--resistor-values', action=UpdateDeviceConfig, type=float, nargs='*')
parser.add_argument('--labels', action=UpdateDeviceConfig, nargs='*')
if server:
parser.add_argument('--host', action=UpdateServerConfig)
parser.add_argument('--port', action=UpdateServerConfig, type=int)
return parser

265
wlauto/external/daq_server/src/daqpower/daq.py vendored Normal file
View File

@@ -0,0 +1,265 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Creates a new DAQ device class. This class assumes that there is a
DAQ connected and mapped as Dev1. It assumes a specific wiring configuration on the DAQ (it is not
meant to be a generic DAQ interface). The following diagram shows the wiring for one DaqDevice
port::
Port 0
========
| A0+ <--- Vr -------------------------|
| |
| A0- <--- GND -------------------// |
| |
| A1+ <--- V+ ------------|-------V+ |
| r | |
| A1- <--- Vr --/\/\/\----| |
| | |
| | |
| |--------------------------|
========
:number_of_ports: The number of ports connected on the DAQ. Each port requires 2 DAQ channels:
one for the source voltage and one for the voltage drop over the
resistor r (V+ - Vr), which allows us to compute the current.
:resistor_value: The resistance of r. Typically a few milliOhm.
:downsample: The number of samples combined to create one Power point. If set to one
each sample corresponds to one reported power point.
:sampling_rate: The rate at which DAQ takes a sample from each channel.
"""
# pylint: disable=F0401,E1101,W0621
import os
import sys
import csv
import time
import threading
from Queue import Queue, Empty
import numpy
from PyDAQmx import Task
from PyDAQmx.DAQmxFunctions import DAQmxGetSysDevNames
from PyDAQmx.DAQmxTypes import int32, byref, create_string_buffer
from PyDAQmx.DAQmxConstants import (DAQmx_Val_Diff, DAQmx_Val_Volts, DAQmx_Val_GroupByScanNumber, DAQmx_Val_Auto,
DAQmx_Val_Acquired_Into_Buffer, DAQmx_Val_Rising, DAQmx_Val_ContSamps)
from daqpower import log
def list_available_devices():
"""Returns the list of DAQ devices visible to the driver."""
bufsize = 2048  # Should be plenty for all but the most pathological of situations.
buf = create_string_buffer('\000' * bufsize)
DAQmxGetSysDevNames(buf, bufsize)
return buf.value.split(',')
class ReadSamplesTask(Task):
def __init__(self, config, consumer):
Task.__init__(self)
self.config = config
self.consumer = consumer
self.sample_buffer_size = (self.config.sampling_rate + 1) * self.config.number_of_ports * 2
self.samples_read = int32()
self.remainder = []
# create voltage channels
for i in xrange(0, 2 * self.config.number_of_ports, 2):
self.CreateAIVoltageChan('{}/ai{}'.format(config.device_id, config.channel_map[i]),
'', DAQmx_Val_Diff,
-config.v_range, config.v_range,
DAQmx_Val_Volts, None)
self.CreateAIVoltageChan('{}/ai{}'.format(config.device_id, config.channel_map[i + 1]),
'', DAQmx_Val_Diff,
-config.dv_range, config.dv_range,
DAQmx_Val_Volts, None)
# configure sampling rate
self.CfgSampClkTiming('',
self.config.sampling_rate,
DAQmx_Val_Rising,
DAQmx_Val_ContSamps,
self.config.sampling_rate)
# register callbacks
self.AutoRegisterEveryNSamplesEvent(DAQmx_Val_Acquired_Into_Buffer, self.config.sampling_rate // 2, 0)
self.AutoRegisterDoneEvent(0)
def EveryNCallback(self):
samples_buffer = numpy.zeros((self.sample_buffer_size,), dtype=numpy.float64)
self.ReadAnalogF64(DAQmx_Val_Auto, 0.0, DAQmx_Val_GroupByScanNumber, samples_buffer,
self.sample_buffer_size, byref(self.samples_read), None)
self.consumer.write((samples_buffer, self.samples_read.value))
def DoneCallback(self, status): # pylint: disable=W0613,R0201
return 0 # The function should return an integer
class AsyncWriter(threading.Thread):
def __init__(self, wait_period=1):
super(AsyncWriter, self).__init__()
self.daemon = True
self.wait_period = wait_period
self.running = threading.Event()
self._stop_signal = threading.Event()
self._queue = Queue()
def write(self, stuff):
if self._stop_signal.is_set():
raise IOError('Attempting to write to {} after it has been closed.'.format(self.__class__.__name__))
self._queue.put(stuff)
def do_write(self, stuff):
raise NotImplementedError()
def run(self):
self.running.set()
while True:
if self._stop_signal.is_set() and self._queue.empty():
break
try:
self.do_write(self._queue.get(block=True, timeout=self.wait_period))
except Empty:
pass # carry on
self.running.clear()
def stop(self):
self._stop_signal.set()
def wait(self):
while self.running.is_set():
time.sleep(self.wait_period)
class PortWriter(object):
def __init__(self, path):
self.path = path
self.fh = open(path, 'w', 0)
self.writer = csv.writer(self.fh)
self.writer.writerow(['power', 'voltage'])
def write(self, row):
self.writer.writerow(row)
def close(self):
self.fh.close()
def __del__(self):
self.close()
class SampleProcessorError(Exception):
pass
class SampleProcessor(AsyncWriter):
def __init__(self, resistor_values, output_directory, labels):
super(SampleProcessor, self).__init__()
self.resistor_values = resistor_values
self.output_directory = output_directory
self.labels = labels
self.number_of_ports = len(resistor_values)
if len(self.labels) != self.number_of_ports:
message = 'Number of labels ({}) does not match number of ports ({}).'
raise SampleProcessorError(message.format(len(self.labels), self.number_of_ports))
self.port_writers = []
def do_write(self, sample_tuple):
samples, number_of_samples = sample_tuple
for i in xrange(0, number_of_samples * self.number_of_ports * 2, self.number_of_ports * 2):
for j in xrange(self.number_of_ports):
V = float(samples[i + 2 * j])
DV = float(samples[i + 2 * j + 1])
P = V * (DV / self.resistor_values[j])
self.port_writers[j].write([P, V])
def start(self):
for label in self.labels:
port_file = self.get_port_file_path(label)
writer = PortWriter(port_file)
self.port_writers.append(writer)
super(SampleProcessor, self).start()
def stop(self):
super(SampleProcessor, self).stop()
self.wait()
for writer in self.port_writers:
writer.close()
def get_port_file_path(self, port_id):
if port_id in self.labels:
return os.path.join(self.output_directory, port_id + '.csv')
else:
raise SampleProcessorError('Invalid port ID: {}'.format(port_id))
def __del__(self):
self.stop()
class DaqRunner(object):
@property
def number_of_ports(self):
return self.config.number_of_ports
def __init__(self, config, output_directory):
self.config = config
self.processor = SampleProcessor(config.resistor_values, output_directory, config.labels)
self.task = ReadSamplesTask(config, self.processor)
self.is_running = False
def start(self):
log.debug('Starting sample processor.')
self.processor.start()
log.debug('Starting DAQ Task.')
self.task.StartTask()
self.is_running = True
log.debug('Runner started.')
def stop(self):
self.is_running = False
log.debug('Stopping DAQ Task.')
self.task.StopTask()
log.debug('Stopping sample processor.')
self.processor.stop()
log.debug('Runner stopped.')
def get_port_file_path(self, port_id):
return self.processor.get_port_file_path(port_id)
if __name__ == '__main__':
from collections import namedtuple
DeviceConfig = namedtuple('DeviceConfig', ['device_id', 'channel_map', 'resistor_values',
'v_range', 'dv_range', 'sampling_rate',
'number_of_ports', 'labels'])
channel_map = (0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23)
resistor_values = [0.005]
labels = ['PORT_0']
dev_config = DeviceConfig('Dev1', channel_map, resistor_values, 2.5, 0.2, 10000, len(resistor_values), labels)
if not len(sys.argv) == 3:
print 'Usage: {} OUTDIR DURATION'.format(os.path.basename(__file__))
sys.exit(1)
output_directory = sys.argv[1]
duration = float(sys.argv[2])
print "Avialable devices:", list_availabe_devices()
runner = DaqRunner(dev_config, output_directory)
runner.start()
time.sleep(duration)
runner.stop()

53
wlauto/external/daq_server/src/daqpower/log.py vendored Normal file
View File

@@ -0,0 +1,53 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from twisted.python import log
__all__ = ['debug', 'info', 'warning', 'error', 'critical', 'start_logging']
debug = lambda x: log.msg(x, logLevel=logging.DEBUG)
info = lambda x: log.msg(x, logLevel=logging.INFO)
warning = lambda x: log.msg(x, logLevel=logging.WARNING)
error = lambda x: log.msg(x, logLevel=logging.ERROR)
critical = lambda x: log.msg(x, logLevel=logging.CRITICAL)
class CustomLoggingObserver(log.PythonLoggingObserver):
def emit(self, eventDict):
if 'logLevel' in eventDict:
level = eventDict['logLevel']
elif eventDict['isError']:
level = logging.ERROR
else:
# All of that just to override this one line from the
# default INFO level...
level = logging.DEBUG
text = log.textFromEventDict(eventDict)
if text is None:
return
self.logger.log(level, text)
logObserver = CustomLoggingObserver()
logObserver.start()
def start_logging(level, fmt='%(asctime)s %(levelname)-8s: %(message)s'):
logging.basicConfig(level=getattr(logging, level), format=fmt)

480
wlauto/external/daq_server/src/daqpower/server.py vendored Normal file
View File

@@ -0,0 +1,480 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=E1101,W0613
from __future__ import division
import os
import sys
import socket
import argparse
import shutil
import time
from datetime import datetime
from zope.interface import implements
from twisted.protocols.basic import LineReceiver
from twisted.internet.protocol import Factory, Protocol
from twisted.internet import reactor, interfaces
from twisted.internet.error import ConnectionLost, ConnectionDone
if __name__ == "__main__": # for debugging
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from daqpower import log
from daqpower.config import DeviceConfiguration
from daqpower.common import DaqServerRequest, DaqServerResponse, Status
try:
from daqpower.daq import DaqRunner, list_available_devices
except ImportError:
# May be using debug mode.
DaqRunner = None
list_available_devices = lambda : ['Dev1']
class ProtocolError(Exception):
pass
class DummyDaqRunner(object):
"""Dummy stub used when running in debug mode."""
num_rows = 200
@property
def number_of_ports(self):
return self.config.number_of_ports
def __init__(self, config, output_directory):
log.info('Creating runner with {} {}'.format(config, output_directory))
self.config = config
self.output_directory = output_directory
self.is_running = False
def start(self):
import csv, random
log.info('runner started')
for i in xrange(self.config.number_of_ports):
rows = [['power', 'voltage']] + [[random.gauss(1.0, 1.0), random.gauss(1.0, 0.1)]
for j in xrange(self.num_rows)]
with open(self.get_port_file_path(self.config.labels[i]), 'wb') as wfh:
writer = csv.writer(wfh)
writer.writerows(rows)
self.is_running = True
def stop(self):
self.is_running = False
log.info('runner stopped')
def get_port_file_path(self, port_id):
if port_id in self.config.labels:
return os.path.join(self.output_directory, '{}.csv'.format(port_id))
else:
raise Exception('Invalid port id: {}'.format(port_id))
class DaqServer(object):
def __init__(self, base_output_directory):
self.base_output_directory = os.path.abspath(base_output_directory)
if os.path.isdir(self.base_output_directory):
log.info('Using output directory: {}'.format(self.base_output_directory))
else:
log.info('Creating new output directory: {}'.format(self.base_output_directory))
os.makedirs(self.base_output_directory)
self.runner = None
self.output_directory = None
self.labels = None
def configure(self, config_string):
message = None
if self.runner:
message = 'Configuring a new session before previous session has been terminated.'
log.warning(message)
if self.runner.is_running:
self.runner.stop()
config = DeviceConfiguration.deserialize(config_string)
config.validate()
self.output_directory = self._create_output_directory()
self.labels = config.labels
log.info('Writing port files to {}'.format(self.output_directory))
self.runner = DaqRunner(config, self.output_directory)
return message
def start(self):
if self.runner:
if not self.runner.is_running:
self.runner.start()
else:
message = 'Calling start() before stop() has been called. Data up to this point will be lost.'
log.warning(message)
self.runner.stop()
self.runner.start()
return message
else:
raise ProtocolError('Start called before a session has been configured.')
def stop(self):
if self.runner:
if self.runner.is_running:
self.runner.stop()
else:
message = 'Attempting to stop() before start() was invoked.'
log.warning(message)
self.runner.stop()
return message
else:
raise ProtocolError('Stop called before a session has been configured.')
def list_devices(self):
return list_available_devices()
def list_ports(self):
return self.labels
def list_port_files(self):
if not self.runner:
raise ProtocolError('Attempting to list port files before session has been configured.')
ports_with_files = []
for port_id in self.labels:
path = self.get_port_file_path(port_id)
if os.path.isfile(path):
ports_with_files.append(port_id)
return ports_with_files
def get_port_file_path(self, port_id):
if not self.runner:
raise ProtocolError('Attempting to get port file path before session has been configured.')
return self.runner.get_port_file_path(port_id)
def terminate(self):
message = None
if self.runner:
if self.runner.is_running:
message = 'Terminating session before runner has been stopped.'
log.warning(message)
self.runner.stop()
self.runner = None
if self.output_directory and os.path.isdir(self.output_directory):
shutil.rmtree(self.output_directory)
self.output_directory = None
log.info('Session terminated.')
else: # Runner has not been created.
message = 'Attempting to close session before it has been configured.'
log.warning(message)
return message
def _create_output_directory(self):
basename = datetime.now().strftime('%Y-%m-%d_%H%M%S%f')
dirname = os.path.join(self.base_output_directory, basename)
os.makedirs(dirname)
return dirname
def __del__(self):
if self.runner:
self.runner.stop()
def __str__(self):
return '({})'.format(self.base_output_directory)
__repr__ = __str__
class DaqControlProtocol(LineReceiver): # pylint: disable=W0223
def __init__(self, daq_server):
self.daq_server = daq_server
self.factory = None
def lineReceived(self, line):
line = line.strip()
log.info('Received: {}'.format(line))
try:
request = DaqServerRequest.deserialize(line)
except Exception, e: # pylint: disable=W0703
self.sendError('Received bad request ({}: {})'.format(e.__class__.__name__, e.message))
else:
self.processRequest(request)
def processRequest(self, request):
try:
if request.command == 'configure':
self.configure(request)
elif request.command == 'start':
self.start(request)
elif request.command == 'stop':
self.stop(request)
elif request.command == 'list_devices':
self.list_devices(request)
elif request.command == 'list_ports':
self.list_ports(request)
elif request.command == 'list_port_files':
self.list_port_files(request)
elif request.command == 'pull':
self.pull_port_data(request)
elif request.command == 'close':
self.terminate(request)
else:
self.sendError('Received unknown command: {}'.format(request.command))
except Exception, e: # pylint: disable=W0703
self.sendError('{}: {}'.format(e.__class__.__name__, e.message))
def configure(self, request):
if 'config' in request.params:
result = self.daq_server.configure(request.params['config'])
if not result:
self.sendResponse(Status.OK)
else:
self.sendResponse(Status.OKISH, message=result)
else:
self.sendError('Invalid config; config string not provided.')
def start(self, request):
result = self.daq_server.start()
if not result:
self.sendResponse(Status.OK)
else:
self.sendResponse(Status.OKISH, message=result)
def stop(self, request):
result = self.daq_server.stop()
if not result:
self.sendResponse(Status.OK)
else:
self.sendResponse(Status.OKISH, message=result)
def pull_port_data(self, request):
if 'port_id' in request.params:
port_id = request.params['port_id']
port_file = self.daq_server.get_port_file_path(port_id)
if os.path.isfile(port_file):
port = self._initiate_file_transfer(port_file)
self.sendResponse(Status.OK, data={'port_number': port})
else:
self.sendError('File for port {} does not exist.'.format(port_id))
else:
self.sendError('Invalid pull request; port id not provided.')
def list_devices(self, request):
devices = self.daq_server.list_devices()
self.sendResponse(Status.OK, data={'devices': devices})
def list_ports(self, request):
port_labels = self.daq_server.list_ports()
self.sendResponse(Status.OK, data={'ports': port_labels})
def list_port_files(self, request):
port_labels = self.daq_server.list_port_files()
self.sendResponse(Status.OK, data={'ports': port_labels})
def terminate(self, request):
status = Status.OK
message = ''
if self.factory.transfer_sessions:
message = 'Terminating with file transfer sessions in progress. '
log.warning(message)
for session in self.factory.transfer_sessions:
self.factory.transferComplete(session)
message += self.daq_server.terminate() or ''
if message:
status = Status.OKISH
self.sendResponse(status, message)
def sendError(self, message):
log.error(message)
self.sendResponse(Status.ERROR, message)
def sendResponse(self, status, message=None, data=None):
response = DaqServerResponse(status, message=message, data=data)
self.sendLine(response.serialize())
def sendLine(self, line):
log.info('Responding: {}'.format(line))
LineReceiver.sendLine(self, line.replace('\r\n',''))
def _initiate_file_transfer(self, filepath):
sender_factory = FileSenderFactory(filepath, self.factory)
connector = reactor.listenTCP(0, sender_factory)
self.factory.transferInitiated(sender_factory, connector)
return connector.getHost().port
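# Illustrative exchange handled by this protocol (one JSON message per line;
# values made up):
#
#     -> {"command": "list_ports", "params": {}}
#     <- {"status": "OK", "message": "", "data": {"ports": ["PORT_0"]}}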
class DaqFactory(Factory):
protocol = DaqControlProtocol
check_alive_period = 5 * 60
max_transfer_lifetime = 30 * 60
def __init__(self, server):
self.server = server
self.transfer_sessions = {}
def buildProtocol(self, addr):
proto = DaqControlProtocol(self.server)
proto.factory = self
reactor.callLater(self.check_alive_period, self.pulse)
return proto
def clientConnectionLost(self, connector, reason):
log.msg('client connection lost: {}.'.format(reason))
if not isinstance(reason, ConnectionLost):
log.msg('ERROR: Client terminated connection mid-transfer.')
for session in self.transfer_sessions:
self.transferComplete(session)
def transferInitiated(self, session, connector):
self.transfer_sessions[session] = (time.time(), connector)
def transferComplete(self, session, reason='OK'):
if reason != 'OK':
log.error(reason)
self.transfer_sessions[session][1].stopListening()
del self.transfer_sessions[session]
def pulse(self):
"""Close down any file tranfer sessions that have been open for too long."""
current_time = time.time()
for session in self.transfer_sessions:
start_time, conn = self.transfer_sessions[session]
if (current_time - start_time) > self.max_transfer_lifetime:
message = '{} session on port {} timed out'
self.transferComplete(session, message.format(session, conn.getHost().port))
if self.transfer_sessions:
reactor.callLater(self.check_alive_period, self.pulse)
def __str__(self):
return '<DAQ {}>'.format(self.server)
__repr__ = __str__
class FileReader(object):
implements(interfaces.IPushProducer)
def __init__(self, filepath):
self.fh = open(filepath)
self.proto = None
self.done = False
self._paused = True
def setProtocol(self, proto):
self.proto = proto
def resumeProducing(self):
if not self.proto:
raise ProtocolError('resumeProducing called with no protocol set.')
self._paused = False
try:
while not self._paused:
line = self.fh.next().rstrip('\n') + '\r\n'
self.proto.transport.write(line)
except StopIteration:
log.debug('Sent everything.')
self.stopProducing()
def pauseProducing(self):
self._paused = True
def stopProducing(self):
self.done = True
self.fh.close()
self.proto.transport.unregisterProducer()
self.proto.transport.loseConnection()
class FileSenderProtocol(Protocol):
def __init__(self, reader):
self.reader = reader
self.factory = None
def connectionMade(self):
self.transport.registerProducer(self.reader, True)
self.reader.resumeProducing()
def connectionLost(self, reason=ConnectionDone):
if self.reader.done:
self.factory.transferComplete()
else:
self.reader.pauseProducing()
self.transport.unregisterProducer()
class FileSenderFactory(Factory):
@property
def done(self):
if self.reader:
return self.reader.done
else:
return None
def __init__(self, path, owner):
self.path = os.path.abspath(path)
self.reader = None
self.owner = owner
def buildProtocol(self, addr):
if not self.reader:
self.reader = FileReader(self.path)
proto = FileSenderProtocol(self.reader)
proto.factory = self
self.reader.setProtocol(proto)
return proto
def transferComplete(self):
self.owner.transferComplete(self)
def __hash__(self):
return hash(self.path)
def __str__(self):
return '<FileSender {}>'.format(self.path)
__repr__ = __str__
def run_server():
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--directory', help='Working directory', metavar='DIR', default='.')
parser.add_argument('-p', '--port', help='port the server will listen on.',
metavar='PORT', default=45677, type=int)
parser.add_argument('--debug', help='Run in debug mode (no DAQ connected).',
action='store_true', default=False)
parser.add_argument('--verbose', help='Produce verbose output.', action='store_true', default=False)
args = parser.parse_args()
if args.debug:
global DaqRunner # pylint: disable=W0603
DaqRunner = DummyDaqRunner
else:
if not DaqRunner:
raise ImportError('DaqRunner')
if args.verbose or args.debug:
log.start_logging('DEBUG')
else:
log.start_logging('INFO')
server = DaqServer(args.directory)
reactor.listenTCP(args.port, DaqFactory(server)).getHost()
hostname = socket.gethostbyname(socket.gethostname())
log.info('Listening on {}:{}'.format(hostname, args.port))
reactor.run()
if __name__ == "__main__":
run_server()

3
wlauto/external/daq_server/src/scripts/run-daq-server vendored Executable file
View File

@@ -0,0 +1,3 @@
#!/usr/bin/env python
from daqpower.server import run_server
run_server()

3
wlauto/external/daq_server/src/scripts/send-daq-command vendored Executable file
View File

@@ -0,0 +1,3 @@
#!/usr/bin/env python
from daqpower.client import run_send_command
run_send_command()

52
wlauto/external/daq_server/src/setup.py vendored Normal file
View File

@@ -0,0 +1,52 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from distutils.core import setup
import daqpower
warnings.filterwarnings('ignore', "Unknown distribution option: 'install_requires'")
params = dict(
name='daqpower',
version=daqpower.__version__,
packages=[
'daqpower',
],
scripts=[
'scripts/run-daq-server',
'scripts/send-daq-command',
],
url='N/A',
maintainer='workload-automation',
maintainer_email='workload-automation@arm.com',
install_requires=[
'twisted',
'PyDAQmx',
],
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'License :: Other/Proprietary License',
'Operating System :: Unix',
'Programming Language :: Python :: 2.7',
],
)
setup(**params)

12
wlauto/external/louie/LICENSE vendored Normal file
View File

@@ -0,0 +1,12 @@
This directory contains Louie package that has been modified by ARM Ltd.
Original Louie package is licensed under BSD license. ARM Ltd. changes are
licensed under Apache version 2 license.
Original Louie package may be found here:
https://pypi.python.org/pypi/Louie/1.1
The text of the BSD License may be viewed here:
http://opensource.org/licenses/bsd-license.php

46
wlauto/external/louie/__init__.py vendored Normal file
View File

@@ -0,0 +1,46 @@
__all__ = [
'dispatcher',
'error',
'plugin',
'robustapply',
'saferef',
'sender',
'signal',
'version',
'connect',
'disconnect',
'get_all_receivers',
'reset',
'send',
'send_exact',
'send_minimal',
'send_robust',
'install_plugin',
'remove_plugin',
'Plugin',
'QtWidgetPlugin',
'TwistedDispatchPlugin',
'Anonymous',
'Any',
'All',
'Signal',
]
import louie.dispatcher, louie.error, louie.plugin, louie.robustapply, \
louie.saferef, louie.sender, louie.signal, louie.version
from louie.dispatcher import \
connect, disconnect, get_all_receivers, reset, \
send, send_exact, send_minimal, send_robust
from louie.plugin import \
install_plugin, remove_plugin, Plugin, \
QtWidgetPlugin, TwistedDispatchPlugin
from louie.sender import Anonymous, Any
from louie.signal import All, Signal

591
wlauto/external/louie/dispatcher.py vendored Normal file
View File

@@ -0,0 +1,591 @@
"""Multiple-producer-multiple-consumer signal-dispatching.
``dispatcher`` is the core of Louie, providing the primary API and the
core logic for the system.
Internal attributes:
- ``WEAKREF_TYPES``: Tuple of types/classes which represent weak
references to receivers, and thus must be dereferenced on retrieval
to retrieve the callable object
- ``connections``::
{ senderkey (id) : { signal : [receivers...] } }
- ``senders``: Used for cleaning up sender references on sender
deletion::
{ senderkey (id) : weakref(sender) }
- ``senders_back``: Used for cleaning up receiver references on receiver
deletion::
{ receiverkey (id) : [senderkey (id)...] }
"""
import os
import weakref
try:
set
except NameError:
from sets import Set as set, ImmutableSet as frozenset
from louie import error
from louie import robustapply
from louie import saferef
from louie.sender import Any, Anonymous
from louie.signal import All
from prioritylist import PriorityList
# Support for statistics.
if __debug__:
connects = 0
disconnects = 0
sends = 0
def print_stats():
print ('\n'
'Louie connects: %i\n'
'Louie disconnects: %i\n'
'Louie sends: %i\n'
'\n') % (connects, disconnects, sends)
if 'PYDISPATCH_STATS' in os.environ:
import atexit
atexit.register(print_stats)
WEAKREF_TYPES = (weakref.ReferenceType, saferef.BoundMethodWeakref)
connections = {}
senders = {}
senders_back = {}
plugins = []
def reset():
"""Reset the state of Louie.
Useful during unit testing. Should be avoided otherwise.
"""
global connections, senders, senders_back, plugins
connections = {}
senders = {}
senders_back = {}
plugins = []
def connect(receiver, signal=All, sender=Any, weak=True, priority=0):
"""Connect ``receiver`` to ``sender`` for ``signal``.
- ``receiver``: A callable Python object which is to receive
messages/signals/events. Receivers must be hashable objects.
If weak is ``True``, then receiver must be weak-referencable (more
precisely ``saferef.safe_ref()`` must be able to create a
reference to the receiver).
Receivers are fairly flexible in their specification, as the
machinery in the ``robustapply`` module takes care of most of the
details regarding figuring out appropriate subsets of the sent
arguments to apply to a given receiver.
Note: If ``receiver`` is itself a weak reference (a callable), it
will be de-referenced by the system's machinery, so *generally*
weak references are not suitable as receivers, though some use
might be found for the facility whereby a higher-level library
passes in pre-weakrefed receiver references.
- ``signal``: The signal to which the receiver should respond.
If ``All``, receiver will receive all signals from the indicated
sender (which might also be ``All``, but is not necessarily
``All``).
Otherwise must be a hashable Python object other than ``None``
(``DispatcherError`` raised on ``None``).
- ``sender``: The sender to which the receiver should respond.
If ``Any``, receiver will receive the indicated signals from any
sender.
If ``Anonymous``, receiver will only receive indicated signals
from ``send``/``send_exact`` which do not specify a sender, or
specify ``Anonymous`` explicitly as the sender.
Otherwise can be any python object.
- ``weak``: Whether to use weak references to the receiver.
By default, the module will attempt to use weak references to
the receiver objects. If this parameter is ``False``, then strong
references will be used.
    - ``priority``: Specifies the priority with which the receiver
      should be notified; higher-priority receivers are called first.
Returns ``None``, may raise ``DispatcherTypeError``.
"""
if signal is None:
raise error.DispatcherTypeError(
'Signal cannot be None (receiver=%r sender=%r)'
% (receiver, sender))
if weak:
receiver = saferef.safe_ref(receiver, on_delete=_remove_receiver)
senderkey = id(sender)
    if senderkey in connections:
signals = connections[senderkey]
else:
connections[senderkey] = signals = {}
# Keep track of senders for cleanup.
# Is Anonymous something we want to clean up?
if sender not in (None, Anonymous, Any):
def remove(object, senderkey=senderkey):
_remove_sender(senderkey=senderkey)
# Skip objects that can not be weakly referenced, which means
# they won't be automatically cleaned up, but that's too bad.
try:
weak_sender = weakref.ref(sender, remove)
senders[senderkey] = weak_sender
        except Exception:
pass
receiver_id = id(receiver)
# get current set, remove any current references to
# this receiver in the set, including back-references
    if signal in signals:
receivers = signals[signal]
_remove_old_back_refs(senderkey, signal, receiver, receivers)
else:
receivers = signals[signal] = PriorityList()
try:
current = senders_back.get(receiver_id)
if current is None:
senders_back[receiver_id] = current = []
if senderkey not in current:
current.append(senderkey)
    except Exception:
pass
receivers.add(receiver, priority)
# Update stats.
if __debug__:
global connects
connects += 1
def disconnect(receiver, signal=All, sender=Any, weak=True):
"""Disconnect ``receiver`` from ``sender`` for ``signal``.
- ``receiver``: The registered receiver to disconnect.
- ``signal``: The registered signal to disconnect.
- ``sender``: The registered sender to disconnect.
- ``weak``: The weakref state to disconnect.
``disconnect`` reverses the process of ``connect``, the semantics for
the individual elements are logically equivalent to a tuple of
``(receiver, signal, sender, weak)`` used as a key to be deleted
from the internal routing tables. (The actual process is slightly
more complex but the semantics are basically the same).
Note: Using ``disconnect`` is not required to cleanup routing when
an object is deleted; the framework will remove routes for deleted
objects automatically. It's only necessary to disconnect if you
want to stop routing to a live object.
Returns ``None``, may raise ``DispatcherTypeError`` or
``DispatcherKeyError``.
"""
if signal is None:
raise error.DispatcherTypeError(
'Signal cannot be None (receiver=%r sender=%r)'
% (receiver, sender))
if weak:
receiver = saferef.safe_ref(receiver)
senderkey = id(sender)
try:
signals = connections[senderkey]
receivers = signals[signal]
except KeyError:
raise error.DispatcherKeyError(
'No receivers found for signal %r from sender %r'
% (signal, sender)
)
try:
# also removes from receivers
_remove_old_back_refs(senderkey, signal, receiver, receivers)
except ValueError:
raise error.DispatcherKeyError(
'No connection to receiver %s for signal %s from sender %s'
% (receiver, signal, sender)
)
_cleanup_connections(senderkey, signal)
# Update stats.
if __debug__:
global disconnects
disconnects += 1
def get_receivers(sender=Any, signal=All):
"""Get list of receivers from global tables.
This function allows you to retrieve the raw list of receivers
from the connections table for the given sender and signal pair.
Note: There is no guarantee that this is the actual list stored in
the connections table, so the value should be treated as a simple
    iterable/truth value rather than, for instance, a list to which you
might append new records.
Normally you would use ``live_receivers(get_receivers(...))`` to
retrieve the actual receiver objects as an iterable object.
"""
try:
return connections[id(sender)][signal]
except KeyError:
return []
def live_receivers(receivers):
"""Filter sequence of receivers to get resolved, live receivers.
This is a generator which will iterate over the passed sequence,
checking for weak references and resolving them, then returning
all live receivers.
"""
for receiver in receivers:
if isinstance(receiver, WEAKREF_TYPES):
# Dereference the weak reference.
receiver = receiver()
if receiver is not None:
# Check installed plugins to make sure this receiver is
# live.
live = True
for plugin in plugins:
if not plugin.is_live(receiver):
live = False
break
if live:
yield receiver
def get_all_receivers(sender=Any, signal=All):
"""Get list of all receivers from global tables.
    This gets all receivers which should receive the given signal from
    sender; each receiver is produced only once by the resulting
    generator.
"""
yielded = set()
for receivers in (
# Get receivers that receive *this* signal from *this* sender.
get_receivers(sender, signal),
# Add receivers that receive *all* signals from *this* sender.
get_receivers(sender, All),
# Add receivers that receive *this* signal from *any* sender.
get_receivers(Any, signal),
# Add receivers that receive *all* signals from *any* sender.
get_receivers(Any, All),
):
for receiver in receivers:
if receiver: # filter out dead instance-method weakrefs
try:
                    if receiver not in yielded:
yielded.add(receiver)
yield receiver
except TypeError:
# dead weakrefs raise TypeError on hash...
pass
def send(signal=All, sender=Anonymous, *arguments, **named):
"""Send ``signal`` from ``sender`` to all connected receivers.
- ``signal``: (Hashable) signal value; see ``connect`` for details.
- ``sender``: The sender of the signal.
If ``Any``, only receivers registered for ``Any`` will receive the
message.
If ``Anonymous``, only receivers registered to receive messages
from ``Anonymous`` or ``Any`` will receive the message.
Otherwise can be any Python object (normally one registered with
a connect if you actually want something to occur).
- ``arguments``: Positional arguments which will be passed to *all*
receivers. Note that this may raise ``TypeError`` if the receivers
do not allow the particular arguments. Note also that arguments
are applied before named arguments, so they should be used with
care.
- ``named``: Named arguments which will be filtered according to the
parameters of the receivers to only provide those acceptable to
the receiver.
Return a list of tuple pairs ``[(receiver, response), ...]``
    If any receiver raises an error, the error propagates back through
    send, terminating the dispatch loop; it is therefore quite possible
    that not all receivers will be called if one raises an error.
"""
# Call each receiver with whatever arguments it can accept.
# Return a list of tuple pairs [(receiver, response), ... ].
responses = []
for receiver in live_receivers(get_all_receivers(sender, signal)):
# Wrap receiver using installed plugins.
original = receiver
for plugin in plugins:
receiver = plugin.wrap_receiver(receiver)
response = robustapply.robust_apply(
receiver, original,
signal=signal,
sender=sender,
*arguments,
**named
)
responses.append((receiver, response))
# Update stats.
if __debug__:
global sends
sends += 1
return responses
def send_minimal(signal=All, sender=Anonymous, *arguments, **named):
"""Like ``send``, but does not attach ``signal`` and ``sender``
arguments to the call to the receiver."""
# Call each receiver with whatever arguments it can accept.
# Return a list of tuple pairs [(receiver, response), ... ].
responses = []
for receiver in live_receivers(get_all_receivers(sender, signal)):
# Wrap receiver using installed plugins.
original = receiver
for plugin in plugins:
receiver = plugin.wrap_receiver(receiver)
response = robustapply.robust_apply(
receiver, original,
*arguments,
**named
)
responses.append((receiver, response))
# Update stats.
if __debug__:
global sends
sends += 1
return responses
def send_exact(signal=All, sender=Anonymous, *arguments, **named):
"""Send ``signal`` only to receivers registered for exact message.
``send_exact`` allows for avoiding ``Any``/``Anonymous`` registered
handlers, sending only to those receivers explicitly registered
for a particular signal on a particular sender.
"""
responses = []
for receiver in live_receivers(get_receivers(sender, signal)):
# Wrap receiver using installed plugins.
original = receiver
for plugin in plugins:
receiver = plugin.wrap_receiver(receiver)
response = robustapply.robust_apply(
receiver, original,
signal=signal,
sender=sender,
*arguments,
**named
)
responses.append((receiver, response))
return responses
def send_robust(signal=All, sender=Anonymous, *arguments, **named):
"""Send ``signal`` from ``sender`` to all connected receivers catching
errors
- ``signal``: (Hashable) signal value, see connect for details
- ``sender``: The sender of the signal.
If ``Any``, only receivers registered for ``Any`` will receive the
message.
If ``Anonymous``, only receivers registered to receive messages
from ``Anonymous`` or ``Any`` will receive the message.
Otherwise can be any Python object (normally one registered with
a connect if you actually want something to occur).
- ``arguments``: Positional arguments which will be passed to *all*
receivers. Note that this may raise ``TypeError`` if the receivers
do not allow the particular arguments. Note also that arguments
are applied before named arguments, so they should be used with
care.
- ``named``: Named arguments which will be filtered according to the
parameters of the receivers to only provide those acceptable to
the receiver.
Return a list of tuple pairs ``[(receiver, response), ... ]``
If any receiver raises an error (specifically, any subclass of
``Exception``), the error instance is returned as the result for
that receiver.
"""
# Call each receiver with whatever arguments it can accept.
# Return a list of tuple pairs [(receiver, response), ... ].
responses = []
for receiver in live_receivers(get_all_receivers(sender, signal)):
original = receiver
for plugin in plugins:
receiver = plugin.wrap_receiver(receiver)
try:
response = robustapply.robust_apply(
receiver, original,
signal=signal,
sender=sender,
*arguments,
**named
)
except Exception, err:
responses.append((receiver, err))
else:
responses.append((receiver, response))
return responses
def _remove_receiver(receiver):
"""Remove ``receiver`` from connections."""
if not senders_back:
# During module cleanup the mapping will be replaced with None.
return False
backKey = id(receiver)
for senderkey in senders_back.get(backKey, ()):
try:
signals = connections[senderkey].keys()
except KeyError:
pass
else:
for signal in signals:
try:
receivers = connections[senderkey][signal]
except KeyError:
pass
else:
try:
receivers.remove(receiver)
except Exception:
pass
_cleanup_connections(senderkey, signal)
try:
del senders_back[backKey]
except KeyError:
pass
def _cleanup_connections(senderkey, signal):
"""Delete empty signals for ``senderkey``. Delete ``senderkey`` if
empty."""
try:
receivers = connections[senderkey][signal]
    except KeyError:
pass
else:
if not receivers:
# No more connected receivers. Therefore, remove the signal.
try:
signals = connections[senderkey]
except KeyError:
pass
else:
del signals[signal]
if not signals:
# No more signal connections. Therefore, remove the sender.
_remove_sender(senderkey)
def _remove_sender(senderkey):
"""Remove ``senderkey`` from connections."""
_remove_back_refs(senderkey)
try:
del connections[senderkey]
except KeyError:
pass
# Senderkey will only be in senders dictionary if sender
# could be weakly referenced.
try:
del senders[senderkey]
    except KeyError:
pass
def _remove_back_refs(senderkey):
"""Remove all back-references to this ``senderkey``."""
try:
signals = connections[senderkey]
except KeyError:
signals = None
else:
for signal, receivers in signals.iteritems():
for receiver in receivers:
_kill_back_ref(receiver, senderkey)
def _remove_old_back_refs(senderkey, signal, receiver, receivers):
"""Kill old ``senders_back`` references from ``receiver``.
This guards against multiple registration of the same receiver for
a given signal and sender leaking memory as old back reference
records build up.
Also removes old receiver instance from receivers.
"""
try:
index = receivers.index(receiver)
# need to scan back references here and remove senderkey
except ValueError:
return False
else:
old_receiver = receivers[index]
del receivers[index]
found = 0
        # This sender's signal map is keyed by senderkey, not signal.
        signals = connections.get(senderkey)
        if signals is not None:
            for sig, recs in signals.iteritems():
if sig != signal:
for rec in recs:
if rec is old_receiver:
found = 1
break
if not found:
_kill_back_ref(old_receiver, senderkey)
return True
return False
def _kill_back_ref(receiver, senderkey):
"""Do actual removal of back reference from ``receiver`` to
``senderkey``."""
receiverkey = id(receiver)
senders = senders_back.get(receiverkey, ())
while senderkey in senders:
try:
senders.remove(senderkey)
        except Exception:
break
if not senders:
try:
del senders_back[receiverkey]
except KeyError:
pass
return True

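A minimal usage sketch of the dispatcher above (Python 2, assuming the vendored package is importable as ``louie``, as these modules themselves assume; the receiver names and the 'demo' signal are illustrative). Receivers fire highest-priority first, and ``send`` returns (receiver, response) pairs:

import louie

def low(value):
    return 'low:%s' % value

def high(value):
    return 'high:%s' % value

# Both receivers listen for the 'demo' signal from any sender.
louie.connect(low, signal='demo', priority=1)
louie.connect(high, signal='demo', priority=10)

# robust_apply passes each receiver only the arguments it accepts,
# so the extra signal/sender keywords are filtered out here.
responses = louie.send('demo', value=42)
assert [r for _, r in responses] == ['high:42', 'low:42']
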
22
wlauto/external/louie/error.py vendored Normal file
View File

@@ -0,0 +1,22 @@
"""Error types for Louie."""
class LouieError(Exception):
"""Base class for all Louie errors"""
class DispatcherError(LouieError):
"""Base class for all Dispatcher errors"""
class DispatcherKeyError(KeyError, DispatcherError):
"""Error raised when unknown (sender, signal) specified"""
class DispatcherTypeError(TypeError, DispatcherError):
"""Error raised when inappropriate signal-type specified (None)"""
class PluginTypeError(TypeError, LouieError):
"""Error raise when trying to install more than one plugin of a
certain type."""

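A small sketch of how the dual inheritance above can be used (the handler and signal name are illustrative): ``DispatcherKeyError`` can be caught either as a Louie error or as a plain ``KeyError``:

import louie
from louie import error

def handler():
    pass

try:
    # Disconnecting a receiver that was never connected raises
    # DispatcherKeyError, which is also a subclass of KeyError.
    louie.disconnect(handler, 'never-connected')
except error.DispatcherKeyError:
    pass
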
108
wlauto/external/louie/plugin.py vendored Normal file
View File

@@ -0,0 +1,108 @@
"""Common plugins for Louie."""
from louie import dispatcher
from louie import error
def install_plugin(plugin):
cls = plugin.__class__
for p in dispatcher.plugins:
if p.__class__ is cls:
raise error.PluginTypeError(
'Plugin of type %r already installed.' % cls)
dispatcher.plugins.append(plugin)
def remove_plugin(plugin):
dispatcher.plugins.remove(plugin)
class Plugin(object):
"""Base class for Louie plugins.
Plugins are used to extend or alter the behavior of Louie
in a uniform way without having to modify the Louie code
itself.
"""
def is_live(self, receiver):
"""Return True if the receiver is still live.
Only called for receivers who have already been determined to
be live by default Louie semantics.
"""
return True
def wrap_receiver(self, receiver):
"""Return a callable that passes arguments to the receiver.
Useful when you want to change the behavior of all receivers.
"""
return receiver
class QtWidgetPlugin(Plugin):
"""A Plugin for Louie that knows how to handle Qt widgets
when using PyQt built with SIP 4 or higher.
Weak references are not useful when dealing with QWidget
instances, because even after a QWidget is closed and destroyed,
only the C++ object is destroyed. The Python 'shell' object
remains, but raises a RuntimeError when an attempt is made to call
an underlying QWidget method.
This plugin alleviates this behavior, and if a QWidget instance is
found that is just an empty shell, it prevents Louie from
dispatching to any methods on those objects.
"""
def __init__(self):
try:
import qt
except ImportError:
self.is_live = self._is_live_no_qt
else:
self.qt = qt
def is_live(self, receiver):
"""If receiver is a method on a QWidget, only return True if
it hasn't been destroyed."""
if (hasattr(receiver, 'im_self') and
isinstance(receiver.im_self, self.qt.QWidget)
):
try:
receiver.im_self.x()
except RuntimeError:
return False
return True
def _is_live_no_qt(self, receiver):
return True
class TwistedDispatchPlugin(Plugin):
"""Plugin for Louie that wraps all receivers in callables
that return Twisted Deferred objects.
When the wrapped receiver is called, it adds a call to the actual
receiver to the reactor event loop, and returns a Deferred that is
called back with the result.
"""
def __init__(self):
# Don't import reactor ourselves, but make access to it
# easier.
from twisted import internet
from twisted.internet.defer import Deferred
self._internet = internet
self._Deferred = Deferred
def wrap_receiver(self, receiver):
def wrapper(*args, **kw):
d = self._Deferred()
def called(dummy):
return receiver(*args, **kw)
d.addCallback(called)
self._internet.reactor.callLater(0, d.callback, None)
return d
return wrapper

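A sketch of a custom plugin built on the hooks above; the ``MutePlugin`` class and the ``muted`` attribute are invented for illustration. Overriding ``is_live`` lets a plugin filter receivers without disconnecting them:

import louie

class MutePlugin(louie.Plugin):
    """Treat receivers flagged with a ``muted`` attribute as dead."""
    def is_live(self, receiver):
        return not getattr(receiver, 'muted', False)

def listener():
    pass

louie.connect(listener, 'ping')
louie.install_plugin(MutePlugin())
listener.muted = True
assert louie.send('ping') == []   # the muted listener is skipped
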
128
wlauto/external/louie/prioritylist.py vendored Normal file
View File

@@ -0,0 +1,128 @@
"""OrderedList class
This class keeps its elements ordered according to their priority.
"""
from collections import defaultdict
import numbers
from bisect import insort
class PriorityList(object):
def __init__(self):
"""
        Create a PriorityList object that externally behaves like a
        list but maintains the order of its elements according to
        their priority.
"""
self.elements = defaultdict(list)
self.is_ordered = True
self.priorities = []
self.size = 0
self._cached_elements = None
def __del__(self):
pass
def __iter__(self):
"""
        This method makes the PriorityList class iterable.
"""
self._order_elements()
for priority in reversed(self.priorities): # highest priority first
for element in self.elements[priority]:
yield element
def __getitem__(self, index):
self._order_elements()
return self._to_list()[index]
def __delitem__(self, index):
self._order_elements()
if isinstance(index, numbers.Integral):
index = int(index)
if index < 0:
index_range = [len(self)+index]
else:
index_range = [index]
elif isinstance(index, slice):
            # slice.indices() copes with open-ended and negative slices.
            index_range = range(*index.indices(len(self)))
else:
raise ValueError('Invalid index {}'.format(index))
current_global_offset = 0
        priority_counts = {p: len(self.elements[p]) for p in self.priorities}
for priority in self.priorities:
if not index_range:
break
priority_offset = 0
while index_range:
del_index = index_range[0]
if priority_counts[priority] + current_global_offset <= del_index:
current_global_offset += priority_counts[priority]
break
within_priority_index = del_index - (current_global_offset + priority_offset)
self._delete(priority, within_priority_index)
priority_offset += 1
index_range.pop(0)
def __len__(self):
return self.size
def add(self, new_element, priority=0, force_ordering=True):
"""
        Add a new element to the list.
        - ``new_element``: the element to be inserted into the PriorityList
        - ``priority``: the priority of the element, which determines its
          order within the list (higher priorities are iterated first)
        - ``force_ordering``: indicates whether elements should be ordered
          immediately. If set to False, ordering happens on demand (lazily).
"""
self._add_element(new_element, priority)
if priority not in self.priorities:
self._add_priority(priority, force_ordering)
def index(self, element):
return self._to_list().index(element)
def remove(self, element):
index = self.index(element)
self.__delitem__(index)
def _order_elements(self):
if not self.is_ordered:
self.priorities = sorted(self.priorities)
self.is_ordered = True
def _to_list(self):
        if self._cached_elements is None:
self._order_elements()
self._cached_elements = []
for priority in self.priorities:
self._cached_elements += self.elements[priority]
return self._cached_elements
def _add_element(self, element, priority):
self.elements[priority].append(element)
self.size += 1
self._cached_elements = None
def _delete(self, priority, priority_index):
del self.elements[priority][priority_index]
self.size -= 1
if len(self.elements[priority]) == 0:
self.priorities.remove(priority)
self._cached_elements = None
def _add_priority(self, priority, force_ordering):
if force_ordering and self.is_ordered:
insort(self.priorities, priority)
elif not force_ordering:
self.priorities.append(priority)
self.is_ordered = False
elif not self.is_ordered:
self.priorities.append(priority)
self._order_elements()
else:
raise AssertionError('Should never get here.')

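A standalone sketch of the PriorityList behaviour implemented above (element names are illustrative): iteration yields higher-priority elements first, while removal works through the ascending internal ordering:

from louie.prioritylist import PriorityList

pl = PriorityList()
pl.add('medium', priority=5)
pl.add('first', priority=10)
pl.add('last', priority=0)
assert list(pl) == ['first', 'medium', 'last']  # highest priority first
pl.remove('medium')
assert list(pl) == ['first', 'last']
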
58
wlauto/external/louie/robustapply.py vendored Normal file
View File

@@ -0,0 +1,58 @@
"""Robust apply mechanism.
Provides a function ``robust_apply``, which can sort out what arguments a given
callable object can take, and subset the given arguments to match only
those which are acceptable.
"""
def function(receiver):
"""Get function-like callable object for given receiver.
returns (function_or_method, codeObject, fromMethod)
If fromMethod is true, then the callable already has its first
argument bound.
"""
if hasattr(receiver, '__call__'):
# receiver is a class instance; assume it is callable.
# Reassign receiver to the actual method that will be called.
c = receiver.__call__
if hasattr(c, 'im_func') or hasattr(c, 'im_code'):
receiver = c
if hasattr(receiver, 'im_func'):
# receiver is an instance-method.
return receiver, receiver.im_func.func_code, 1
elif not hasattr(receiver, 'func_code'):
raise ValueError(
            'unknown receiver type %s %s' % (receiver, type(receiver)))
return receiver, receiver.func_code, 0
def robust_apply(receiver, signature, *arguments, **named):
"""Call receiver with arguments and appropriate subset of named.
``signature`` is the callable used to determine the call signature
of the receiver, in case ``receiver`` is a callable wrapper of the
actual receiver."""
signature, code_object, startIndex = function(signature)
acceptable = code_object.co_varnames[
startIndex + len(arguments):
code_object.co_argcount
]
for name in code_object.co_varnames[
startIndex:startIndex + len(arguments)
]:
        if name in named:
raise TypeError(
'Argument %r specified both positionally '
'and as a keyword for calling %r'
% (name, signature)
)
    if not (code_object.co_flags & 8):  # 8 == CO_VARKEYWORDS
        # The function does not take a **kwargs-style parameter, so
        # remove unacceptable arguments.
for arg in named.keys():
if arg not in acceptable:
del named[arg]
return receiver(*arguments, **named)

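A sketch of the argument subsetting described above; here ``receiver`` doubles as its own signature because it is not wrapped:

from louie.robustapply import robust_apply

def receiver(sender, value):
    return (sender, value)

# 'extra' is dropped silently because receiver has no **kwargs.
result = robust_apply(receiver, receiver, sender='wa', value=1, extra='x')
assert result == ('wa', 1)
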
179
wlauto/external/louie/saferef.py vendored Normal file
View File

@@ -0,0 +1,179 @@
"""Refactored 'safe reference from dispatcher.py"""
import weakref
import traceback
def safe_ref(target, on_delete=None):
"""Return a *safe* weak reference to a callable target.
    - ``target``: The object to be weakly referenced; if it is a bound
      method, a BoundMethodWeakref is created, otherwise a simple
      weakref.
    - ``on_delete``: If provided, a hard reference to this callable is
      stored; it will be called with the reference object (either a
      weakref or a BoundMethodWeakref) as its argument once the
      referent is garbage collected.
"""
if hasattr(target, 'im_self'):
if target.im_self is not None:
# Turn a bound method into a BoundMethodWeakref instance.
# Keep track of these instances for lookup by disconnect().
assert hasattr(target, 'im_func'), (
"safe_ref target %r has im_self, but no im_func, "
"don't know how to create reference"
% target
)
reference = BoundMethodWeakref(target=target, on_delete=on_delete)
return reference
if callable(on_delete):
return weakref.ref(target, on_delete)
else:
return weakref.ref(target)
class BoundMethodWeakref(object):
"""'Safe' and reusable weak references to instance methods.
BoundMethodWeakref objects provide a mechanism for referencing a
bound method without requiring that the method object itself
(which is normally a transient object) is kept alive. Instead,
the BoundMethodWeakref object keeps weak references to both the
object and the function which together define the instance method.
Attributes:
- ``key``: The identity key for the reference, calculated by the
class's calculate_key method applied to the target instance method.
- ``deletion_methods``: Sequence of callable objects taking single
argument, a reference to this object which will be called when
*either* the target object or target function is garbage
collected (i.e. when this object becomes invalid). These are
specified as the on_delete parameters of safe_ref calls.
- ``weak_self``: Weak reference to the target object.
- ``weak_func``: Weak reference to the target function.
Class Attributes:
- ``_all_instances``: Class attribute pointing to all live
BoundMethodWeakref objects indexed by the class's
calculate_key(target) method applied to the target objects.
This weak value dictionary is used to short-circuit creation so
that multiple references to the same (object, function) pair
produce the same BoundMethodWeakref instance.
"""
_all_instances = weakref.WeakValueDictionary()
def __new__(cls, target, on_delete=None, *arguments, **named):
"""Create new instance or return current instance.
Basically this method of construction allows us to
        short-circuit creation of references to already-referenced
instance methods. The key corresponding to the target is
calculated, and if there is already an existing reference,
that is returned, with its deletion_methods attribute updated.
Otherwise the new instance is created and registered in the
table of already-referenced methods.
"""
key = cls.calculate_key(target)
current = cls._all_instances.get(key)
if current is not None:
current.deletion_methods.append(on_delete)
return current
else:
base = super(BoundMethodWeakref, cls).__new__(cls)
cls._all_instances[key] = base
base.__init__(target, on_delete, *arguments, **named)
return base
def __init__(self, target, on_delete=None):
"""Return a weak-reference-like instance for a bound method.
- ``target``: The instance-method target for the weak reference,
must have im_self and im_func attributes and be
reconstructable via the following, which is true of built-in
instance methods::
target.im_func.__get__( target.im_self )
- ``on_delete``: Optional callback which will be called when
this weak reference ceases to be valid (i.e. either the
object or the function is garbage collected). Should take a
single argument, which will be passed a pointer to this
object.
"""
def remove(weak, self=self):
"""Set self.isDead to True when method or instance is destroyed."""
methods = self.deletion_methods[:]
del self.deletion_methods[:]
try:
del self.__class__._all_instances[self.key]
except KeyError:
pass
for function in methods:
try:
if callable(function):
function(self)
except Exception:
try:
traceback.print_exc()
except AttributeError, e:
print ('Exception during saferef %s '
'cleanup function %s: %s' % (self, function, e))
self.deletion_methods = [on_delete]
self.key = self.calculate_key(target)
self.weak_self = weakref.ref(target.im_self, remove)
self.weak_func = weakref.ref(target.im_func, remove)
self.self_name = str(target.im_self)
self.func_name = str(target.im_func.__name__)
def calculate_key(cls, target):
"""Calculate the reference key for this reference.
Currently this is a two-tuple of the id()'s of the target
object and the target function respectively.
"""
return (id(target.im_self), id(target.im_func))
calculate_key = classmethod(calculate_key)
def __str__(self):
"""Give a friendly representation of the object."""
return "%s(%s.%s)" % (
self.__class__.__name__,
self.self_name,
self.func_name,
)
__repr__ = __str__
def __nonzero__(self):
"""Whether we are still a valid reference."""
return self() is not None
def __cmp__(self, other):
"""Compare with another reference."""
if not isinstance(other, self.__class__):
return cmp(self.__class__, type(other))
return cmp(self.key, other.key)
def __call__(self):
"""Return a strong reference to the bound method.
If the target cannot be retrieved, then will return None,
otherwise returns a bound instance method for our object and
function.
Note: You may call this method any number of times, as it does
not invalidate the reference.
"""
target = self.weak_self()
if target is not None:
function = self.weak_func()
if function is not None:
return function.__get__(target)
return None

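A sketch of the lifecycle described above (Python 2; the Worker class is illustrative): the reference resolves to a bound method while the instance is alive, and to None afterwards:

import gc
from louie.saferef import safe_ref

class Worker(object):
    def run(self):
        return 'ran'

worker = Worker()
ref = safe_ref(worker.run)
assert ref()() == 'ran'    # resolve the reference, then call the method
del worker
gc.collect()
assert ref() is None       # the instance has been collected
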
39
wlauto/external/louie/sender.py vendored Normal file
View File

@@ -0,0 +1,39 @@
"""Sender classes."""
class _SENDER(type):
"""Base metaclass for sender classes."""
def __str__(cls):
return '<Sender: %s>' % (cls.__name__, )
class Any(object):
"""Used to represent either 'any sender'.
The Any class can be used with connect, disconnect, send, or
sendExact to denote that the sender paramater should react to any
sender, not just a particular sender.
"""
__metaclass__ = _SENDER
class Anonymous(object):
"""Singleton used to signal 'anonymous sender'.
The Anonymous class is used to signal that the sender of a message
is not specified (as distinct from being 'any sender').
Registering callbacks for Anonymous will only receive messages
sent without senders. Sending with anonymous will only send
messages to those receivers registered for Any or Anonymous.
Note: The default sender for connect is Any, while the default
sender for send is Anonymous. This has the effect that if you do
not specify any senders in either function then all messages are
routed as though there was a single sender (Anonymous) being used
everywhere.
"""
__metaclass__ = _SENDER

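A sketch of the Any/Anonymous distinction documented above (receiver names are illustrative): a receiver connected for Any hears every send of a signal, while one connected for Anonymous hears only sends that name no sender. Exact-sender matches are yielded before Any matches:

import louie

heard = []

def any_receiver():
    heard.append('any')

def anon_receiver():
    heard.append('anon')

louie.connect(any_receiver, 'sig', louie.Any)
louie.connect(anon_receiver, 'sig', louie.Anonymous)

louie.send('sig')                   # anonymous send: both fire
louie.send('sig', sender=object())  # named sender: only Any fires
assert heard == ['anon', 'any', 'any']
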
30
wlauto/external/louie/signal.py vendored Normal file
View File

@@ -0,0 +1,30 @@
"""Signal class.
This class is provided as a way to consistently define and document
signal types. Signal classes also have a useful string
representation.
Louie does not require you to use a subclass of Signal for signals.
"""
class _SIGNAL(type):
"""Base metaclass for signal classes."""
def __str__(cls):
return '<Signal: %s>' % (cls.__name__, )
class Signal(object):
__metaclass__ = _SIGNAL
class All(Signal):
"""Used to represent 'all signals'.
    The All class can be used with connect, disconnect, send, or
    send_exact to denote that the receiver should react to all signals,
    not just a particular signal.
"""

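A sketch of defining a custom signal type as suggested above (the JobDone class is illustrative): subclassing Signal gives a hashable, self-documenting signal with a readable string form:

import louie

class JobDone(louie.Signal):
    """Emitted when a background job completes."""

results = []

def on_done(result):
    results.append(result)

louie.connect(on_done, JobDone)
louie.send(JobDone, result=0)
assert results == [0]
assert str(JobDone) == '<Signal: JobDone>'
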
View File

@@ -0,0 +1,5 @@
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))

0
wlauto/external/louie/test/fixture.py vendored Normal file
View File

View File

@@ -0,0 +1,154 @@
import unittest
import louie
from louie import dispatcher
def x(a):
return a
class Dummy(object):
pass
class Callable(object):
def __call__(self, a):
return a
def a(self, a):
return a
class TestDispatcher(unittest.TestCase):
def setUp(self):
louie.reset()
def _isclean(self):
"""Assert that everything has been cleaned up automatically"""
assert len(dispatcher.senders_back) == 0, dispatcher.senders_back
assert len(dispatcher.connections) == 0, dispatcher.connections
assert len(dispatcher.senders) == 0, dispatcher.senders
def test_Exact(self):
a = Dummy()
signal = 'this'
louie.connect(x, signal, a)
expected = [(x, a)]
result = louie.send('this', a, a=a)
assert result == expected, (
"Send didn't return expected result:\n\texpected:%s\n\tgot:%s"
% (expected, result))
louie.disconnect(x, signal, a)
assert len(list(louie.get_all_receivers(a, signal))) == 0
self._isclean()
def test_AnonymousSend(self):
a = Dummy()
signal = 'this'
louie.connect(x, signal)
expected = [(x, a)]
result = louie.send(signal, None, a=a)
assert result == expected, (
"Send didn't return expected result:\n\texpected:%s\n\tgot:%s"
% (expected, result))
louie.disconnect(x, signal)
assert len(list(louie.get_all_receivers(None, signal))) == 0
self._isclean()
def test_AnyRegistration(self):
a = Dummy()
signal = 'this'
louie.connect(x, signal, louie.Any)
expected = [(x, a)]
result = louie.send('this', object(), a=a)
assert result == expected, (
"Send didn't return expected result:\n\texpected:%s\n\tgot:%s"
% (expected, result))
louie.disconnect(x, signal, louie.Any)
expected = []
result = louie.send('this', object(), a=a)
assert result == expected, (
"Send didn't return expected result:\n\texpected:%s\n\tgot:%s"
% (expected, result))
assert len(list(louie.get_all_receivers(louie.Any, signal))) == 0
self._isclean()
def test_AllRegistration(self):
a = Dummy()
signal = 'this'
louie.connect(x, louie.All, a)
expected = [(x, a)]
result = louie.send('this', a, a=a)
assert result == expected, (
"Send didn't return expected result:\n\texpected:%s\n\tgot:%s"
% (expected, result))
louie.disconnect(x, louie.All, a)
assert len(list(louie.get_all_receivers(a, louie.All))) == 0
self._isclean()
def test_GarbageCollected(self):
a = Callable()
b = Dummy()
signal = 'this'
louie.connect(a.a, signal, b)
expected = []
del a
result = louie.send('this', b, a=b)
assert result == expected, (
"Send didn't return expected result:\n\texpected:%s\n\tgot:%s"
% (expected, result))
assert len(list(louie.get_all_receivers(b, signal))) == 0, (
"Remaining handlers: %s" % (louie.get_all_receivers(b, signal),))
self._isclean()
def test_GarbageCollectedObj(self):
class x:
def __call__(self, a):
return a
a = Callable()
b = Dummy()
signal = 'this'
louie.connect(a, signal, b)
expected = []
del a
result = louie.send('this', b, a=b)
assert result == expected, (
"Send didn't return expected result:\n\texpected:%s\n\tgot:%s"
% (expected, result))
assert len(list(louie.get_all_receivers(b, signal))) == 0, (
"Remaining handlers: %s" % (louie.get_all_receivers(b, signal),))
self._isclean()
def test_MultipleRegistration(self):
a = Callable()
b = Dummy()
signal = 'this'
louie.connect(a, signal, b)
louie.connect(a, signal, b)
louie.connect(a, signal, b)
louie.connect(a, signal, b)
louie.connect(a, signal, b)
louie.connect(a, signal, b)
result = louie.send('this', b, a=b)
assert len(result) == 1, result
assert len(list(louie.get_all_receivers(b, signal))) == 1, (
"Remaining handlers: %s" % (louie.get_all_receivers(b, signal),))
del a
del b
del result
self._isclean()
def test_robust(self):
"""Test the sendRobust function."""
def fails():
raise ValueError('this')
a = object()
signal = 'this'
louie.connect(fails, louie.All, a)
result = louie.send_robust('this', a, a=a)
err = result[0][1]
assert isinstance(err, ValueError)
assert err.args == ('this', )

View File

@@ -0,0 +1,145 @@
"""Louie plugin tests."""
import unittest
import louie
try:
import qt
if not hasattr(qt.qApp, 'for_testing'):
_app = qt.QApplication([])
_app.for_testing = True
qt.qApp = _app
except ImportError:
qt = None
class ReceiverBase(object):
def __init__(self):
self.args = []
self.live = True
def __call__(self, arg):
self.args.append(arg)
class Receiver1(ReceiverBase):
pass
class Receiver2(ReceiverBase):
pass
class Plugin1(louie.Plugin):
def is_live(self, receiver):
"""ReceiverBase instances are only live if their `live`
attribute is True"""
if isinstance(receiver, ReceiverBase):
return receiver.live
return True
class Plugin2(louie.Plugin):
def is_live(self, receiver):
"""Pretend all Receiver2 instances are not live."""
if isinstance(receiver, Receiver2):
return False
return True
def test_only_one_instance():
louie.reset()
plugin1a = Plugin1()
plugin1b = Plugin1()
louie.install_plugin(plugin1a)
# XXX: Move these tests into test cases so we can use unittest's
# 'assertRaises' method.
try:
louie.install_plugin(plugin1b)
except louie.error.PluginTypeError:
pass
else:
raise Exception('PluginTypeError not raised')
def test_is_live():
louie.reset()
# Create receivers.
receiver1a = Receiver1()
receiver1b = Receiver1()
receiver2a = Receiver2()
receiver2b = Receiver2()
# Connect signals.
louie.connect(receiver1a, 'sig')
louie.connect(receiver1b, 'sig')
louie.connect(receiver2a, 'sig')
louie.connect(receiver2b, 'sig')
# Check reception without plugins.
louie.send('sig', arg='foo')
assert receiver1a.args == ['foo']
assert receiver1b.args == ['foo']
assert receiver2a.args == ['foo']
assert receiver2b.args == ['foo']
# Install plugin 1.
plugin1 = Plugin1()
louie.install_plugin(plugin1)
# Make some receivers not live.
receiver1a.live = False
receiver2b.live = False
# Check reception.
louie.send('sig', arg='bar')
assert receiver1a.args == ['foo']
assert receiver1b.args == ['foo', 'bar']
assert receiver2a.args == ['foo', 'bar']
assert receiver2b.args == ['foo']
# Remove plugin 1, install plugin 2.
plugin2 = Plugin2()
louie.remove_plugin(plugin1)
louie.install_plugin(plugin2)
# Check reception.
louie.send('sig', arg='baz')
assert receiver1a.args == ['foo', 'baz']
assert receiver1b.args == ['foo', 'bar', 'baz']
assert receiver2a.args == ['foo', 'bar']
assert receiver2b.args == ['foo']
# Install plugin 1 alongside plugin 2.
louie.install_plugin(plugin1)
# Check reception.
louie.send('sig', arg='fob')
assert receiver1a.args == ['foo', 'baz']
assert receiver1b.args == ['foo', 'bar', 'baz', 'fob']
assert receiver2a.args == ['foo', 'bar']
assert receiver2b.args == ['foo']
if qt is not None:
def test_qt_plugin():
louie.reset()
# Create receivers.
class Receiver(qt.QWidget):
def __init__(self):
qt.QObject.__init__(self)
self.args = []
def receive(self, arg):
self.args.append(arg)
receiver1 = Receiver()
receiver2 = Receiver()
# Connect signals.
louie.connect(receiver1.receive, 'sig')
louie.connect(receiver2.receive, 'sig')
# Destroy receiver2 so only a shell is left.
receiver2.close(True)
# Check reception without plugins.
louie.send('sig', arg='foo')
assert receiver1.args == ['foo']
assert receiver2.args == ['foo']
# Install plugin.
plugin = louie.QtWidgetPlugin()
louie.install_plugin(plugin)
# Check reception with plugins.
louie.send('sig', arg='bar')
assert receiver1.args == ['foo', 'bar']
assert receiver2.args == ['foo']

View File

@@ -0,0 +1,41 @@
import unittest
import louie
from louie import dispatcher
class Callable(object):
def __init__(self, val):
self.val = val
def __call__(self):
return self.val
one = Callable(1)
two = Callable(2)
three = Callable(3)
class TestPriorityDispatcher(unittest.TestCase):
def test_ConnectNotify(self):
louie.connect(
two,
'one',
priority=200
)
louie.connect(
one,
'one',
priority=100
)
louie.connect(
three,
'one',
priority=300
)
result = [ i[1] for i in louie.send('one')]
if not result == [1, 2, 3]:
print result
assert(False)

View File

@@ -0,0 +1,62 @@
import unittest
import louie.prioritylist
from louie.prioritylist import PriorityList
#def populate_list(plist):
class TestPriorityList(unittest.TestCase):
def test_Insert(self):
pl = PriorityList()
elements = {3: "element 3",
2: "element 2",
1: "element 1",
5: "element 5",
4: "element 4"
}
for key in elements:
pl.add(elements[key], priority=key)
match = zip(sorted(elements.values()), pl[:])
for pair in match:
assert(pair[0]==pair[1])
def test_Delete(self):
pl = PriorityList()
elements = {2: "element 3",
1: "element 2",
0: "element 1",
4: "element 5",
3: "element 4"
}
for key in elements:
pl.add(elements[key], priority=key)
del elements[2]
del pl[2]
        match = zip(sorted(elements.values()), pl[:])
for pair in match:
assert(pair[0]==pair[1])
def test_Multiple(self):
pl = PriorityList()
pl.add('1', 1)
pl.add('2.1', 2)
pl.add('3', 3)
pl.add('2.2', 2)
it = iter(pl)
assert(it.next() == '1')
assert(it.next() == '2.1')
assert(it.next() == '2.2')
assert(it.next() == '3')
def test_IteratorBreak(self):
pl = PriorityList()
pl.add('1', 1)
pl.add('2.1', 2)
pl.add('3', 3)
pl.add('2.2', 2)
for i in pl:
if i == '2.1':
break
assert(pl.index('3') == 3)

View File

@@ -0,0 +1,34 @@
import unittest
from louie.robustapply import robust_apply
def no_argument():
pass
def one_argument(blah):
pass
def two_arguments(blah, other):
pass
class TestRobustApply(unittest.TestCase):
def test_01(self):
robust_apply(no_argument, no_argument)
def test_02(self):
self.assertRaises(TypeError, robust_apply, no_argument, no_argument,
'this' )
def test_03(self):
self.assertRaises(TypeError, robust_apply, one_argument, one_argument)
def test_04(self):
"""Raise error on duplication of a particular argument"""
self.assertRaises(TypeError, robust_apply, one_argument, one_argument,
'this', blah='that')

View File

@@ -0,0 +1,83 @@
import unittest
from louie.saferef import safe_ref
class _Sample1(object):
def x(self):
pass
def _sample2(obj):
pass
class _Sample3(object):
def __call__(self, obj):
pass
class TestSaferef(unittest.TestCase):
# XXX: The original tests had a test for closure, and it had an
# off-by-one problem, perhaps due to scope issues. It has been
# removed from this test suite.
def setUp(self):
ts = []
ss = []
for x in xrange(5000):
t = _Sample1()
ts.append(t)
s = safe_ref(t.x, self._closure)
ss.append(s)
ts.append(_sample2)
ss.append(safe_ref(_sample2, self._closure))
for x in xrange(30):
t = _Sample3()
ts.append(t)
s = safe_ref(t, self._closure)
ss.append(s)
self.ts = ts
self.ss = ss
self.closure_count = 0
def tearDown(self):
if hasattr(self, 'ts'):
del self.ts
if hasattr(self, 'ss'):
del self.ss
def test_In(self):
"""Test the `in` operator for safe references (cmp)"""
for t in self.ts[:50]:
assert safe_ref(t.x) in self.ss
def test_Valid(self):
"""Test that the references are valid (return instance methods)"""
for s in self.ss:
assert s()
def test_ShortCircuit(self):
"""Test that creation short-circuits to reuse existing references"""
sd = {}
for s in self.ss:
sd[s] = 1
for t in self.ts:
if hasattr(t, 'x'):
assert sd.has_key(safe_ref(t.x))
else:
assert sd.has_key(safe_ref(t))
def test_Representation(self):
"""Test that the reference object's representation works
XXX Doesn't currently check the results, just that no error
is raised
"""
repr(self.ss[-1])
def _closure(self, ref):
"""Dumb utility mechanism to increment deletion counter"""
self.closure_count += 1

8
wlauto/external/louie/version.py vendored Normal file
View File

@@ -0,0 +1,8 @@
"""Louie version information."""
NAME = 'Louie'
DESCRIPTION = 'Signal dispatching mechanism'
VERSION = '1.1'

Some files were not shown because too many files have changed in this diff.