
Compare commits


No commits in common. "master" and "v3.1.0" have entirely different histories.

306 changed files with 1787 additions and 10697 deletions
.github
.readthedocs.yml
.travis.yml
MANIFEST.in
README.rst
dev_scripts
doc
extras
pytest.ini
requirements.txt
setup.py
tests
wa

@ -1,16 +0,0 @@
---
name: Bug report
about: Create a report to help resolve an issue.
title: ''
labels: bug
assignees: ''
---
**Describe the issue**
A clear and concise description of what the bug is.
**Run Log**
Please attach your `run.log` detailing the issue.
**Other comments (optional)**

@ -1,17 +0,0 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is.
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Additional context**
Add any other context about the feature request here.

@ -1,10 +0,0 @@
---
name: 'Question / Support '
about: Ask a question or request support
title: ''
labels: question
assignees: ''
---
**

@ -1,11 +0,0 @@
---
name: Question
about: Ask a question
title: ''
labels: question
assignees: ''
---
**Describe you query**
What would you like to know / what are you trying to achieve?

@ -1,92 +0,0 @@
name: WA Test Suite
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]
types: [opened, synchronize, reopened, ready_for_review]
schedule:
- cron: 0 2 * * *
# Allows running this workflow manually from the Actions tab
workflow_dispatch:
jobs:
Run-Linters-and-Tests:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v2
- name: Set up Python 3.8.18
uses: actions/setup-python@v2
with:
python-version: 3.8.18
- name: git-bash
uses: pkg-src/github-action-git-bash@v1.1
- name: Install dependencies
run: |
python -m pip install --upgrade pip
cd /tmp && git clone https://github.com/ARM-software/devlib.git && cd devlib && pip install .
cd $GITHUB_WORKSPACE && pip install .[test]
python -m pip install pylint==2.6.2 pep8 flake8 mock nose
- name: Run pylint
run: |
cd $GITHUB_WORKSPACE && ./dev_scripts/pylint wa/
- name: Run PEP8
run: |
cd $GITHUB_WORKSPACE && ./dev_scripts/pep8 wa
- name: Run nose tests
run: |
nosetests
Execute-Test-Workload-and-Process:
runs-on: ubuntu-22.04
strategy:
matrix:
python-version: [3.7.17, 3.8.18, 3.9.21, 3.10.16, 3.13.2]
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: git-bash
uses: pkg-src/github-action-git-bash@v1.1
- name: Install dependencies
run: |
python -m pip install --upgrade pip
cd /tmp && git clone https://github.com/ARM-software/devlib.git && cd devlib && pip install .
cd $GITHUB_WORKSPACE && pip install .
- name: Run test workload
run: |
cd /tmp && wa run $GITHUB_WORKSPACE/tests/ci/idle_agenda.yaml -v -d idle_workload
- name: Test Process Command
run: |
cd /tmp && wa process -f -p csv idle_workload
Test-WA-Commands:
runs-on: ubuntu-22.04
strategy:
matrix:
python-version: [3.7.17, 3.8.18, 3.9.21, 3.10.16, 3.13.2]
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: git-bash
uses: pkg-src/github-action-git-bash@v1.1
- name: Install dependencies
run: |
python -m pip install --upgrade pip
cd /tmp && git clone https://github.com/ARM-software/devlib.git && cd devlib && pip install .
cd $GITHUB_WORKSPACE && pip install .
- name: Test Show Command
run: |
wa show dhrystone && wa show generic_android && wa show trace-cmd && wa show csv
- name: Test List Command
run: |
wa list all
- name: Test Create Command
run: |
wa create agenda dhrystone generic_android csv trace_cmd && wa create package test && wa create workload test

@ -1,28 +0,0 @@
# .readthedocs.yml
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
# Required
version: 2
# Build documentation in the docs/ directory with Sphinx
sphinx:
builder: html
configuration: doc/source/conf.py
# Build the docs in additional formats such as PDF and ePub
formats: all
# Configure the build environment
build:
os: ubuntu-22.04
tools:
python: "3.11"
# Ensure doc dependencies are installed before building
python:
install:
- requirements: doc/requirements.txt
- method: pip
path: .

54
.travis.yml Normal file

@ -0,0 +1,54 @@
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
language: python
python:
- "3.6"
- "2.7"
install:
- pip install nose
- pip install nose2
- pip install flake8
- pip install pylint==1.9.2
- git clone -v https://github.com/ARM-software/devlib.git /tmp/devlib && cd /tmp/devlib && python setup.py install
- cd $TRAVIS_BUILD_DIR && python setup.py install
env:
global:
- PYLINT="cd $TRAVIS_BUILD_DIR && ./dev_scripts/pylint wa"
- PEP8="cd $TRAVIS_BUILD_DIR && ./dev_scripts/pep8 wa"
- NOSETESTS="nose2 -s $TRAVIS_BUILD_DIR/tests"
- WORKLOAD="cd /tmp && wa run $TRAVIS_BUILD_DIR/tests/travis/idle_agenda.yaml -v -d idle_workload"
- PROCESS_CMD="$WORKLOAD && wa process -f -p csv idle_workload"
- SHOW_CMD="wa show dhrystone && wa show generic_android && wa show trace-cmd && wa show csv"
- LIST_CMD="wa list all"
- CREATE_CMD="wa create agenda dhrystone generic_android csv trace_cmd && wa create package test && wa create workload test"
matrix:
- TEST=$PYLINT
- TEST=$PEP8
- TEST=$NOSETESTS
- TEST=$WORKLOAD
- TEST="$PROCESS_CMD && $SHOW_CMD && $LIST_CMD && $CREATE_CMD"
script:
- echo $TEST && eval $TEST
matrix:
exclude:
- python: "2.7"
env: TEST=$PYLINT
- python: "2.7"
env: TEST=$PEP8

@ -1,3 +1,2 @@
recursive-include scripts *
recursive-include doc *
recursive-include wa *

@ -18,7 +18,7 @@ workloads, instruments or output processing.
Requirements
============
- Python 3.5+
- Python 2.7 or Python 3
- Linux (should work on other Unixes, but untested)
- Latest Android SDK (ANDROID_HOME must be set) for Android devices, or
- SSH for Linux devices
@ -30,11 +30,7 @@ Installation
To install::
git clone git@github.com:ARM-software/workload-automation.git workload-automation
sudo -H python setup.py [install|develop]
Note: A `requirements.txt` is included; however, this is designed to be used as a
reference for known working versions rather than as part of a standard
installation.
sudo -H pip install ./workload-automation
Please refer to the `installation section <http://workload-automation.readthedocs.io/en/latest/user_information.html#install>`_
in the documentation for more details.

@ -6,7 +6,7 @@ DEFAULT_DIRS=(
EXCLUDE=wa/tests,wa/framework/target/descriptor.py
EXCLUDE_COMMA=
IGNORE=E501,E265,E266,W391,E401,E402,E731,W503,W605,F401
IGNORE=E501,E265,E266,W391,E401,E402,E731,W504,W605,F401
if ! hash flake8 2>/dev/null; then
echo "flake8 not found in PATH"

@ -36,9 +36,6 @@ pylint_version=$(python -c 'from pylint.__pkginfo__ import version; print(versio
if [ "x$pylint_version" == "x" ]; then
pylint_version=$(python3 -c 'from pylint.__pkginfo__ import version; print(version)' 2>/dev/null)
fi
if [ "x$pylint_version" == "x" ]; then
pylint_version=$(python3 -c 'from pylint import version; print(version)' 2>/dev/null)
fi
if [ "x$pylint_version" == "x" ]; then
echo "ERROR: no pylint version found; is it installed?"
exit 1

@ -32,11 +32,17 @@ def transform(mod):
if b'pylint:' in text[0]:
msg = 'pylint directive found on the first line of {}; please move to below copyright header'
raise RuntimeError(msg.format(mod.name))
char = chr(text[0][0])
if sys.version_info[0] == 3:
char = chr(text[0][0])
else:
char = text[0][0]
if text[0].strip() and char != '#':
msg = 'first line of {} is not a comment; is the copyright header missing?'
raise RuntimeError(msg.format(mod.name))
text[0] = '# pylint: disable={}'.format(','.join(errors)).encode('utf-8')
if sys.version_info[0] == 3:
text[0] = '# pylint: disable={}'.format(','.join(errors)).encode('utf-8')
else:
text[0] = '# pylint: disable={}'.format(','.join(errors))
mod.file_bytes = b'\n'.join(text)
# This is what *should* happen, but doesn't work.

@ -1,5 +1,5 @@
#!/usr/bin/env python
# Copyright 2015-2019 ARM Limited
# Copyright 2015-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -26,11 +26,10 @@ OUTPUT_TEMPLATE_FILE = os.path.join(os.path.dirname(__file__), 'source', 'instr
def generate_instrument_method_map(outfile):
signal_table = format_simple_table([(k, v) for k, v in SIGNAL_MAP.items()],
signal_table = format_simple_table([(k, v) for k, v in SIGNAL_MAP.iteritems()],
headers=['method name', 'signal'], align='<<')
decorator_names = map(lambda x: x.replace('high', 'fast').replace('low', 'slow'), CallbackPriority.names)
priority_table = format_simple_table(zip(decorator_names, CallbackPriority.names, CallbackPriority.values),
headers=['decorator', 'CallbackPriority name', 'CallbackPriority value'], align='<>')
priority_table = format_simple_table(zip(CallbackPriority.names, CallbackPriority.values),
headers=['decorator', 'priority'], align='<>')
with open(OUTPUT_TEMPLATE_FILE) as fh:
template = string.Template(fh.read())
with open(outfile, 'w') as wfh:
@ -38,4 +37,4 @@ def generate_instrument_method_map(outfile):
if __name__ == '__main__':
generate_instrument_method_map(sys.argv[1])
generate_instrumentation_method_map(sys.argv[1])

@ -1,5 +1,5 @@
#!/usr/bin/env python
# Copyright 2014-2019 ARM Limited
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -25,12 +25,7 @@ from wa.utils.doc import (strip_inlined_text, get_rst_from_plugin,
get_params_rst, underline, line_break)
from wa.utils.misc import capitalize
GENERATE_FOR_PACKAGES = [
'wa.workloads',
'wa.instruments',
'wa.output_processors',
]
GENERATE_FOR_PACKAGES = ['wa.workloads', 'wa.instruments', 'wa.output_processors']
def insert_contents_table(title='', depth=1):
"""
@ -46,7 +41,6 @@ def insert_contents_table(title='', depth=1):
def generate_plugin_documentation(source_dir, outdir, ignore_paths):
# pylint: disable=unused-argument
pluginloader.clear()
pluginloader.update(packages=GENERATE_FOR_PACKAGES)
if not os.path.exists(outdir):
@ -63,7 +57,7 @@ def generate_plugin_documentation(source_dir, outdir, ignore_paths):
exts = pluginloader.list_plugins(ext_type)
sorted_exts = iter(sorted(exts, key=lambda x: x.name))
try:
wfh.write(get_rst_from_plugin(next(sorted_exts)))
wfh.write(get_rst_from_plugin(sorted_exts.next()))
except StopIteration:
return
for ext in sorted_exts:
@ -79,11 +73,9 @@ def generate_target_documentation(outdir):
'juno_linux',
'juno_android']
intro = (
'\nThis is a list of commonly used targets and their device '
'parameters, for a complete reference please use the'
' WA :ref:`list command <list-command>`.\n\n\n'
)
intro = '\nThis is a list of commonly used targets and their device '\
'parameters, for a complete reference please use the '\
'WA :ref:`list command <list-command>`.\n\n\n'
pluginloader.clear()
pluginloader.update(packages=['wa.framework.target.descriptor'])
@ -120,8 +112,7 @@ def generate_config_documentation(config, outdir):
if not os.path.exists(outdir):
os.mkdir(outdir)
config_name = '_'.join(config.name.split())
outfile = os.path.join(outdir, '{}.rst'.format(config_name))
outfile = os.path.join(outdir, '{}.rst'.format('_'.join(config.name.split())))
with open(outfile, 'w') as wfh:
wfh.write(get_params_rst(config.config_points))

@ -1,7 +1,4 @@
nose
numpy
pandas
sphinx_rtd_theme==1.0.0
sphinx==4.2
docutils<0.18
devlib @ git+https://github.com/ARM-software/devlib@master
sphinx_rtd_theme>=0.3.1

@ -284,13 +284,6 @@ methods
:return: A list of `str` labels of workloads that were part of this run.
.. method:: RunOutput.add_classifier(name, value, overwrite=False)
Add a classifier to the run as a whole. If a classifier with the specified
``name`` already exists, a ``ValueError`` will be raised, unless
`overwrite=True` is specified.
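A minimal sketch of tagging a run with classifiers (this assumes ``RunOutput``
is importable from the top-level ``wa`` package, as in the quickstart examples):

.. code-block:: python

    from wa import RunOutput  # assumed top-level import

    ro = RunOutput('./wa_output')
    ro.add_classifier('build', 'nightly-1234')         # tags the run as a whole
    ro.add_classifier('build', 'rc1', overwrite=True)  # replaces the existing value
    # without overwrite=True, adding 'build' again would raise ValueError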
:class:`RunDatabaseOutput`
---------------------------
@ -322,12 +315,9 @@ methods
.. method:: RunDatabaseOutput.get_artifact_path(name)
If the artifact is a file this method returns a `StringIO` object containing
the contents of the artifact specified by ``name``. If the artifact is a
directory, the method returns a path to a locally extracted version of the
directory which is left to the user to remove after use. This will only look
at the run artifacts; this will not search the artifacts of the individual
jobs.
Returns a `StringIO` object containing the contents of the artifact
specified by ``name``. This will only look at the run artifacts; this will
not search the artifacts of the individual jobs.
:param name: The name of the artifact whose path to retrieve.
:return: A `StringIO` object with the contents of the artifact
@ -409,7 +399,7 @@ artifacts, metadata, and configuration. It has the following attributes:
methods
~~~~~~~
.. method:: JobOutput.get_artifact(name)
.. method:: RunOutput.get_artifact(name)
Return the :class:`Artifact` specified by ``name`` associated with this job.
@ -417,7 +407,7 @@ methods
:return: The :class:`Artifact` with that name
:raises HostError: If the artifact with the specified name does not exist.
.. method:: JobOutput.get_artifact_path(name)
.. method:: RunOutput.get_artifact_path(name)
Return the path to the file backing the artifact specified by ``name``,
associated with this job.
@ -426,20 +416,13 @@ methods
:return: The path to the artifact
:raises HostError: If the artifact with the specified name does not exist.
.. method:: JobOutput.get_metric(name)
.. method:: RunOutput.get_metric(name)
Return the :class:`Metric` associated with this job with the specified
`name`.
:return: The :class:`Metric` object for the metric with the specified name.
.. method:: JobOutput.add_classifier(name, value, overwrite=False)
Add a classifier to the job. The classifier will be propagated to all
existing artifacts and metrics, as well as those added afterwards. If a
classifier with the specified ``name`` already exists, a ``ValueError`` will
be raised, unless `overwrite=True` is specified.
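At the job level, the propagation described above can be sketched as follows
(again assuming the top-level ``wa`` import):

.. code-block:: python

    from wa import RunOutput  # assumed top-level import

    ro = RunOutput('./wa_output')
    job = ro.jobs[0]
    job.add_classifier('thermal_state', 'cool')
    # the classifier is propagated to the job's existing metrics and artifacts
    for metric in job.metrics:
        assert metric.classifiers['thermal_state'] == 'cool'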
:class:`JobDatabaseOutput`
---------------------------
@ -469,11 +452,8 @@ methods
.. method:: JobDatabaseOutput.get_artifact_path(name)
If the artifact is a file this method returns a `StringIO` object containing
the contents of the artifact specified by ``name`` associated with this job.
If the artifact is a directory, the method returns a path to a locally
extracted version of the directory which is left to the user to remove after
use.
Returns a ``StringIO`` object containing the contents of the artifact
specified by ``name`` associated with this job.
:param name: The name of the artifact whose path to retrieve.
:return: A `StringIO` object with the contents of the artifact
@ -517,11 +497,6 @@ A :class:`Metric` has the following attributes:
or they may have been added by the workload to help distinguish between
otherwise identical metrics.
``label``
This is a string constructed from the name and classifiers, to provide a
more unique identifier, e.g. for grouping values across iterations. The
format is in the form ``name/classifier1=value1/classifier2=value2/...``.
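Since the label is derived mechanically from ``name`` and ``classifiers``, it
can be reconstructed as in this sketch (the classifier ordering shown here is
an assumption for illustration):

.. code-block:: python

    name = 'execution_time'
    classifiers = {'core': 'big', 'iteration': 2}
    label = name + ''.join('/{}={}'.format(k, v)
                           for k, v in sorted(classifiers.items()))
    # -> 'execution_time/core=big/iteration=2'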
:class:`Artifact`
-----------------
@ -622,12 +597,6 @@ The available attributes of the class are as follows:
The name of the target class that was used to interact with the device
during the run, e.g. ``"AndroidTarget"``, ``"LinuxTarget"`` etc.
``modules``
A list of names of modules that have been loaded by the target. Modules
provide additional functionality, such as access to ``cpufreq`` and which
modules are installed may impact how much of the ``TargetInfo`` has been
populated.
``cpus``
A list of :class:`CpuInfo` objects describing the capabilities of each CPU.

@ -178,16 +178,6 @@ methods.
locations) and device will be searched for an application with a matching
package name.
``supported_versions``
This attribute should be a list of apk versions that are suitable for this
workload, if a specific apk version is not specified then any available
supported version may be chosen.
``activity``
This attribute can be optionally set to override the default activity that
will be extracted from the selected APK file which will be used when
launching the APK.
``view``
This is the "view" associated with the application. This is used by
instruments like ``fps`` to monitor the current framerate being generated by
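The sketch below pulls these attributes together in a hypothetical workload
(the package name, versions, activity, and view string are all illustrative,
not taken from a real APK):

.. code-block:: python

    from wa import ApkWorkload

    class ExampleApp(ApkWorkload):

        name = 'exampleapp'
        package_names = ['com.example.app']      # hypothetical package
        supported_versions = ['1.2.0', '1.3.1']  # any of these APK versions may be used
        activity = '.MainActivity'               # overrides the activity from the APK
        view = 'SurfaceView - com.example.app/com.example.app.MainActivity'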

@ -2,338 +2,6 @@
What's New in Workload Automation
=================================
*************
Version 3.3.1
*************
.. warning:: This is the last release supporting Python 3.5 and Python 3.6.
Subsequent releases will support Python 3.7+.
New Features:
==============
Commands:
---------
Instruments:
------------
- ``perf``: Add support for ``report-sample``.
Workloads:
----------------
- ``PCMark``: Add support for PCMark 3.0.
- ``Antutu``: Add support for 9.1.6.
- ``Geekbench``: Add support for Geekbench5.
- ``gfxbench``: Support the non corporate version.
Fixes/Improvements
==================
Framework:
----------
- Fix installation on systems without git installed.
- Avoid querying online cpus if hotplug is disabled.
Dockerfile:
-----------
- Update base image to Ubuntu 20.04.
Instruments:
------------
- ``perf``: Fix parsing csv when using interval-only-values.
- ``perf``: Improve error reporting of an invalid agenda.
Output Processors:
------------------
- ``postgres``: Fixed SQL command when creating a new event.
Workloads:
----------
- ``speedometer``: Fix adb reverse when rebooting a device.
- ``googleplaybook``: Support newer apk version.
- ``googlephotos``: Support newer apk version.
- ``gmail``: Support newer apk version.
Other:
------
- Upgrade Android Gradle to 7.2 and Gradle plugin to 4.2.
***********
Version 3.3
***********
New Features:
==============
Commands:
---------
- Add ``report`` command to provide a summary of a run.
Instruments:
------------
- Add ``proc_stat`` instrument to monitor CPU load using data from ``/proc/stat``.
Framework:
----------
- Add support for simulating atomic writes to prevent race conditions when running concurrent instances of WA.
- Add support for file transfer over SSH connections via SFTP, falling back to the SCP implementation.
- Support detection of logcat buffer overflow and present a warning if this occurs.
- Allow skipping all remaining jobs if a job has exhausted all of its retries.
- Add polling mechanism for file transfers rather than relying on timeouts.
- Add `run_completed` reboot policy to enable rebooting a target after a run has been completed.
Android Devices:
----------------
- Enable configuration of whether to keep the screen on while the device is plugged in.
Output Processors:
------------------
- Enable the use of cascading deletion in Postgres databases to clean up after deletion of a run entry.
Fixes/Improvements
==================
Framework:
----------
- Improvements to the ``process`` command to correctly handle skipped and in-process jobs.
- Add support for deprecated parameters allowing for a warning to be raised when providing
a parameter that will no longer have an effect.
- Switch implementation of SSH connections to use Paramiko for greater stability.
- By default use sftp for file transfers with SSH connections, allow falling back to scp
by setting ``use_scp``.
- Fix callbacks not being disconnected correctly when requested.
- ``ApkInfo`` objects are now cached to reduce re-parsing of APK files.
- Speed up discovery of wa output directories.
- Fix merge handling of parameters from multiple files.
Dockerfile:
-----------
- Install additional instruments for use in the docker environment.
- Fix environment variables not being defined in non-interactive environments.
Instruments:
------------
- ``trace_cmd`` additional fixes for python 3 support.
Output Processors:
------------------
- ``postgres``: Fixed SQL command when creating a new event.
Workloads:
----------
- ``aitutu``: Improve reliability of results extraction.
- ``androbench``: Enable dismissing of additional popups on some devices.
- ``antutu``: Now supports major version 8 in addition to version 7.X.
- ``exoplayer``: Add support for Android 10.
- ``googlephotos``: Support newer apk version.
- ``gfxbench``: Allow user configuration for which tests should be run.
- ``gfxbench``: Improved score detection for a wider range of devices.
- ``gfxbench``: Moved results extraction out of run stage.
- ``jankbench``: Support newer versions of Pandas for processing.
- ``pcmark``: Add support for handling additional popups and installation flows.
- ``pcmark``: No longer clear and re-download test data before each execution.
- ``speedometer``: Enable the workload to run offline and drop the requirement for
UiAutomator. To support this, root access is now required to run the workload.
- ``youtube``: Update to support later versions of the apk.
Other:
------
- ``cpustates``: Improved name handling for unknown idle states.
***********
Version 3.2
***********
.. warning:: This release only supports Python 3.5+. Python 2 support has now
been dropped.
Fixes/Improvements
==================
Framework:
----------
- ``TargetInfo`` now tracks installed modules and will ensure the cache is
also updated on module change.
- Migrated the build scripts for uiauto based workloads to Python 3.
- Uiauto applications now target SDK version 28 to prevent PlayProtect
blocking the installation of the automation apks on some devices.
- The workload metadata now includes the apk package name if applicable.
Instruments:
------------
- ``energy_instruments`` will now have their ``teardown`` method called
correctly.
- ``energy_instruments``: Added a ``keep_raw`` parameter to control whether
raw files generated during execution should be deleted upon teardown.
- Update relevant instruments to make use of the new devlib collector
interface, for more information please see the
`devlib documentation <https://devlib.readthedocs.io/en/latest/collectors.html>`_.
Output Processors:
------------------
- ``postgres``: If initialisation fails then the output processor will no
longer attempt to reconnect at a later point during the run.
- ``postgres``: Will now ensure that the connection to the database is
re-established if it is dropped, e.g. due to a long-running workload.
- ``postgres``: Change the type of the ``hostid`` field to ``Bigint`` to
allow a larger range of ids.
- ``postgres``: Bump schema version to 1.5.
- ``perf``: Added support for the ``simpleperf`` profiling tool for android
devices.
- ``perf``: Added support for the perf ``record`` command.
- ``cpustates``: Improve handling of situations where cpufreq and/or cpuinfo
data is unavailable.
Workloads:
----------
- ``adobereader``: Now supports apk version 19.7.1.10709.
- ``antutu``: Supports dismissing of popup asking to create a shortcut on
the homescreen.
- ``gmail``: Now supports apk version 2019.05.26.252424914.
- ``googlemaps``: Now supports apk version 10.19.1.
- ``googlephotos``: Now supports apk version 4.28.0.
- ``geekbench``: Added support for versions 4.3.4, 4.4.0 and 4.4.2.
- ``geekbench-corporate``: Added support for versions 5.0.1 and 5.0.3.
- ``pcmark``: Now locks device orientation to portrait to increase
compatibility.
- ``pcmark``: Supports dismissing new Android 10 permission warnings.
Other:
------
- Improve documentation to help debugging module installation errors.
*************
Version 3.1.4
*************
.. warning:: This is the last release that supports Python 2. Subsequent versions
will support Python 3.5+ only.
New Features:
==============
Framework:
----------
- ``ApkWorkload``: Allow specifying a maximum and minimum version of an APK
instead of requiring a specific version.
- ``TestPackageHandler``: Added to support running android applications that
are invoked via ``am instrument``.
- Directories can now be added as ``Artifacts``.
Workloads:
----------
- ``aitutu``: Executes the Aitutu Image Speed/Accuracy and Object
Speed/Accuracy tests.
- ``uibench``: Run a configurable activity of the UIBench workload suite.
- ``uibenchjanktests``: Run an automated and instrument version of the
UIBench JankTests.
- ``motionmark``: Run a browser graphical benchmark.
Other:
------
- Added ``requirements.txt`` as a reference for known working package versions.
Fixes/Improvements
==================
Framework:
----------
- ``JobOutput``: Added an ``augmentation`` attribute to allow listing of
enabled augmentations for individual jobs.
- Better error handling for misconfigured job selection.
- All ``Workload`` classes now have an ``uninstall`` parameter to control whether
any binaries installed to the target should be uninstalled again once the
run has completed.
- The ``cleanup_assets`` parameter is now more consistently utilized across
workloads.
- ``ApkWorkload``: Added an ``activity`` attribute to allow for overriding the
automatically detected version from the APK.
- ``ApkWorkload``: Added support for providing an implicit activity path.
- Fixed retrieving job level artifacts from a database backend.
Output Processors:
------------------
- ``SysfsExtractor``: Ensure that the extracted directories are added as
``Artifacts``.
- ``InterruptStatsInstrument``: Ensure that the output files are added as
``Artifacts``.
- ``Postgres``: Fix missing ``system_id`` field from ``TargetInfo``.
- ``Postgres``: Support uploading directory ``Artifacts``.
- ``Postgres``: Bump the schema version to v1.3.
Workloads:
----------
- ``geekbench``: Improved apk version handling.
- ``geekbench``: Now supports apk version 4.3.2.
Other:
------
- ``Dockerfile``: Now installs all optional extras for use with WA.
- Fixed support for YAML anchors.
- Fixed building of documentation with Python 3.
- Changed shorthand of installing all of WA extras to `all` as per
the documentation.
- Upgraded the Dockerfile to use Ubuntu 18.10 and Python 3.
- Restricted maximum versions of ``numpy`` and ``pandas`` for Python 2.7.
*************
Version 3.1.3
*************
Fixes/Improvements
==================
Other:
------
- Security update for PyYAML to attempt prevention of arbitrary code execution
during parsing.
*************
Version 3.1.2
*************
Fixes/Improvements
==================
Framework:
----------
- Implement an explicit check for Devlib versions to ensure that versions
are kept in sync with each other.
- Added a ``View`` parameter to ApkWorkloads for use with certain instruments
for example ``fps``.
- Added ``"supported_versions"`` attribute to workloads to allow specifying a
list of supported version for a particular workload.
- Change default behaviour to run any available version of a workload if a
specific version is not specified.
Output Processors:
------------------
- ``Postgres``: Fix handling of ``screen_resoultion`` during processing.
Other
-----
- Added additional information to documentation
- Added fix for Devlib's ``KernelConfig`` refactor
- Added a ``"label"`` property to ``Metrics``
*************
Version 3.1.1
*************
Fixes/Improvements
==================
Other
-----
- Improve formatting when displaying metrics
- Update revent binaries to include latest fixes
- Update DockerImage to use new released version of WA and Devlib
- Fix broken package on PyPi
*************
Version 3.1.0
*************

@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2023 ARM Limited
# Copyright 2018 ARM Limited
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@ -68,7 +68,7 @@ master_doc = 'index'
# General information about the project.
project = u'wa'
copyright = u'2023, ARM Limited'
copyright = u'2018, ARM Limited'
author = u'ARM Limited'
# The version info for the project you're documenting, acts as replacement for

File diff suppressed because one or more lines are too long

Binary image changed: 74 KiB before, 63 KiB after.

@ -47,10 +47,6 @@ submitting a pull request:
- If significant additions have been made to the framework, unit
tests should be added to cover the new functionality.
- If modifications have been made to the UI Automation source of a workload, the
corresponding APK should be rebuilt and submitted as part of the same pull
request. This can be done via the ``build.sh`` script in the relevant
``uiauto`` subdirectory.
- If modifications have been made to documentation (this includes description
attributes for Parameters and Extensions), documentation should be built to
make sure no errors or warning during build process, and a visual inspection

@ -37,8 +37,8 @@ This section contains reference information common to plugins of all types.
The Context
~~~~~~~~~~~
.. note:: For clarification on the meaning of "workload specification" "spec", "job"
and "workload" and the distinction between them, please see the :ref:`glossary <glossary>`.
.. note:: For clarification on the meaning of "workload specification" ("spec"), "job"
and "workload" and the distiction between them, please see the :ref:`glossary <glossary>`.
The majority of methods in plugins accept a context argument. This is an
instance of :class:`wa.framework.execution.ExecutionContext`. It contains
@ -119,7 +119,7 @@ context.output_directory
This is the output directory for the current iteration. This will be an
iteration-specific subdirectory under the main results location. If
there is no current iteration (e.g. when processing overall run results)
this will point to the same location as ``run_output_directory``.
this will point to the same location as ``root_output_directory``.
Additionally, the global ``wa.settings`` object exposes on other location:
@ -158,7 +158,7 @@ irrespective of the host's path notation. For example:
.. note:: Output processors, unlike workloads and instruments, do not have their
own target attribute as they are designed to be able to be run offline.
.. _plugin-parameters:
.. _plugin-parmeters:
Parameters
~~~~~~~~~~~

@ -5,12 +5,10 @@ Convention for Naming revent Files for Revent Workloads
-------------------------------------------------------------------------------
There is a convention for naming revent files which you should follow if you
want to record your own revent files. Each revent file must be called (case sensitive)
``<device name>.<stage>.revent``,
where ``<device name>`` is the name of your device (as defined by the model
name of your device which can be retrieved with
``adb shell getprop ro.product.model`` or by the ``name`` attribute of your
customized device class), and ``<stage>`` is one of the following currently
want to record your own revent files. Each revent file must start with the
device name(case sensitive) then followed by a dot '.' then the stage name
then '.revent'. All your custom revent files should reside at
``'~/.workload_automation/dependencies/WORKLOAD NAME/'``. These are the current
supported stages:
:setup: This stage is where the application is loaded (if present). It is
@ -28,12 +26,10 @@ Only the run stage is mandatory, the remaining stages will be replayed if a
recording is present otherwise no actions will be performed for that particular
stage.
All your custom revent files should reside at
``'$WA_USER_DIRECTORY/dependencies/WORKLOAD NAME/'``. So
typically to add a custom revent files for a device named "mydevice" and a
workload name "myworkload", you would need to add the revent files to the
directory ``~/.workload_automation/dependencies/myworkload/revent_files``
creating the directory structure if necessary. ::
For instance, to add a custom revent files for a device named "mydevice" and
a workload name "myworkload", you need to add the revent files to the directory
``/home/$WA_USER_HOME/dependencies/myworkload/revent_files`` creating it if
necessary. ::
mydevice.setup.revent
mydevice.run.revent
@ -336,6 +332,6 @@ recordings in scripts. Here is an example:
from wa.utils.revent import ReventRecording
with ReventRecording('/path/to/recording.revent') as recording:
print("Recording: {}".format(recording.filepath))
print("There are {} input events".format(recording.num_events))
print("Over a total of {} seconds".format(recording.duration))
print "Recording: {}".format(recording.filepath)
print "There are {} input events".format(recording.num_events)
print "Over a total of {} seconds".format(recording.duration)

will automatically generate a workload in your ``WA_CONFIG_DIR/plugins``. If
you wish to specify a custom location this can be provided with ``-p
<path>``
A typical invocation of the :ref:`create <create-command>` command would be in
the form::
wa create workload -k <workload_kind> <workload_name>
.. _adding-a-basic-workload-example:
Adding a Basic Workload
-----------------------
To add a ``basic`` workload template for our example workload we can simply use the
command::
To add a basic workload you can simply use the command::
wa create workload -k basic ziptest
wa create workload basic
This will generate a very basic workload with dummy methods for each method in
the workload interface and it is left to the developer to add any required functionality.
This will generate a very basic workload with dummy methods for the workload
interface and it is left to the developer to add any required functionality to
the workload.
Not all the methods from the interface are required to be implemented, this
example shows how a subset might be used to implement a simple workload that
times how long it takes to compress a file of a particular size on the device.
Not all the methods are required to be implemented, this example shows how a
subset might be used to implement a simple workload that times how long it takes
to compress a file of a particular size on the device.
.. note:: This is intended as an example of how to implement the Workload
@ -93,15 +87,14 @@ in this example we are implementing a very simple workload and do not
require any additional features so shall inherit directly from the base
:class:`Workload` class. We then need to provide a ``name`` for our workload
which is what will be used to identify your workload for example in an
agenda or via the show command, if you used the `create` command this will
already be populated for you.
agenda or via the show command.
.. code-block:: python
import os
from wa import Workload, Parameter
class ZipTest(Workload):
class ZipTestWorkload(Workload):
name = 'ziptest'
@ -120,7 +113,7 @@ separated by a new line.
'''
In order to allow for additional configuration of the workload from a user a
list of :ref:`parameters <plugin-parameters>` can be supplied. These can be
list of :ref:`parameters <plugin-parmeters>` can be supplied. These can be
configured in a variety of different ways. For example here we are ensuring that
the value of the parameter is an integer and larger than 0 using the ``kind``
and ``constraint`` options, also if no value is provided we are providing a
@ -183,7 +176,7 @@ allow it to decide whether to keep the file or not.
# Pull the results file to the host
self.host_outfile = os.path.join(context.output_directory, 'timing_results')
self.target.pull(self.target_outfile, self.host_outfile)
context.add_artifact('ziptest-results', self.host_outfile, kind='raw')
context.add_artifact('ziptest-results', host_output_file, kind='raw')
The ``update_output`` method we can do any generation of metrics that we wish to
for our workload. In this case we are going to simply convert the times reported
@ -259,7 +252,7 @@ The full implementation of this workload would look something like:
# Pull the results file to the host
self.host_outfile = os.path.join(context.output_directory, 'timing_results')
self.target.pull(self.target_outfile, self.host_outfile)
context.add_artifact('ziptest-results', self.host_outfile, kind='raw')
context.add_artifact('ziptest-results', host_output_file, kind='raw')
def update_output(self, context):
super(ZipTestWorkload, self).update_output(context)
@ -492,10 +485,9 @@ Adding an Instrument
====================
This is an example of how we would create a instrument which will trace device
errors using a custom "trace" binary file. For more detailed information please see the
:ref:`Instrument Reference <instrument-reference>`. The first thing to do is to create
a new file under ``$WA_USER_DIRECTORY/plugins/`` and subclass
:class:`Instrument`. Make sure to overwrite the variable name with what we want our instrument
to be called and then locate our binary for the instrument.
:ref:`Instrument Reference <instrument-reference>`. The first thing to do is to subclass
:class:`Instrument`, overwrite the variable name with what we want our instrument
to be called and locate our binary for our instrument.
::
@ -503,8 +495,8 @@ to be called and then locate our binary for the instrument.
name = 'trace-errors'
def __init__(self, target, **kwargs):
super(TraceErrorsInstrument, self).__init__(target, **kwargs)
def __init__(self, target):
super(TraceErrorsInstrument, self).__init__(target)
self.binary_name = 'trace'
self.binary_file = os.path.join(os.path.dirname(__file__), self.binary_name)
self.trace_on_target = None
@ -541,20 +533,21 @@ again decorated the method. ::
Once we have generated our result data we need to retrieve it from the device
for further processing or adding directly to WA's output for that job. For
example for trace data we will want to pull it to the device and add it as a
:ref:`artifact <artifact>` to WA's :ref:`context <context>`. Once we have
retrieved the data, we can now do any further processing and add any relevant
:ref:`Metrics <metrics>` to the :ref:`context <context>`. For this we will use
the ``add_metric`` method to add the results to the final output for that
workload. The method can be passed 4 params, which are the metric `key`,
`value`, `unit` and `lower_is_better`. ::
:ref:`artifact <artifact>` to WA's :ref:`context <context>` as shown below::
def update_output(self, context):
def extract_results(self, context):
# pull the trace file from the target
self.result = os.path.join(self.target.working_directory, 'trace.txt')
self.outfile = os.path.join(context.output_directory, 'trace.txt')
self.target.pull(self.result, self.outfile)
context.add_artifact('error_trace', self.outfile, kind='export')
self.target.pull(self.result, context.working_directory)
context.add_artifact('error_trace', self.result, kind='export')
Once we have retrieved the data we can now do any further processing and add any
relevant :ref:`Metrics <metrics>` to the :ref:`context <context>`. For this we
will use the ``add_metric`` method to add the results to the final output
for that workload. The method can be passed 4 params, which are the metric
`key`, `value`, `unit` and `lower_is_better`. ::
def update_output(self, context):
# parse the file if needs to be parsed, or add result directly to
# context.
@ -574,14 +567,12 @@ At the very end of the run we would want to uninstall the binary we deployed ear
So the full example would look something like::
from wa import Instrument
class TraceErrorsInstrument(Instrument):
name = 'trace-errors'
def __init__(self, target, **kwargs):
super(TraceErrorsInstrument, self).__init__(target, **kwargs)
def __init__(self, target):
super(TraceErrorsInstrument, self).__init__(target)
self.binary_name = 'trace'
self.binary_file = os.path.join(os.path.dirname(__file__), self.binary_name)
self.trace_on_target = None
@ -597,12 +588,12 @@ So the full example would look something like::
def stop(self, context):
self.target.execute('{} stop'.format(self.trace_on_target))
def update_output(self, context):
def extract_results(self, context):
self.result = os.path.join(self.target.working_directory, 'trace.txt')
self.outfile = os.path.join(context.output_directory, 'trace.txt')
self.target.pull(self.result, self.outfile)
context.add_artifact('error_trace', self.outfile, kind='export')
self.target.pull(self.result, context.working_directory)
context.add_artifact('error_trace', self.result, kind='export')
def update_output(self, context):
metric = # ..
context.add_metric('number_of_errors', metric, lower_is_better=True)
@ -618,9 +609,8 @@ Adding an Output Processor
==========================
This is an example of how we would create an output processor which will format
the run metrics as a column-aligned table. The first thing to do is to create
a new file under ``$WA_USER_DIRECTORY/plugins/`` and subclass
:class:`OutputProcessor`. Make sure to overwrite the variable name with what we want our
the run metrics as a column-aligned table. The first thing to do is to subclass
:class:`OutputProcessor` and overwrite the variable name with what we want our
processor to be called and provide a short description.
Next we need to implement any relevant methods (please see
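As a rough sketch of the starting point described here (the processing
method's signature is assumed from the ``OutputProcessor`` API, and the
table-formatting body is left out):

.. code-block:: python

    from wa import OutputProcessor

    class Table(OutputProcessor):

        name = 'table'
        description = 'Formats the run metrics as a column-aligned table.'

        def process_run_output(self, output, target_info):  # assumed signature
            pass  # format output.metrics into a column-aligned table here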

@ -69,72 +69,7 @@ WA3 config file.
**Q:** My Juno board keeps resetting upon starting WA even if it hasn't crashed.
--------------------------------------------------------------------------------
**A** Please ensure that you do not have any other terminals (e.g. ``screen``
Please ensure that you do not have any other terminals (e.g. ``screen``
sessions) connected to the board's UART. When WA attempts to open the connection
for its own use this can cause the board to reset if a connection is already
present.
**Q:** I'm using the FPS instrument but I do not get any/correct results for my workload
-----------------------------------------------------------------------------------------
**A:** If your device is running with Android 6.0 + then the default utility for
collecting fps metrics will be ``gfxinfo`` however this does not seem to be able
to extract any meaningful information for some workloads. In this case please
try setting the ``force_surfaceflinger`` parameter for the ``fps`` augmentation
to ``True``. This will attempt to guess the "View" for the workload
automatically however this is device specific and therefore may need
customizing. If this is required please open the application and execute
``dumpsys SurfaceFlinger --list`` on the device via adb. This will provide a
list of all views available for measuring.
As an example, when trying to find the view for the AngryBirds Rio workload you
may get something like:
.. code-block:: none
...
AppWindowToken{41dfe54 token=Token{77819a7 ActivityRecord{a151266 u0 com.rovio.angrybirdsrio/com.rovio.fusion.App t506}}}#0
a3d001c com.rovio.angrybirdsrio/com.rovio.fusion.App#0
Background for -SurfaceView - com.rovio.angrybirdsrio/com.rovio.fusion.App#0
SurfaceView - com.rovio.angrybirdsrio/com.rovio.fusion.App#0
com.rovio.angrybirdsrio/com.rovio.fusion.App#0
boostedAnimationLayer#0
mAboveAppWindowsContainers#0
...
From these, ``"SurfaceView - com.rovio.angrybirdsrio/com.rovio.fusion.App#0"`` is
most likely the View that needs to be set as the ``view`` workload
parameter and will be picked up by the ``fps`` augmentation.
**Q:** I am getting an error which looks similar to ``'CONFIG_SND_BT87X is not exposed in kernel config'...``
-------------------------------------------------------------------------------------------------------------
**A:** If you are receiving this under normal operation this can be caused by a
mismatch of your WA and devlib versions. Please update both to their latest
versions and delete your ``$USER_HOME/.workload_automation/cache/targets.json``
(or equivalent) file.
**Q:** I get an error which looks similar to ``UnicodeDecodeError('ascii' codec can't decode byte...``
------------------------------------------------------------------------------------------------------
**A:** If you receive this error or a similar warning about your environment,
please ensure that you configure your environment to use a locale which supports
UTF-8. Otherwise this can cause issues when attempting to parse files containing
non-ASCII characters.
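A quick diagnostic sketch to check what encoding WA's Python environment will
default to (this checks the symptom only; the fix is to export a UTF-8 locale,
e.g. via ``LANG``/``LC_ALL``):

.. code-block:: python

    import locale

    # should report 'UTF-8' (or 'utf-8') on a correctly configured system
    print(locale.getpreferredencoding())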
**Q:** I get the error ``Module "X" failed to install on target``
------------------------------------------------------------------------------------------------------
**A:** By default a set of devlib modules will be automatically loaded onto the
target designed to add additional functionality. If the functionality provided
by the module is not required then the module can be safely disabled by setting
``load_default_modules`` to ``False`` in the ``device_config`` entry of the
:ref:`agenda <config-agenda-entry>` and then re-enabling any specific modules
that are still required. An example agenda snippet is shown below:
.. code-block:: none
config:
device: generic_android
device_config:
load_default_modules: False
modules: ['list', 'of', 'modules', 'to', 'enable']

@ -13,11 +13,10 @@ these signals are dispatched during execution please see the
$signal_names
The methods above may be decorated with on the listed decorators to set the
priority (a value in the ``wa.framework.signal.CallbackPriority`` enum) of the
Instrument method relative to other callbacks registered for the signal (within
the same priority level, callbacks are invoked in the order they were
registered). The table below shows the mapping of the decorator to the
corresponding priority name and level:
priority of the Instrument method relative to other callbacks registered for the
signal (within the same priority level, callbacks are invoked in the order they
were registered). The table below shows the mapping of the decorator to the
corresponding priority:
$priority_prefixes

@ -16,7 +16,7 @@ Configuration
Default configuration file change
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Instead of the standard ``config.py`` file located at
``$WA_USER_DIRECTORY/config.py`` WA now uses a ``confg.yaml`` file (at the same
``$WA_USER_HOME/config.py`` WA now uses a ``confg.yaml`` file (at the same
location) which is written in the YAML format instead of python. Additionally
upon first invocation WA3 will automatically try and detect whether a WA2 config
file is present and convert it to use the new WA3 format. During this process

@ -690,7 +690,7 @@ Workload-specific augmentation
It is possible to enable or disable (but not configure) augmentations at
workload or section level, as well as in the global config, in which case, the
augmentations would only be enabled/disabled for that workload/section. If the
same augmentation is enabled at one level and disabled at another, as with all
same augmentation is enabled at one level and disabled at another, as will all
WA configuration, the more specific settings will take precedence over the less
specific ones (i.e. workloads override sections that, in turn, override global
config).

@ -17,8 +17,6 @@ further configuration will be required.
Android
-------
.. _android-general-device-setup:
General Device Setup
^^^^^^^^^^^^^^^^^^^^
@ -46,15 +44,12 @@ common parameters you might want to change are outlined below.
Android builds. If this is not the case for your device, you will need to
specify an alternative working directory (e.g. under ``/data/local``).
:load_default_modules: A number of "default" modules (e.g. for cpufreq
subsystem) are loaded automatically, unless explicitly disabled. If you
encounter an issue with one of the modules then this setting can be set to
``False`` and any specific modules that you require can be request via the
``modules`` entry.
:modules: A list of additional modules to be installed for the target. Devlib
implements functionality for particular subsystems as modules. If additional
modules need to be loaded, they may be specified using this parameter.
implements functionality for particular subsystems as modules. A number of
"default" modules (e.g. for cpufreq subsystem) are loaded automatically,
unless explicitly disabled. If additional modules need to be loaded, they
may be specified using this parameter.
Please see the `devlib documentation <http://devlib.readthedocs.io/en/latest/modules.html>`_
for information on the available modules.
@ -88,7 +83,6 @@ or a more specific config could be:
device_config:
device: 0123456789ABCDEF
working_direcory: '/sdcard/wa-working'
load_default_modules: True
modules: ['hotplug', 'cpufreq']
core_names : ['a7', 'a7', 'a7', 'a15', 'a15']
# ...

@ -14,9 +14,9 @@ Using revent with workloads
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Some workloads (pretty much all games) rely on recorded revents for their
execution. ReventWorkloads require between 1 and 4 revent files to be run.
There is one mandatory recording, ``run``, for performing the actual execution of
the workload and the remaining stages are optional. ``setup`` can be used to perform
execution. ReventWorkloads will require between 1 and 4 revent files be be ran.
There is one mandatory recording ``run`` for performing the actual execution of
the workload and the remaining are optional. ``setup`` can be used to perform
the initial setup (navigating menus, selecting game modes, etc).
``extract_results`` can be used to perform any actions after the main stage of
the workload for example to navigate a results or summary screen of the app. And
@ -26,21 +26,17 @@ exiting the app.
Because revents are very device-specific\ [*]_, these files would need to
be recorded for each device.
The files must be called ``<device name>.(setup|run|extract_results|teardown).revent``,
where ``<device name>`` is the name of your device (as defined by the model
name of your device which can be retrieved with
``adb shell getprop ro.product.model`` or by the ``name`` attribute of your
customized device class).
WA will look for these files in two places:
``<installdir>/wa/workloads/<workload name>/revent_files`` and
``$WA_USER_DIRECTORY/dependencies/<workload name>``. The
first location is primarily intended for revent files that come with WA (and if
The files must be called ``<device name>.(setup|run|extract_results|teardown).revent``
, where ``<device name>`` is the name of your device (as defined by the ``name``
attribute of your device's class). WA will look for these files in two
places: ``<install dir>/wa/workloads/<workload name>/revent_files``
and ``~/.workload_automation/dependencies/<workload name>``. The first
location is primarily intended for revent files that come with WA (and if
you did a system-wide install, you'll need sudo to add files there), so it's
probably easier to use the second location for the files you record. Also, if
revent files for a workload exist in both locations, the files under
``$WA_USER_DIRECTORY/dependencies`` will be used in favour
of those installed with WA.
probably easier to use the second location for the files you record. Also,
if revent files for a workload exist in both locations, the files under
``~/.workload_automation/dependencies`` will be used in favour of those
installed with WA.
.. [*] It's not just about screen resolution -- the event codes may be different
even if devices use the same screen.

@ -12,9 +12,8 @@ Installation
.. module:: wa
This page describes the 3 methods of installing Workload Automation 3. The first
option is to use :ref:`pip` which will install the latest release of WA, the
latest development version from :ref:`github <github>` or via a
:ref:`dockerfile`.
option is to use :ref:`pip` which
will install the latest release of WA, the latest development version from :ref:`github <github>` or via a :ref:`dockerfile`.
Prerequisites
@ -23,11 +22,11 @@ Prerequisites
Operating System
----------------
WA runs on a native Linux install. It has been tested on recent Ubuntu releases,
but other recent Linux distributions should work as well. It should run on
either 32-bit or 64-bit OS, provided the correct version of dependencies (see
below) are installed. Officially, **other environments are not supported**.
WA has been known to run on Linux Virtual machines and in Cygwin environments,
WA runs on a native Linux install. It was tested with Ubuntu 14.04,
but any recent Linux distribution should work. It should run on either
32-bit or 64-bit OS, provided the correct version of Android (see below)
was installed. Officially, **other environments are not supported**. WA
has been known to run on Linux Virtual machines and in Cygwin environments,
though additional configuration may be required in both cases (known issues
include making sure USB/serial connections are passed to the VM, and wrong
python/pip binaries being picked up in Cygwin). WA *should* work on other
@ -46,8 +45,7 @@ possible to get limited functionality with minimal porting effort).
Android SDK
-----------
To interact with Android devices you will need to have the Android SDK
with at least one platform installed.
You need to have the Android SDK with at least one platform installed.
To install it, download the ADT Bundle from here_. Extract it
and add ``<path_to_android_sdk>/sdk/platform-tools`` and ``<path_to_android_sdk>/sdk/tools``
to your ``PATH``. To test that you've installed it properly, run ``adb
@ -74,11 +72,7 @@ the install location of the SDK (i.e. ``<path_to_android_sdk>/sdk``).
Python
------
Workload Automation 3 currently supports Python 3.5+
.. note:: If your system's default python version is still Python 2, please
replace the commands listed here with their Python3 equivalent
(e.g. python3, pip3 etc.)
Workload Automation 3 currently supports both Python 2.7 and Python 3.
.. _pip:
@ -100,11 +94,11 @@ similar distributions, this may be done with APT::
sudo -H pip install --upgrade pip
sudo -H pip install --upgrade setuptools
If you do run into this issue after already installing some packages,
If you do run into this issue after already installing some packages,
you can resolve it by running ::
sudo chmod -R a+r /usr/local/lib/python3.X/dist-packages
sudo find /usr/local/lib/python3.X/dist-packages -type d -exec chmod a+x {} \;
sudo chmod -R a+r /usr/local/lib/python2.7/dist-packagessudo
find /usr/local/lib/python2.7/dist-packages -type d -exec chmod a+x {} \;
(The paths above will work for Ubuntu; they may need to be adjusted
for other distros).
install them upfront (e.g. if you're planning to use WA in an environment that
may not always have Internet access).
* nose
* mock
* daqpower
* sphinx
* sphinx_rtd_theme
* psycopg2-binary
* PyDAQmx
* pymongo
* jinja2
@ -192,12 +184,12 @@ Installing
Installing the latest released version from PyPI (Python Package Index)::
sudo -H pip install wlauto
sudo -H pip install wa
This will install WA along with its mandatory dependencies. If you would like to
install all optional dependencies at the same time, do the following instead::
sudo -H pip install wlauto[all]
sudo -H pip install wa[all]
Alternatively, you can also install the latest development version from GitHub
@ -207,18 +199,6 @@ Alternatively, you can also install the latest development version from GitHub
cd workload-automation
sudo -H python setup.py install
.. note:: Please note that if using pip to install from github this will most
likely result in an older and incompatible version of devlib being
installed alongside WA. If you wish to use pip please also manually
install the latest version of
`devlib <https://github.com/ARM-software/devlib>`_.
.. note:: Please note that while a `requirements.txt` is included, this is
designed to be a reference of known working packages rather than to be
used as part of a standard installation. The version restrictions in
place as part of `setup.py` should automatically ensure the correct
packages are installed; however, if encountering issues please try
updating/downgrading to the package versions listed within.
If the above succeeds, try ::
@ -242,7 +222,7 @@ image in a container.
The Dockerfile can be found in the "extras" directory or online at
`<https://github.com/ARM-software/workload-automation/blob/next/extras/Dockerfile>`_
which contains additional information about how to build and to use the file.
which contains addional information about how to build and to use the file.
(Optional) Post Installation

@ -20,7 +20,7 @@ Install
.. note:: This is a quick summary. For more detailed instructions, please see
the :ref:`installation` section.
Make sure you have Python 3.5+ and a recent Android SDK with API
Make sure you have Python 2.7 or Python 3 and a recent Android SDK with API
level 18 or above installed on your system. A complete install of the Android
SDK is required, as WA uses a number of its utilities, not just adb. For the
SDK, make sure that either ``ANDROID_HOME`` environment variable is set, or that
@ -125,7 +125,7 @@ There are multiple options for configuring your device depending on your
particular use case.
You can either add your configuration to the default configuration file
``config.yaml``, under the ``$WA_USER_DIRECTORY/`` directory or you can specify it in
``config.yaml``, under the ``$WA_USER_HOME/`` directory or you can specify it in
the ``config`` section of your agenda directly.
Alternatively if you are using multiple devices, you may want to create separate
@ -318,7 +318,7 @@ like this:
config:
augmentations:
- ~execution_time
- targz
- json
iterations: 2
workloads:
- memcpy
@ -332,7 +332,7 @@ This agenda:
- Specifies two workloads: memcpy and dhrystone.
- Specifies that dhrystone should run in one thread and execute five million loops.
- Specifies that each of the two workloads should be run twice.
- Enables the targz output processor, in addition to the output processors enabled in
- Enables the json output processor, in addition to the output processors enabled in
the config.yaml.
- Disables the execution_time instrument, if it is enabled in the config.yaml (the ``~`` toggle semantics are sketched below)
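A minimal sketch of how an agenda's augmentations combine with those already
enabled in config.yaml (illustrative only; WA's real implementation lives in
its ``toggle_set`` type)::

    config_augmentations = {'execution_time', 'csv'}    # assumed config.yaml set
    agenda_augmentations = ['~execution_time', 'json']  # from the agenda above

    effective = set(config_augmentations)
    for name in agenda_augmentations:
        if name.startswith('~'):
            effective.discard(name[1:])   # '~execution_time' disables it
        else:
            effective.add(name)           # 'json' enables it

    print(sorted(effective))              # ['csv', 'json']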
@ -352,13 +352,13 @@ in-depth information please see the :ref:`Create Command <create-command>` docum
In order to populate the agenda with relevant information you can supply all of
the plugins you wish to use as arguments to the command, for example if we want
to create an agenda file for running ``dhrystone`` on a `generic_android` device and we
to create an agenda file for running ``dhrystone`` on a 'generic android' device and we
want to enable the ``execution_time`` and ``trace-cmd`` instruments and display the
metrics using the ``csv`` output processor, we would use the following command::
wa create agenda generic_android dhrystone execution_time trace-cmd csv -o my_agenda.yaml
This will produce a ``my_agenda.yaml`` file containing all the relevant
This will produce a `my_agenda.yaml` file containing all the relevant
configuration for the specified plugins along with their default values as shown
below:
@ -373,7 +373,6 @@ below:
device: generic_android
device_config:
adb_server: null
adb_port: null
big_core: null
core_clusters: null
core_names: null
@ -400,7 +399,6 @@ below:
no_install: false
report: true
report_on_target: false
mode: write-to-memory
csv:
extra_columns: null
use_all_classifiers: false
@ -485,14 +483,14 @@ that parses the contents of the output directory:
>>> ro = RunOutput('./wa_output')
>>> for job in ro.jobs:
... if job.status != 'OK':
... print('Job "{}" did not complete successfully: {}'.format(job, job.status))
... print 'Job "{}" did not complete successfully: {}'.format(job, job.status)
... continue
... print('Job "{}":'.format(job))
... print 'Job "{}":'.format(job)
... for metric in job.metrics:
... if metric.units:
... print('\t{}: {} {}'.format(metric.name, metric.value, metric.units))
... print '\t{}: {} {}'.format(metric.name, metric.value, metric.units)
... else:
... print('\t{}: {}'.format(metric.name, metric.value))
... print '\t{}: {}'.format(metric.name, metric.value)
...
Job "wk1-dhrystone-1":
thread 0 score: 20833333
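The same API works in a standalone script. A short sketch (assuming the same
``wa_output`` directory as above) that gathers every job's metrics into a CSV
file::

    import csv

    from wa.framework.output import RunOutput

    ro = RunOutput('./wa_output')
    with open('metrics.csv', 'w', newline='') as fh:
        writer = csv.writer(fh)
        writer.writerow(['job', 'status', 'metric', 'value', 'units'])
        for job in ro.jobs:
            for metric in job.metrics:
                writer.writerow([job.id, str(job.status), metric.name,
                                 metric.value, metric.units or ''])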

@ -30,7 +30,7 @@ An example agenda can be seen here:
device: generic_android
device_config:
device: R32C801B8XY # The adb name of our device we want to run on
disable_selinux: true
load_default_modules: true
package_data_directory: /data/data
@ -45,7 +45,6 @@ An example agenda can be seen here:
no_install: false
report: true
report_on_target: false
mode: write-to-disk
csv: # Provide config for the csv augmentation
use_all_classifiers: true
@ -117,9 +116,7 @@ whole will behave. The most common options that you may want to specify are
to connect to (e.g. ``host`` for an SSH connection or
``device`` to specific an ADB name) as well as configure other
options for the device for example the ``working_directory``
or the list of ``modules`` to be loaded onto the device. (For
more information please see
:ref:`here <android-general-device-setup>`)
or the list of ``modules`` to be loaded onto the device.
:execution_order: Defines the order in which the agenda spec will be executed.
:reboot_policy: Defines when during execution of a run a Device will be rebooted.
:max_retries: The maximum number of times failed jobs will be retried before giving up.
@ -127,7 +124,7 @@ whole will behave. The most common options that you may want to specify are
For more information and a full list of these configuration options please see
:ref:`Run Configuration <run-configuration>` and
:ref:`Meta Configuration <meta-configuration>`.
:ref:`"Meta Configuration" <meta-configuration>`.
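For illustration, a sketch that assembles a config section combining several
of the options above and parses it with PyYAML (key names follow the
documentation above; the values are examples only)::

    import yaml

    config_text = """
    config:
        device: generic_android
        device_config:
            device: emulator-5554              # ADB name (example)
            working_directory: /data/local/tmp
        execution_order: by_iteration
        reboot_policy: never
        max_retries: 2
    """
    run_config = yaml.safe_load(config_text)['config']
    assert run_config['max_retries'] == 2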
Plugins

@ -40,7 +40,7 @@ Will display help for this subcommand that will look something like this:
AGENDA Agenda for this workload automation run. This defines
which workloads will be executed, how many times, with
which tunables, etc. See example agendas in
/usr/local/lib/python3.X/dist-packages/wa for an
/usr/local/lib/python2.7/dist-packages/wa for an
example of how this file should be structured.
optional arguments:

@ -33,7 +33,6 @@ states.
iterations: 1
runtime_parameters:
screen_on: false
unlock_screen: 'vertical'
- name: benchmarkpi
iterations: 1
sections:
@ -209,13 +208,6 @@ Android Specific Runtime Parameters
:screen_on: A ``boolean`` to specify whether the device's screen should be
turned on. Defaults to ``True``.
:unlock_screen: A ``String`` to specify how the device's screen should be
unlocked. Unlocking the screen is disabled by default. ``vertical``, ``diagonal``
and ``horizontal`` are the supported values (see :meth:`devlib.AndroidTarget.swipe_to_unlock`).
Note that unlocking only succeeds when no passcode is set. Since unlocking
requires turning on the screen, this option overrides the value of the
``screen_on`` option.
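A hedged example of the two parameters together, expressed as an agenda entry
and parsed with PyYAML (the values are illustrative)::

    import yaml

    entry = yaml.safe_load("""
    name: benchmarkpi
    iterations: 1
    runtime_parameters:
        screen_on: true          # unlocking implies the screen is on
        unlock_screen: vertical  # or 'diagonal' / 'horizontal'
    """)
    assert entry['runtime_parameters']['unlock_screen'] == 'vertical'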
.. _setting-sysfiles:
Setting Sysfiles

@ -6,7 +6,7 @@
#
# docker build -t wa .
#
# This will create an image called wa, which is preconfigured to
# This will create an image called wadocker, which is preconfigured to
# run WA and devlib. Please note that the build process automatically
# accepts the licenses for the Android SDK, so please be sure that you
# are willing to accept these prior to building and running the image
@ -17,13 +17,6 @@
#
# docker run -it --privileged -v /dev/bus/usb:/dev/bus/usb --volume ${PWD}:/workspace --workdir /workspace wa
#
# If using selinux you may need to add the `z` option when mounting
# volumes e.g.:
# --volume ${PWD}:/workspace:z
# Warning: Please ensure you do not use this option when mounting
# system directories. For more information please see:
# https://docs.docker.com/storage/bind-mounts/#configure-the-selinux-label
#
# The above command starts the container in privileged mode, with
# access to USB devices. The current directory is mounted into the
# image, allowing you to work from there. Any files written to this
@ -39,80 +32,27 @@
#
# When you are finished, please run `exit` to leave the container.
#
# The relevant environment variables are stored in a separate
# file which is automatically sourced in an interactive shell.
# If running from a non-interactive environment this can
# be manually sourced with `source /home/wa/.wa_environment`
#
# NOTE: Please make sure that the ADB server is NOT running on the
# host. If in doubt, run `adb kill-server` before running the docker
# container.
#
# We want to make sure to base this on a recent ubuntu release
FROM ubuntu:20.04
FROM ubuntu:17.10
# Please update the references below to use different versions of
# devlib, WA or the Android SDK
ARG DEVLIB_REF=v1.3.4
ARG WA_REF=v3.3.1
ARG DEVLIB_REF=v1.0.0
ARG WA_REF=v3.0.0
ARG ANDROID_SDK_URL=https://dl.google.com/android/repository/sdk-tools-linux-3859397.zip
# Set a default timezone to use
ENV TZ=Europe/London
ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install -y \
apache2-utils \
bison \
cmake \
curl \
emacs \
flex \
git \
libcdk5-dev \
libiio-dev \
libxml2 \
libxml2-dev \
locales \
nano \
openjdk-8-jre-headless \
python3 \
python3-pip \
ssh \
sshpass \
sudo \
trace-cmd \
usbutils \
vim \
wget \
zip
# Clone and download iio-capture
RUN git clone -v https://github.com/BayLibre/iio-capture.git /tmp/iio-capture && \
cd /tmp/iio-capture && \
make && \
make install
RUN pip3 install pandas
# Ensure we're using utf-8 as our default encoding
RUN locale-gen en_US.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US:en
ENV LC_ALL en_US.UTF-8
RUN apt-get update
RUN apt-get install -y python-pip git wget zip openjdk-8-jre-headless vim emacs nano curl sshpass ssh usbutils
RUN pip install pandas
# Let's get the two repos we need, and install them
RUN git clone -v https://github.com/ARM-software/devlib.git /tmp/devlib && \
cd /tmp/devlib && \
git checkout $DEVLIB_REF && \
python3 setup.py install && \
pip3 install .[full]
RUN git clone -v https://github.com/ARM-software/workload-automation.git /tmp/wa && \
cd /tmp/wa && \
git checkout $WA_REF && \
python3 setup.py install && \
pip3 install .[all]
RUN git clone -v https://github.com/ARM-software/devlib.git /tmp/devlib && cd /tmp/devlib && git checkout $DEVLIB_REF && python setup.py install
RUN git clone -v https://github.com/ARM-software/workload-automation.git /tmp/wa && cd /tmp/wa && git checkout $WA_REF && python setup.py install
# Clean-up
RUN rm -R /tmp/devlib /tmp/wa
@ -126,19 +66,10 @@ RUN mkdir -p /home/wa/.android
RUN mkdir -p /home/wa/AndroidSDK && cd /home/wa/AndroidSDK && wget $ANDROID_SDK_URL -O sdk.zip && unzip sdk.zip
RUN cd /home/wa/AndroidSDK/tools/bin && yes | ./sdkmanager --licenses && ./sdkmanager platform-tools && ./sdkmanager 'build-tools;27.0.3'
# Download Monsoon
RUN mkdir -p /home/wa/monsoon
RUN curl https://android.googlesource.com/platform/cts/+/master/tools/utils/monsoon.py\?format\=TEXT | base64 --decode > /home/wa/monsoon/monsoon.py
RUN chmod +x /home/wa/monsoon/monsoon.py
# Update WA's required environment variables.
RUN echo 'export PATH=/home/wa/monsoon:${PATH}' >> /home/wa/.wa_environment
RUN echo 'export PATH=/home/wa/AndroidSDK/platform-tools:${PATH}' >> /home/wa/.wa_environment
RUN echo 'export PATH=/home/wa/AndroidSDK/build-tools:${PATH}' >> /home/wa/.wa_environment
RUN echo 'export ANDROID_HOME=/home/wa/AndroidSDK' >> /home/wa/.wa_environment
# Source WA environment variables in an interactive environment
RUN echo 'source /home/wa/.wa_environment' >> /home/wa/.bashrc
# Update the path
RUN echo 'export PATH=/home/wa/AndroidSDK/platform-tools:${PATH}' >> /home/wa/.bashrc
RUN echo 'export PATH=/home/wa/AndroidSDK/build-tools:${PATH}' >> /home/wa/.bashrc
RUN echo 'export ANDROID_HOME=/home/wa/AndroidSDK' >> /home/wa/.bashrc
# Generate some ADB keys. These will change each time the image is built but will otherwise persist.
RUN /home/wa/AndroidSDK/platform-tools/adb keygen /home/wa/.android/adbkey

@ -43,7 +43,7 @@ ignore=external
# https://bitbucket.org/logilab/pylint/issue/232/wrong-hanging-indentation-false-positive
# TODO: disabling no-value-for-parameter and logging-format-interpolation, as they appear to be broken
# in version 1.4.1 and return a lot of false positives; should be re-enabled once fixed.
disable=C0301,C0103,C0111,W0142,R0903,R0904,R0922,W0511,W0141,I0011,R0921,W1401,C0330,no-value-for-parameter,logging-format-interpolation,no-else-return,inconsistent-return-statements,keyword-arg-before-vararg,consider-using-enumerate,no-member,super-with-arguments,useless-object-inheritance,raise-missing-from,no-else-raise,no-else-break,no-else-continue
disable=C0301,C0103,C0111,W0142,R0903,R0904,R0922,W0511,W0141,I0011,R0921,W1401,C0330,no-value-for-parameter,logging-format-interpolation,no-else-return,inconsistent-return-statements,keyword-arg-before-vararg,consider-using-enumerate,no-member
[FORMAT]
max-module-lines=4000

@ -1,3 +0,0 @@
[pytest]
filterwarnings=
ignore::DeprecationWarning:past[.*]

@ -1,30 +0,0 @@
bcrypt==4.0.1
certifi==2024.7.4
cffi==1.15.1
charset-normalizer==3.1.0
colorama==0.4.6
cryptography==43.0.1
devlib==1.3.4
future==0.18.3
idna==3.7
Louie-latest==1.3.1
lxml==4.9.2
nose==1.3.7
numpy==1.24.3
pandas==2.0.1
paramiko==3.4.0
pexpect==4.8.0
ptyprocess==0.7.0
pycparser==2.21
PyNaCl==1.5.0
pyserial==3.5
python-dateutil==2.8.2
pytz==2023.3
PyYAML==6.0
requests==2.32.0
scp==0.14.5
six==1.16.0
tzdata==2023.3
urllib3==1.26.19
wlauto==3.3.1
wrapt==1.15.0

@ -29,8 +29,7 @@ except ImportError:
wa_dir = os.path.join(os.path.dirname(__file__), 'wa')
sys.path.insert(0, os.path.join(wa_dir, 'framework'))
from version import (get_wa_version, get_wa_version_with_commit,
format_version, required_devlib_version)
from version import get_wa_version, get_wa_version_with_commit
# happens if falling back to distutils
warnings.filterwarnings('ignore', "Unknown distribution option: 'install_requires'")
@ -62,14 +61,9 @@ for root, dirs, files in os.walk(wa_dir):
scripts = [os.path.join('scripts', s) for s in os.listdir('scripts')]
with open("README.rst", "r") as fh:
long_description = fh.read()
devlib_version = format_version(required_devlib_version)
params = dict(
name='wlauto',
description='A framework for automating workload execution and measurement collection on ARM devices.',
long_description=long_description,
version=get_wa_version_with_commit(),
packages=packages,
package_data=data_files,
@ -79,45 +73,42 @@ params = dict(
license='Apache v2',
maintainer='ARM Architecture & Technology Device Lab',
maintainer_email='workload-automation@arm.com',
python_requires='>= 3.7',
setup_requires=[
'numpy<=1.16.4; python_version<"3"',
'numpy; python_version>="3"',
'numpy'
],
install_requires=[
'python-dateutil', # converting between UTC and local time.
'pexpect>=3.3', # Send/receive to/from device
'pyserial', # Serial port interface
'colorama', # Printing with colors
'pyYAML>=5.1b3', # YAML-formatted agenda parsing
'pyYAML', # YAML-formatted agenda parsing
'requests', # Fetch assets over HTTP
'devlib>={}'.format(devlib_version), # Interacting with devices
'devlib>=1.1.0', # Interacting with devices
'louie-latest', # callbacks dispatch
'wrapt', # better decorators
'pandas>=0.23.0,<=0.24.2; python_version<"3.5.3"', # Data analysis and manipulation
'pandas>=0.23.0; python_version>="3.5.3"', # Data analysis and manipulation
'pandas>=0.23.0', # Data analysis and manipulation
'future', # Python 2-3 compatibility
],
dependency_links=['https://github.com/ARM-software/devlib/tarball/master#egg=devlib-{}'.format(devlib_version)],
extras_require={
'other': ['jinja2'],
'test': ['nose', 'mock'],
'mongodb': ['pymongo'],
'notify': ['notify2'],
'doc': ['sphinx', 'sphinx_rtd_theme'],
'postgres': ['psycopg2-binary'],
'daq': ['daqpower'],
'doc': ['sphinx'],
},
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Development Status :: 4 - Beta',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
],
)
all_extras = list(chain(iter(params['extras_require'].values())))
params['extras_require']['all'] = all_extras
params['extras_require']['everything'] = all_extras
class sdist(orig_sdist):
@ -131,6 +122,7 @@ class sdist(orig_sdist):
orig_sdist.initialize_options(self)
self.strip_commit = False
def run(self):
if self.strip_commit:
self.distribution.get_version = get_wa_version

@ -17,7 +17,7 @@
from wa import Plugin
class MockDevice(Plugin):
class TestDevice(Plugin):
name = 'test-device'
kind = 'device'

@ -18,6 +18,7 @@
# pylint: disable=R0201
import os
import sys
import yaml
from collections import defaultdict
from unittest import TestCase
@ -30,7 +31,6 @@ os.environ['WA_USER_DIRECTORY'] = os.path.join(DATA_DIR, 'includes')
from wa.framework.configuration.execution import ConfigManager
from wa.framework.configuration.parsers import AgendaParser
from wa.framework.exception import ConfigError
from wa.utils.serializer import yaml
from wa.utils.types import reset_all_counters
@ -44,6 +44,8 @@ workloads:
workload_parameters:
test: 1
"""
invalid_agenda = yaml.load(invalid_agenda_text)
invalid_agenda.name = 'invalid1'
duplicate_agenda_text = """
global:
@ -56,10 +58,14 @@ workloads:
- id: "1"
workload_name: benchmarkpi
"""
duplicate_agenda = yaml.load(duplicate_agenda_text)
duplicate_agenda.name = 'invalid2'
short_agenda_text = """
workloads: [antutu, dhrystone, benchmarkpi]
"""
short_agenda = yaml.load(short_agenda_text)
short_agenda.name = 'short'
default_ids_agenda_text = """
workloads:
@ -72,6 +78,8 @@ workloads:
cpus: 1
- vellamo
"""
default_ids_agenda = yaml.load(default_ids_agenda_text)
default_ids_agenda.name = 'default_ids'
sectioned_agenda_text = """
sections:
@ -94,6 +102,8 @@ sections:
workloads:
- memcpy
"""
sectioned_agenda = yaml.load(sectioned_agenda_text)
sectioned_agenda.name = 'sectioned'
dup_sectioned_agenda_text = """
sections:
@ -106,22 +116,8 @@ sections:
workloads:
- memcpy
"""
yaml_anchors_agenda_text = """
workloads:
- name: dhrystone
params: &dhrystone_single_params
cleanup_assets: true
cpus: 0
delay: 3
duration: 0
mloops: 10
threads: 1
- name: dhrystone
params:
<<: *dhrystone_single_params
threads: 4
"""
dup_sectioned_agenda = yaml.load(dup_sectioned_agenda_text)
dup_sectioned_agenda.name = 'dup-sectioned'
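# Illustrative sketch (not part of the original tests): the '<<' merge key
# used in yaml_anchors_agenda_text above resolves with plain PyYAML as well
# (wa.utils.serializer.yaml offers a similar interface), so the second
# dhrystone entry inherits every param from the anchor and overrides only
# 'threads'.
import yaml as _pyyaml

_wl = _pyyaml.safe_load("""
workloads:
    - name: dhrystone
      params: &base {delay: 3, threads: 1}
    - name: dhrystone
      params:
          <<: *base
          threads: 4
""")['workloads']
assert _wl[1]['params'] == {'delay': 3, 'threads': 4}  # inherited + override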
class AgendaTest(TestCase):
@ -136,8 +132,6 @@ class AgendaTest(TestCase):
assert_equal(len(self.config.jobs_config.root_node.workload_entries), 4)
def test_duplicate_id(self):
duplicate_agenda = yaml.load(duplicate_agenda_text)
try:
self.parser.load(self.config, duplicate_agenda, 'test')
except ConfigError as e:
@ -146,8 +140,6 @@ class AgendaTest(TestCase):
raise Exception('ConfigError was not raised for an agenda with duplicate ids.')
def test_yaml_missing_field(self):
invalid_agenda = yaml.load(invalid_agenda_text)
try:
self.parser.load(self.config, invalid_agenda, 'test')
except ConfigError as e:
@ -156,26 +148,20 @@ class AgendaTest(TestCase):
raise Exception('ConfigError was not raised for an invalid agenda.')
def test_defaults(self):
short_agenda = yaml.load(short_agenda_text)
self.parser.load(self.config, short_agenda, 'test')
workload_entries = self.config.jobs_config.root_node.workload_entries
assert_equal(len(workload_entries), 3)
assert_equal(workload_entries[0].config['workload_name'], 'antutu')
assert_equal(workload_entries[0].id, 'wk1')
def test_default_id_assignment(self):
default_ids_agenda = yaml.load(default_ids_agenda_text)
self.parser.load(self.config, default_ids_agenda, 'test2')
workload_entries = self.config.jobs_config.root_node.workload_entries
assert_equal(workload_entries[0].id, 'wk2')
assert_equal(workload_entries[3].id, 'wk3')
def test_sections(self):
sectioned_agenda = yaml.load(sectioned_agenda_text)
self.parser.load(self.config, sectioned_agenda, 'test')
root_node_workload_entries = self.config.jobs_config.root_node.workload_entries
leaves = list(self.config.jobs_config.root_node.leaves())
section1_workload_entries = leaves[0].workload_entries
@ -185,22 +171,8 @@ class AgendaTest(TestCase):
assert_true(section1_workload_entries[0].config['workload_parameters']['markers_enabled'])
assert_equal(section2_workload_entries[0].config['workload_name'], 'antutu')
def test_yaml_anchors(self):
yaml_anchors_agenda = yaml.load(yaml_anchors_agenda_text)
self.parser.load(self.config, yaml_anchors_agenda, 'test')
workload_entries = self.config.jobs_config.root_node.workload_entries
assert_equal(len(workload_entries), 2)
assert_equal(workload_entries[0].config['workload_name'], 'dhrystone')
assert_equal(workload_entries[0].config['workload_parameters']['threads'], 1)
assert_equal(workload_entries[0].config['workload_parameters']['delay'], 3)
assert_equal(workload_entries[1].config['workload_name'], 'dhrystone')
assert_equal(workload_entries[1].config['workload_parameters']['threads'], 4)
assert_equal(workload_entries[1].config['workload_parameters']['delay'], 3)
@raises(ConfigError)
def test_dup_sections(self):
dup_sectioned_agenda = yaml.load(dup_sectioned_agenda_text)
self.parser.load(self.config, dup_sectioned_agenda, 'test')
@raises(ConfigError)

@ -16,7 +16,6 @@
import unittest
from nose.tools import assert_equal
from wa.framework.configuration.execution import ConfigManager
from wa.utils.misc import merge_config_values
@ -39,21 +38,3 @@ class TestConfigUtils(unittest.TestCase):
if v2 is not None:
assert_equal(type(result), type(v2))
class TestConfigParser(unittest.TestCase):
def test_param_merge(self):
config = ConfigManager()
config.load_config({'workload_params': {'one': 1, 'three': {'ex': 'x'}}, 'runtime_params': {'aye': 'a'}}, 'file_one')
config.load_config({'workload_params': {'two': 2, 'three': {'why': 'y'}}, 'runtime_params': {'bee': 'b'}}, 'file_two')
assert_equal(
config.jobs_config.job_spec_template['workload_parameters'],
{'one': 1, 'two': 2, 'three': {'why': 'y'}},
)
assert_equal(
config.jobs_config.job_spec_template['runtime_parameters'],
{'aye': 'a', 'bee': 'b'},
)

@ -21,10 +21,9 @@ from nose.tools import assert_equal, assert_raises
from wa.utils.exec_control import (init_environment, reset_environment,
activate_environment, once,
once_per_class, once_per_instance,
once_per_attribute_value)
once_per_class, once_per_instance)
class MockClass(object):
class TestClass(object):
called = 0
@ -33,7 +32,7 @@ class MockClass(object):
@once
def called_once(self):
MockClass.called += 1
TestClass.called += 1
@once
def initilize_once(self):
@ -51,7 +50,7 @@ class MockClass(object):
return '{}: Called={}'.format(self.__class__.__name__, self.called)
class SubClass(MockClass):
class SubClass(TestClass):
def __init__(self):
super(SubClass, self).__init__()
@ -111,19 +110,7 @@ class AnotherClass(object):
self.count += 1
class NamedClass:
count = 0
def __init__(self, name):
self.name = name
@once_per_attribute_value('name')
def initilize(self):
NamedClass.count += 1
class AnotherSubClass(MockClass):
class AnotherSubClass(TestClass):
def __init__(self):
super(AnotherSubClass, self).__init__()
@ -155,7 +142,7 @@ class EnvironmentManagementTest(TestCase):
def test_reset_current_environment(self):
activate_environment('CURRENT_ENVIRONMENT')
t1 = MockClass()
t1 = TestClass()
t1.initilize_once()
assert_equal(t1.count, 1)
@ -165,7 +152,7 @@ class EnvironmentManagementTest(TestCase):
def test_switch_environment(self):
activate_environment('ENVIRONMENT1')
t1 = MockClass()
t1 = TestClass()
t1.initilize_once()
assert_equal(t1.count, 1)
@ -179,7 +166,7 @@ class EnvironmentManagementTest(TestCase):
def test_reset_environment_name(self):
activate_environment('ENVIRONMENT')
t1 = MockClass()
t1 = TestClass()
t1.initilize_once()
assert_equal(t1.count, 1)
@ -208,7 +195,7 @@ class OnlyOnceEnvironmentTest(TestCase):
reset_environment('TEST_ENVIRONMENT')
def test_single_instance(self):
t1 = MockClass()
t1 = TestClass()
ac = AnotherClass()
t1.initilize_once()
@ -222,8 +209,8 @@ class OnlyOnceEnvironmentTest(TestCase):
def test_mulitple_instances(self):
t1 = MockClass()
t2 = MockClass()
t1 = TestClass()
t2 = TestClass()
t1.initilize_once()
assert_equal(t1.count, 1)
@ -233,7 +220,7 @@ class OnlyOnceEnvironmentTest(TestCase):
def test_sub_classes(self):
t1 = MockClass()
t1 = TestClass()
sc = SubClass()
ss = SubSubClass()
asc = AnotherSubClass()
@ -263,7 +250,7 @@ class OncePerClassEnvironmentTest(TestCase):
reset_environment('TEST_ENVIRONMENT')
def test_single_instance(self):
t1 = MockClass()
t1 = TestClass()
ac = AnotherClass()
t1.initilize_once_per_class()
@ -277,8 +264,8 @@ class OncePerClassEnvironmentTest(TestCase):
def test_mulitple_instances(self):
t1 = MockClass()
t2 = MockClass()
t1 = TestClass()
t2 = TestClass()
t1.initilize_once_per_class()
assert_equal(t1.count, 1)
@ -288,7 +275,7 @@ class OncePerClassEnvironmentTest(TestCase):
def test_sub_classes(self):
t1 = MockClass()
t1 = TestClass()
sc1 = SubClass()
sc2 = SubClass()
ss1 = SubSubClass()
@ -321,7 +308,7 @@ class OncePerInstanceEnvironmentTest(TestCase):
reset_environment('TEST_ENVIRONMENT')
def test_single_instance(self):
t1 = MockClass()
t1 = TestClass()
ac = AnotherClass()
t1.initilize_once_per_instance()
@ -335,8 +322,8 @@ class OncePerInstanceEnvironmentTest(TestCase):
def test_mulitple_instances(self):
t1 = MockClass()
t2 = MockClass()
t1 = TestClass()
t2 = TestClass()
t1.initilize_once_per_instance()
assert_equal(t1.count, 1)
@ -346,7 +333,7 @@ class OncePerInstanceEnvironmentTest(TestCase):
def test_sub_classes(self):
t1 = MockClass()
t1 = TestClass()
sc = SubClass()
ss = SubSubClass()
asc = AnotherSubClass()
@ -365,30 +352,3 @@ class OncePerInstanceEnvironmentTest(TestCase):
asc.initilize_once_per_instance()
asc.initilize_once_per_instance()
assert_equal(asc.count, 2)
class OncePerAttributeValueTest(TestCase):
def setUp(self):
activate_environment('TEST_ENVIRONMENT')
def tearDown(self):
reset_environment('TEST_ENVIRONMENT')
def test_once_attribute_value(self):
classes = [
NamedClass('Rick'),
NamedClass('Morty'),
NamedClass('Rick'),
NamedClass('Morty'),
NamedClass('Morty'),
NamedClass('Summer'),
]
for c in classes:
c.initilize()
for c in classes:
c.initilize()
assert_equal(NamedClass.count, 3)

@ -1,315 +0,0 @@
# Copyright 2020 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import tempfile
from unittest import TestCase
from mock.mock import Mock
from nose.tools import assert_equal
from datetime import datetime
from wa.framework.configuration import RunConfiguration
from wa.framework.configuration.core import JobSpec, Status
from wa.framework.execution import ExecutionContext, Runner
from wa.framework.job import Job
from wa.framework.output import RunOutput, init_run_output
from wa.framework.output_processor import ProcessorManager
import wa.framework.signal as signal
from wa.framework.run import JobState
from wa.framework.exception import ExecutionError
class MockConfigManager(Mock):
@property
def jobs(self):
return self._joblist
@property
def loaded_config_sources(self):
return []
@property
def plugin_cache(self):
return MockPluginCache()
def __init__(self, *args, **kwargs):
super(MockConfigManager, self).__init__(*args, **kwargs)
self._joblist = None
self.run_config = RunConfiguration()
def to_pod(self):
return {}
class MockPluginCache(Mock):
def list_plugins(self, kind=None):
return []
class MockProcessorManager(Mock):
def __init__(self, *args, **kwargs):
super(MockProcessorManager, self).__init__(*args, **kwargs)
def get_enabled(self):
return []
class JobState_force_retry(JobState):
@property
def status(self):
return self._status
@status.setter
def status(self, value):
if(self.retries != self.times_to_retry) and (value == Status.RUNNING):
self._status = Status.FAILED
if self.output:
self.output.status = Status.FAILED
else:
self._status = value
if self.output:
self.output.status = value
def __init__(self, to_retry, *args, **kwargs):
self.retries = 0
self._status = Status.NEW
self.times_to_retry = to_retry
self.output = None
super(JobState_force_retry, self).__init__(*args, **kwargs)
class Job_force_retry(Job):
'''This class imitates a job that retries as many times as specified by
``to_retry`` in its constructor'''
def __init__(self, to_retry, *args, **kwargs):
super(Job_force_retry, self).__init__(*args, **kwargs)
self.state = JobState_force_retry(to_retry, self.id, self.label, self.iteration, Status.NEW)
self.initialized = False
self.finalized = False
def initialize(self, context):
self.initialized = True
return super().initialize(context)
def finalize(self, context):
self.finalized = True
return super().finalize(context)
class TestRunState(TestCase):
def setUp(self):
self.path = tempfile.mkstemp()[1]
os.remove(self.path)
self.initialise_signals()
self.context = get_context(self.path)
self.job_spec = get_jobspec()
def tearDown(self):
signal.disconnect(self._verify_serialized_state, signal.RUN_INITIALIZED)
signal.disconnect(self._verify_serialized_state, signal.JOB_STARTED)
signal.disconnect(self._verify_serialized_state, signal.JOB_RESTARTED)
signal.disconnect(self._verify_serialized_state, signal.JOB_COMPLETED)
signal.disconnect(self._verify_serialized_state, signal.JOB_FAILED)
signal.disconnect(self._verify_serialized_state, signal.JOB_ABORTED)
signal.disconnect(self._verify_serialized_state, signal.RUN_FINALIZED)
def test_job_state_transitions_pass(self):
'''Tests state equality when the job passes first try'''
job = Job(self.job_spec, 1, self.context)
job.workload = Mock()
self.context.cm._joblist = [job]
self.context.run_state.add_job(job)
runner = Runner(self.context, MockProcessorManager())
runner.run()
def test_job_state_transitions_fail(self):
'''Tests state equality when job fails completely'''
job = Job_force_retry(3, self.job_spec, 1, self.context)
job.workload = Mock()
self.context.cm._joblist = [job]
self.context.run_state.add_job(job)
runner = Runner(self.context, MockProcessorManager())
runner.run()
def test_job_state_transitions_retry(self):
'''Tests state equality when job fails initially'''
job = Job_force_retry(1, self.job_spec, 1, self.context)
job.workload = Mock()
self.context.cm._joblist = [job]
self.context.run_state.add_job(job)
runner = Runner(self.context, MockProcessorManager())
runner.run()
def initialise_signals(self):
signal.connect(self._verify_serialized_state, signal.RUN_INITIALIZED)
signal.connect(self._verify_serialized_state, signal.JOB_STARTED)
signal.connect(self._verify_serialized_state, signal.JOB_RESTARTED)
signal.connect(self._verify_serialized_state, signal.JOB_COMPLETED)
signal.connect(self._verify_serialized_state, signal.JOB_FAILED)
signal.connect(self._verify_serialized_state, signal.JOB_ABORTED)
signal.connect(self._verify_serialized_state, signal.RUN_FINALIZED)
def _verify_serialized_state(self, _):
fs_state = RunOutput(self.path).state
ex_state = self.context.run_output.state
assert_equal(fs_state.status, ex_state.status)
fs_js_zip = zip(
[value for key, value in fs_state.jobs.items()],
[value for key, value in ex_state.jobs.items()]
)
for fs_jobstate, ex_jobstate in fs_js_zip:
assert_equal(fs_jobstate.iteration, ex_jobstate.iteration)
assert_equal(fs_jobstate.retries, ex_jobstate.retries)
assert_equal(fs_jobstate.status, ex_jobstate.status)
class TestJobState(TestCase):
def test_job_retry_status(self):
job_spec = get_jobspec()
context = get_context()
job = Job_force_retry(2, job_spec, 1, context)
job.workload = Mock()
context.cm._joblist = [job]
context.run_state.add_job(job)
verifier = lambda _: assert_equal(job.status, Status.PENDING)
signal.connect(verifier, signal.JOB_RESTARTED)
runner = Runner(context, MockProcessorManager())
runner.run()
signal.disconnect(verifier, signal.JOB_RESTARTED)
def test_skipped_job_state(self):
# Test that, if the first job fails and the bail parameter is set,
# the remaining jobs have status: SKIPPED
job_spec = get_jobspec()
context = get_context()
context.cm.run_config.bail_on_job_failure = True
job1 = Job_force_retry(3, job_spec, 1, context)
job2 = Job(job_spec, 1, context)
job1.workload = Mock()
job2.workload = Mock()
context.cm._joblist = [job1, job2]
context.run_state.add_job(job1)
context.run_state.add_job(job2)
runner = Runner(context, MockProcessorManager())
try:
runner.run()
except ExecutionError:
assert_equal(job2.status, Status.SKIPPED)
else:
assert False, "ExecutionError not raised"
def test_normal_job_finalized(self):
# Test that a job is initialized then finalized normally
job_spec = get_jobspec()
context = get_context()
job = Job_force_retry(0, job_spec, 1, context)
job.workload = Mock()
context.cm._joblist = [job]
context.run_state.add_job(job)
runner = Runner(context, MockProcessorManager())
runner.run()
assert_equal(job.initialized, True)
assert_equal(job.finalized, True)
def test_skipped_job_finalized(self):
# Test that a skipped job has been finalized
job_spec = get_jobspec()
context = get_context()
context.cm.run_config.bail_on_job_failure = True
job1 = Job_force_retry(3, job_spec, 1, context)
job2 = Job_force_retry(0, job_spec, 1, context)
job1.workload = Mock()
job2.workload = Mock()
context.cm._joblist = [job1, job2]
context.run_state.add_job(job1)
context.run_state.add_job(job2)
runner = Runner(context, MockProcessorManager())
try:
runner.run()
except ExecutionError:
assert_equal(job2.finalized, True)
else:
assert False, "ExecutionError not raised"
def test_failed_job_finalized(self):
# Test that a failed job, while the bail parameter is set,
# is finalized
job_spec = get_jobspec()
context = get_context()
context.cm.run_config.bail_on_job_failure = True
job1 = Job_force_retry(3, job_spec, 1, context)
job1.workload = Mock()
context.cm._joblist = [job1]
context.run_state.add_job(job1)
runner = Runner(context, MockProcessorManager())
try:
runner.run()
except ExecutionError:
assert_equal(job1.finalized, True)
else:
assert False, "ExecutionError not raised"
def get_context(path=None):
if not path:
path = tempfile.mkstemp()[1]
os.remove(path)
config = MockConfigManager()
output = init_run_output(path, config)
return ExecutionContext(config, Mock(), output)
def get_jobspec():
job_spec = JobSpec()
job_spec.augmentations = {}
job_spec.finalize()
return job_spec

@ -30,27 +30,6 @@ class Callable(object):
return self.val
class TestSignalDisconnect(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.callback_ctr = 0
def setUp(self):
signal.connect(self._call_me_once, 'first')
signal.connect(self._call_me_once, 'second')
def test_handler_disconnected(self):
signal.send('first')
signal.send('second')
def _call_me_once(self):
assert_equal(self.callback_ctr, 0)
self.callback_ctr += 1
signal.disconnect(self._call_me_once, 'first')
signal.disconnect(self._call_me_once, 'second')
class TestPriorityDispatcher(unittest.TestCase):
def setUp(self):
@ -82,16 +61,12 @@ class TestPriorityDispatcher(unittest.TestCase):
def test_wrap_propagate(self):
d = {'before': False, 'after': False, 'success': False}
def before():
d['before'] = True
def after():
d['after'] = True
def success():
d['success'] = True
signal.connect(before, signal.BEFORE_WORKLOAD_SETUP)
signal.connect(after, signal.AFTER_WORKLOAD_SETUP)
signal.connect(success, signal.SUCCESSFUL_WORKLOAD_SETUP)
@ -101,7 +76,7 @@ class TestPriorityDispatcher(unittest.TestCase):
with signal.wrap('WORKLOAD_SETUP'):
raise RuntimeError()
except RuntimeError:
caught = True
caught=True
assert_true(d['before'])
assert_true(d['after'])

@ -190,10 +190,3 @@ class TestToggleSet(TestCase):
ts6 = ts2.merge_into(ts3).merge_with(ts1)
assert_equal(ts6, toggle_set(['one', 'two', 'three', 'four', 'five', '~~']))
def test_order_on_create(self):
ts1 = toggle_set(['one', 'two', 'three', '~one'])
assert_equal(ts1, toggle_set(['~one', 'two', 'three']))
ts1 = toggle_set(['~one', 'two', 'three', 'one'])
assert_equal(ts1, toggle_set(['one', 'two', 'three']))
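# Illustrative sketch of the ordering rule exercised above, assuming
# toggle_set from wa.utils.types behaves as in these tests: the last
# occurrence of a value or of its '~' negation wins at creation time.
from wa.utils.types import toggle_set

demo = toggle_set(['execution_time', 'csv', '~execution_time'])
assert demo == toggle_set(['~execution_time', 'csv'])  # later '~' overrides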

@ -33,7 +33,7 @@ from wa.framework.target.descriptor import (TargetDescriptor, TargetDescription,
create_target_description, add_description_for_target)
from wa.framework.workload import (Workload, ApkWorkload, ApkUiautoWorkload,
ApkReventWorkload, UIWorkload, UiautoWorkload,
PackageHandler, ReventWorkload, TestPackageHandler)
ReventWorkload)
from wa.framework.version import get_wa_version, get_wa_version_with_commit

Binary file not shown.

Binary file not shown.

@ -23,6 +23,7 @@ import re
import uuid
import getpass
from collections import OrderedDict
from distutils.dir_util import copy_tree # pylint: disable=no-name-in-module, import-error
from devlib.utils.types import identifier
try:
@ -42,24 +43,6 @@ from wa.utils.misc import (ensure_directory_exists as _d, capitalize,
from wa.utils.postgres import get_schema, POSTGRES_SCHEMA_DIR
from wa.utils.serializer import yaml
if sys.version_info >= (3, 8):
def copy_tree(src, dst):
from shutil import copy, copytree # pylint: disable=import-outside-toplevel
copytree(
src,
dst,
# dirs_exist_ok=True only exists in Python >= 3.8
dirs_exist_ok=True,
# Align with devlib and only copy the content without metadata
copy_function=copy
)
else:
def copy_tree(src, dst):
# pylint: disable=import-outside-toplevel, redefined-outer-name
from distutils.dir_util import copy_tree
# Align with devlib and only copy the content without metadata
copy_tree(src, dst, preserve_mode=False, preserve_times=False)
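# A small demonstration (not part of the original module) of the behaviour
# the Python >= 3.8 branch above relies on: shutil.copytree only merges
# into an already-existing destination when dirs_exist_ok=True is passed.
def _copy_tree_demo():
    import os
    import tempfile
    from shutil import copy, copytree
    src, dst = tempfile.mkdtemp(), tempfile.mkdtemp()  # dst already exists
    open(os.path.join(src, 'example.txt'), 'w').close()
    copytree(src, dst, dirs_exist_ok=True, copy_function=copy)
    assert os.path.exists(os.path.join(dst, 'example.txt'))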
TEMPLATES_DIR = os.path.join(os.path.dirname(__file__), 'templates')
@ -123,8 +106,8 @@ class CreateDatabaseSubcommand(SubCommand):
def execute(self, state, args): # pylint: disable=too-many-branches
if not psycopg2:
raise CommandError(
'The module psycopg2 is required for the wa '
+ 'create database command.')
'The module psycopg2 is required for the wa ' +
'create database command.')
if args.dbname == 'postgres':
raise ValueError('Databasename to create cannot be postgres.')
@ -148,8 +131,8 @@ class CreateDatabaseSubcommand(SubCommand):
config = yaml.load(config_file)
if 'postgres' in config and not args.force_update_config:
raise CommandError(
"The entry 'postgres' already exists in the config file. "
+ "Please specify the -F flag to force an update.")
"The entry 'postgres' already exists in the config file. " +
"Please specify the -F flag to force an update.")
possible_connection_errors = [
(
@ -278,8 +261,8 @@ class CreateDatabaseSubcommand(SubCommand):
else:
if not self.force:
raise CommandError(
"Database {} already exists. ".format(self.dbname)
+ "Please specify the -f flag to create it from afresh."
"Database {} already exists. ".format(self.dbname) +
"Please specify the -f flag to create it from afresh."
)
def _create_database_postgres(self):
@ -417,14 +400,14 @@ class CreateWorkloadSubcommand(SubCommand):
self.parser.add_argument('name', metavar='NAME',
help='Name of the workload to be created')
self.parser.add_argument('-p', '--path', metavar='PATH', default=None,
help='The location at which the workload will be created. If not specified, '
+ 'this defaults to "~/.workload_automation/plugins".')
help='The location at which the workload will be created. If not specified, ' +
'this defaults to "~/.workload_automation/plugins".')
self.parser.add_argument('-f', '--force', action='store_true',
help='Create the new workload even if a workload with the specified '
+ 'name already exists.')
help='Create the new workload even if a workload with the specified ' +
'name already exists.')
self.parser.add_argument('-k', '--kind', metavar='KIND', default='basic', choices=list(create_funcs.keys()),
help='The type of workload to be created. The available options '
+ 'are: {}'.format(', '.join(list(create_funcs.keys()))))
help='The type of workload to be created. The available options ' +
'are: {}'.format(', '.join(list(create_funcs.keys()))))
def execute(self, state, args): # pylint: disable=R0201
where = args.path or 'local'
@ -447,8 +430,8 @@ class CreatePackageSubcommand(SubCommand):
self.parser.add_argument('name', metavar='NAME',
help='Name of the package to be created')
self.parser.add_argument('-p', '--path', metavar='PATH', default=None,
help='The location at which the new package will be created. If not specified, '
+ 'current working directory will be used.')
help='The location at which the new package will be created. If not specified, ' +
'current working directory will be used.')
self.parser.add_argument('-f', '--force', action='store_true',
help='Create the new package even if a file or directory with the same name '
'already exists at the specified location.')

@ -1,4 +1,4 @@
--!VERSION!1.6!ENDVERSION!
--!VERSION!1.2!ENDVERSION!
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
CREATE EXTENSION IF NOT EXISTS "lo";
@ -61,7 +61,7 @@ CREATE TABLE Runs (
CREATE TABLE Jobs (
oid uuid NOT NULL,
run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
run_oid uuid NOT NULL references Runs(oid),
status status_enum,
retry int,
label text,
@ -76,13 +76,12 @@ CREATE TABLE Jobs (
CREATE TABLE Targets (
oid uuid NOT NULL,
run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
run_oid uuid NOT NULL references Runs(oid),
target text,
modules text[],
cpus text[],
os text,
os_version jsonb,
hostid bigint,
hostid int,
hostname text,
abi text,
is_rooted boolean,
@ -97,13 +96,12 @@ CREATE TABLE Targets (
android_id text,
_pod_version int,
_pod_serialization_version int,
system_id text,
PRIMARY KEY (oid)
);
CREATE TABLE Events (
oid uuid NOT NULL,
run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
run_oid uuid NOT NULL references Runs(oid),
job_oid uuid references Jobs(oid),
timestamp timestamp,
message text,
@ -114,28 +112,28 @@ CREATE TABLE Events (
CREATE TABLE Resource_Getters (
oid uuid NOT NULL,
run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
run_oid uuid NOT NULL references Runs(oid),
name text,
PRIMARY KEY (oid)
);
CREATE TABLE Augmentations (
oid uuid NOT NULL,
run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
run_oid uuid NOT NULL references Runs(oid),
name text,
PRIMARY KEY (oid)
);
CREATE TABLE Jobs_Augs (
oid uuid NOT NULL,
job_oid uuid NOT NULL references Jobs(oid) ON DELETE CASCADE,
augmentation_oid uuid NOT NULL references Augmentations(oid) ON DELETE CASCADE,
job_oid uuid NOT NULL references Jobs(oid),
augmentation_oid uuid NOT NULL references Augmentations(oid),
PRIMARY KEY (oid)
);
CREATE TABLE Metrics (
oid uuid NOT NULL,
run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
run_oid uuid NOT NULL references Runs(oid),
job_oid uuid references Jobs(oid),
name text,
value double precision,
@ -158,7 +156,7 @@ CREATE TRIGGER t_raster BEFORE UPDATE OR DELETE ON LargeObjects
CREATE TABLE Artifacts (
oid uuid NOT NULL,
run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
run_oid uuid NOT NULL references Runs(oid),
job_oid uuid references Jobs(oid),
name text,
large_object_uuid uuid NOT NULL references LargeObjects(oid),
@ -166,22 +164,15 @@ CREATE TABLE Artifacts (
kind text,
_pod_version int,
_pod_serialization_version int,
is_dir boolean,
PRIMARY KEY (oid)
);
CREATE RULE del_lo AS
ON DELETE TO Artifacts
DO DELETE FROM LargeObjects
WHERE LargeObjects.oid = old.large_object_uuid
;
CREATE TABLE Classifiers (
oid uuid NOT NULL,
artifact_oid uuid references Artifacts(oid) ON DELETE CASCADE,
metric_oid uuid references Metrics(oid) ON DELETE CASCADE,
job_oid uuid references Jobs(oid) ON DELETE CASCADE,
run_oid uuid references Runs(oid) ON DELETE CASCADE,
artifact_oid uuid references Artifacts(oid),
metric_oid uuid references Metrics(oid),
job_oid uuid references Jobs(oid),
run_oid uuid references Runs(oid),
key text,
value text,
PRIMARY KEY (oid)
@ -189,7 +180,7 @@ CREATE TABLE Classifiers (
CREATE TABLE Parameters (
oid uuid NOT NULL,
run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
run_oid uuid NOT NULL references Runs(oid),
job_oid uuid references Jobs(oid),
augmentation_oid uuid references Augmentations(oid),
resource_getter_oid uuid references Resource_Getters(oid),

@ -1,3 +0,0 @@
ALTER TABLE targets ADD COLUMN system_id text;
ALTER TABLE artifacts ADD COLUMN is_dir boolean;

@ -1,2 +0,0 @@
ALTER TABLE targets ADD COLUMN modules text[];

@ -1 +0,0 @@
ALTER TABLE targets ALTER hostid TYPE BIGINT;

@ -1,109 +0,0 @@
ALTER TABLE jobs
DROP CONSTRAINT jobs_run_oid_fkey,
ADD CONSTRAINT jobs_run_oid_fkey
FOREIGN KEY (run_oid)
REFERENCES runs(oid)
ON DELETE CASCADE
;
ALTER TABLE targets
DROP CONSTRAINT targets_run_oid_fkey,
ADD CONSTRAINT targets_run_oid_fkey
FOREIGN KEY (run_oid)
REFERENCES runs(oid)
ON DELETE CASCADE
;
ALTER TABLE events
DROP CONSTRAINT events_run_oid_fkey,
ADD CONSTRAINT events_run_oid_fkey
FOREIGN KEY (run_oid)
REFERENCES runs(oid)
ON DELETE CASCADE
;
ALTER TABLE resource_getters
DROP CONSTRAINT resource_getters_run_oid_fkey,
ADD CONSTRAINT resource_getters_run_oid_fkey
FOREIGN KEY (run_oid)
REFERENCES runs(oid)
ON DELETE CASCADE
;
ALTER TABLE augmentations
DROP CONSTRAINT augmentations_run_oid_fkey,
ADD CONSTRAINT augmentations_run_oid_fkey
FOREIGN KEY (run_oid)
REFERENCES runs(oid)
ON DELETE CASCADE
;
ALTER TABLE jobs_augs
DROP CONSTRAINT jobs_augs_job_oid_fkey,
DROP CONSTRAINT jobs_augs_augmentation_oid_fkey,
ADD CONSTRAINT jobs_augs_job_oid_fkey
FOREIGN KEY (job_oid)
REFERENCES Jobs(oid)
ON DELETE CASCADE,
ADD CONSTRAINT jobs_augs_augmentation_oid_fkey
FOREIGN KEY (augmentation_oid)
REFERENCES Augmentations(oid)
ON DELETE CASCADE
;
ALTER TABLE metrics
DROP CONSTRAINT metrics_run_oid_fkey,
ADD CONSTRAINT metrics_run_oid_fkey
FOREIGN KEY (run_oid)
REFERENCES runs(oid)
ON DELETE CASCADE
;
ALTER TABLE artifacts
DROP CONSTRAINT artifacts_run_oid_fkey,
ADD CONSTRAINT artifacts_run_oid_fkey
FOREIGN KEY (run_oid)
REFERENCES runs(oid)
ON DELETE CASCADE
;
CREATE RULE del_lo AS
ON DELETE TO Artifacts
DO DELETE FROM LargeObjects
WHERE LargeObjects.oid = old.large_object_uuid
;
ALTER TABLE classifiers
DROP CONSTRAINT classifiers_artifact_oid_fkey,
DROP CONSTRAINT classifiers_metric_oid_fkey,
DROP CONSTRAINT classifiers_job_oid_fkey,
DROP CONSTRAINT classifiers_run_oid_fkey,
ADD CONSTRAINT classifiers_artifact_oid_fkey
FOREIGN KEY (artifact_oid)
REFERENCES artifacts(oid)
ON DELETE CASCADE,
ADD CONSTRAINT classifiers_metric_oid_fkey
FOREIGN KEY (metric_oid)
REFERENCES metrics(oid)
ON DELETE CASCADE,
ADD CONSTRAINT classifiers_job_oid_fkey
FOREIGN KEY (job_oid)
REFERENCES jobs(oid)
ON DELETE CASCADE,
ADD CONSTRAINT classifiers_run_oid_fkey
FOREIGN KEY (run_oid)
REFERENCES runs(oid)
ON DELETE CASCADE
;
ALTER TABLE parameters
DROP CONSTRAINT parameters_run_oid_fkey,
ADD CONSTRAINT parameters_run_oid_fkey
FOREIGN KEY (run_oid)
REFERENCES runs(oid)
ON DELETE CASCADE
;

@ -17,7 +17,6 @@ import os
from wa import Command
from wa import discover_wa_outputs
from wa.framework.configuration.core import Status
from wa.framework.exception import CommandError
from wa.framework.output import RunOutput
from wa.framework.output_processor import ProcessorManager
@ -58,9 +57,8 @@ class ProcessCommand(Command):
""")
self.parser.add_argument('-f', '--force', action='store_true',
help="""
Run processors that have already been run. By
default these will be skipped. Also, forces
processing of in-progress runs.
Run processors that have already been
run. By default these will be skipped.
""")
self.parser.add_argument('-r', '--recursive', action='store_true',
help="""
@ -78,15 +76,10 @@ class ProcessCommand(Command):
if not args.recursive:
output_list = [RunOutput(process_directory)]
else:
output_list = list(discover_wa_outputs(process_directory))
output_list = [output for output in discover_wa_outputs(process_directory)]
pc = ProcessContext()
for run_output in output_list:
if run_output.status < Status.OK and not args.force:
msg = 'Skipping {} as it has not completed -- {}'
self.logger.info(msg.format(run_output.basepath, run_output.status))
continue
pc.run_output = run_output
pc.target_info = run_output.target_info
@ -119,12 +112,6 @@ class ProcessCommand(Command):
pm.initialize(pc)
for job_output in run_output.jobs:
if job_output.status < Status.OK or job_output.status in [Status.SKIPPED, Status.ABORTED]:
msg = 'Skipping job {} {} iteration {} -- {}'
self.logger.info(msg.format(job_output.id, job_output.label,
job_output.iteration, job_output.status))
continue
pc.job_output = job_output
pm.enable_all()
if not args.force:
@ -155,6 +142,5 @@ class ProcessCommand(Command):
pm.export_run_output(pc)
pm.finalize(pc)
run_output.write_info()
run_output.write_result()
self.logger.info('Done.')

@ -1,288 +0,0 @@
from collections import Counter
from datetime import datetime, timedelta
import logging
import os
from wa import Command, settings
from wa.framework.configuration.core import Status
from wa.framework.output import RunOutput, discover_wa_outputs
from wa.utils.doc import underline
from wa.utils.log import COLOR_MAP, RESET_COLOR
from wa.utils.terminalsize import get_terminal_size
class ReportCommand(Command):
name = 'report'
description = '''
Monitor an ongoing run and provide information on its progress.
Specify the output directory of the run you would like to monitor;
alternatively report will attempt to discover wa output directories
within the current directory. The output includes run information such as
the UUID, start time, duration, project name and a short summary of the
run's progress (number of completed jobs, the number of jobs in each
different status).
If verbose output is specified, the output includes a list of all events
labelled as not specific to any job, followed by a list of the jobs in the
order executed, with their retries (if any), current status and, if the job
is finished, a list of events that occurred during that job's execution.
This is an example of a job status line:
wk1 (exoplayer) [1] - 2, PARTIAL
It contains two entries delimited by a comma: the job's descriptor followed
by its completion status (``PARTIAL``, in this case). The descriptor
consists of the following elements:
- the job ID (``wk1``)
- the job label (which defaults to the workload name) in parentheses
- job iteration number in square brackets (``1`` in this case)
- a hyphen followed by the retry attempt number.
(note: this will only be shown if the job has been retried at least
once. If the job has not yet run, or if it completed on the first
attempt, the hyphen and retry count -- which in that case would be
zero -- will not appear).
'''
def initialize(self, context):
self.parser.add_argument('-d', '--directory',
help='''
Specify the WA output path. report will
otherwise attempt to discover output
directories in the current directory.
''')
def execute(self, state, args):
if args.directory:
output_path = args.directory
run_output = RunOutput(output_path)
else:
possible_outputs = list(discover_wa_outputs(os.getcwd()))
num_paths = len(possible_outputs)
if num_paths > 1:
print('More than one possible output directory found,'
' please choose a path from the following:'
)
for i in range(num_paths):
print("{}: {}".format(i, possible_outputs[i].basepath))
while True:
try:
select = int(input())
except ValueError:
print("Please select a valid path number")
continue
if select not in range(num_paths):
print("Please select a valid path number")
continue
break
run_output = possible_outputs[select]
else:
run_output = possible_outputs[0]
rm = RunMonitor(run_output)
print(rm.generate_output(args.verbose))
class RunMonitor:
@property
def elapsed_time(self):
if self._elapsed is None:
if self.ro.info.duration is None:
self._elapsed = datetime.utcnow() - self.ro.info.start_time
else:
self._elapsed = self.ro.info.duration
return self._elapsed
@property
def job_outputs(self):
if self._job_outputs is None:
self._job_outputs = {
(j_o.id, j_o.label, j_o.iteration): j_o for j_o in self.ro.jobs
}
return self._job_outputs
@property
def projected_duration(self):
elapsed = self.elapsed_time.total_seconds()
proj = timedelta(seconds=elapsed * (len(self.jobs) / len(self.segmented['finished'])))
return proj - self.elapsed_time
def __init__(self, ro):
self.ro = ro
self._elapsed = None
self._p_duration = None
self._job_outputs = None
self._termwidth = None
self._fmt = _simple_formatter()
self.get_data()
def get_data(self):
self.jobs = [state for label_id, state in self.ro.state.jobs.items()]
if self.jobs:
rc = self.ro.run_config
self.segmented = segment_jobs_by_state(self.jobs,
rc.max_retries,
rc.retry_on_status
)
def generate_run_header(self):
info = self.ro.info
header = underline('Run Info')
header += "UUID: {}\n".format(info.uuid)
if info.run_name:
header += "Run name: {}\n".format(info.run_name)
if info.project:
header += "Project: {}\n".format(info.project)
if info.project_stage:
header += "Project stage: {}\n".format(info.project_stage)
if info.start_time:
duration = _seconds_as_smh(self.elapsed_time.total_seconds())
header += ("Start time: {}\n"
"Duration: {:02}:{:02}:{:02}\n"
).format(info.start_time,
duration[2], duration[1], duration[0],
)
if self.segmented['finished'] and not info.end_time:
p_duration = _seconds_as_smh(self.projected_duration.total_seconds())
header += "Projected time remaining: {:02}:{:02}:{:02}\n".format(
p_duration[2], p_duration[1], p_duration[0]
)
elif self.ro.info.end_time:
header += "End time: {}\n".format(info.end_time)
return header + '\n'
def generate_job_summary(self):
total = len(self.jobs)
num_fin = len(self.segmented['finished'])
summary = underline('Job Summary')
summary += 'Total: {}, Completed: {} ({}%)\n'.format(
total, num_fin, (num_fin / total) * 100
) if total > 0 else 'No jobs created\n'
ctr = Counter()
for run_state, jobs in ((k, v) for k, v in self.segmented.items() if v):
if run_state == 'finished':
ctr.update([job.status.name.lower() for job in jobs])
else:
ctr[run_state] += len(jobs)
return summary + ', '.join(
[str(count) + ' ' + self._fmt.highlight_keyword(status) for status, count in ctr.items()]
) + '\n\n'
def generate_job_detail(self):
detail = underline('Job Detail')
for job in self.jobs:
detail += ('{} ({}) [{}]{}, {}\n').format(
job.id,
job.label,
job.iteration,
' - ' + str(job.retries) if job.retries else '',
self._fmt.highlight_keyword(str(job.status))
)
job_output = self.job_outputs[(job.id, job.label, job.iteration)]
for event in job_output.events:
detail += self._fmt.fit_term_width(
'\t{}\n'.format(event.summary)
)
return detail
def generate_run_detail(self):
detail = underline('Run Events') if self.ro.events else ''
for event in self.ro.events:
detail += '{}\n'.format(event.summary)
return detail + '\n'
def generate_output(self, verbose):
if not self.jobs:
return 'No jobs found in output directory\n'
output = self.generate_run_header()
output += self.generate_job_summary()
if verbose:
output += self.generate_run_detail()
output += self.generate_job_detail()
return output
def _seconds_as_smh(seconds):
seconds = int(seconds)
hours = seconds // 3600
minutes = (seconds % 3600) // 60
seconds = seconds % 60
return seconds, minutes, hours
def segment_jobs_by_state(jobstates, max_retries, retry_status):
finished_states = [
Status.PARTIAL, Status.FAILED,
Status.ABORTED, Status.OK, Status.SKIPPED
]
segmented = {
'finished': [], 'other': [], 'running': [],
'pending': [], 'uninitialized': []
}
for jobstate in jobstates:
if (jobstate.status in retry_status) and jobstate.retries < max_retries:
segmented['running'].append(jobstate)
elif jobstate.status in finished_states:
segmented['finished'].append(jobstate)
elif jobstate.status == Status.RUNNING:
segmented['running'].append(jobstate)
elif jobstate.status == Status.PENDING:
segmented['pending'].append(jobstate)
elif jobstate.status == Status.NEW:
segmented['uninitialized'].append(jobstate)
else:
segmented['other'].append(jobstate)
return segmented
class _simple_formatter:
color_map = {
'running': COLOR_MAP[logging.INFO],
'partial': COLOR_MAP[logging.WARNING],
'failed': COLOR_MAP[logging.CRITICAL],
'aborted': COLOR_MAP[logging.ERROR]
}
def __init__(self):
self.termwidth = get_terminal_size()[0]
self.color = settings.logging['color']
def fit_term_width(self, text):
text = text.expandtabs()
if len(text) <= self.termwidth:
return text
else:
return text[0:self.termwidth - 4] + " ...\n"
def highlight_keyword(self, kw):
if not self.color or kw not in _simple_formatter.color_map:
return kw
color = _simple_formatter.color_map[kw.lower()]
return '{}{}{}'.format(color, kw, RESET_COLOR)
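# Worked example (illustrative only) of RunMonitor.projected_duration above:
# with 4 of 10 jobs finished after 20 minutes of elapsed time, the projected
# total is 20 * (10 / 4) = 50 minutes, i.e. 30 minutes remaining.
def _projection_demo():
    from datetime import timedelta
    elapsed = timedelta(minutes=20)
    total_jobs, finished = 10, 4
    projected = timedelta(seconds=elapsed.total_seconds() * (total_jobs / finished))
    assert projected - elapsed == timedelta(minutes=30)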

@ -25,6 +25,10 @@ from wa.framework.target.manager import TargetManager
from wa.utils.revent import ReventRecorder
if sys.version_info[0] == 3:
raw_input = input # pylint: disable=redefined-builtin
class RecordCommand(Command):
name = 'record'
@ -92,8 +96,8 @@ class RecordCommand(Command):
if args.workload and args.output:
self.logger.error("Output file cannot be specified with Workload")
sys.exit()
if not args.workload and (args.setup or args.extract_results
or args.teardown or args.all):
if not args.workload and (args.setup or args.extract_results or
args.teardown or args.all):
self.logger.error("Cannot specify a recording stage without a Workload")
sys.exit()
if args.workload and not any([args.all, args.teardown, args.extract_results, args.run, args.setup]):
@ -133,11 +137,11 @@ class RecordCommand(Command):
def record(self, revent_file, name, output_path):
msg = 'Press Enter when you are ready to record {}...'
self.logger.info(msg.format(name))
input('')
raw_input('')
self.revent_recorder.start_record(revent_file)
msg = 'Press Enter when you have finished recording {}...'
self.logger.info(msg.format(name))
input('')
raw_input('')
self.revent_recorder.stop_record()
if not os.path.isdir(output_path):

@ -7,22 +7,3 @@
was done following an extended discussion and tests that verified
that the savings in processing power were not enough to warrant
the creation of a dedicated server or file handler.
## 1.2
- Rename the `resourcegetters` table to `resource_getters` for consistency.
- Add Job and Run level classifiers.
- Add missing android specific properties to targets.
- Add new POD meta data to relevant tables.
- Correct job column name from `retires` to `retry`.
- Add missing run information.
## 1.3
- Add missing "system_id" field from TargetInfo.
- Enable support for uploading Artifact that represent directories.
## 1.4
- Add "modules" field to TargetInfo to list the modules loaded by the target
during the run.
## 1.5
- Change the type of the "hostid" in TargetInfo from Int to Bigint.
## 1.6
- Add cascading deletes to most tables to allow easy deletion of a run
and its associated data.
- Add rule to delete the associated large object on deletion of an artifact.

@ -73,8 +73,11 @@ class ShowCommand(Command):
if which('pandoc'):
p = Popen(['pandoc', '-f', 'rst', '-t', 'man'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, _ = p.communicate(rst_output.encode(sys.stdin.encoding))
output = output.decode(sys.stdout.encoding)
if sys.version_info[0] == 3:
output, _ = p.communicate(rst_output.encode(sys.stdin.encoding))
output = output.decode(sys.stdout.encoding)
else:
output, _ = p.communicate(rst_output)
# Make sure to double escape back slashes
output = output.replace('\\', '\\\\\\')

@ -59,7 +59,7 @@ params = dict(
'Environment :: Console',
'License :: Other/Proprietary License',
'Operating System :: Unix',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.7',
],
)

@ -1,18 +1,18 @@
apply plugin: 'com.android.application'
android {
compileSdkVersion 28
buildToolsVersion '28.0.0'
compileSdkVersion 18
buildToolsVersion '25.0.0'
defaultConfig {
applicationId "${package_name}"
minSdkVersion 18
targetSdkVersion 28
targetSdkVersion 25
testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner"
}
buildTypes {
applicationVariants.all { variant ->
variant.outputs.each { output ->
output.outputFileName = "${package_name}.apk"
output.outputFile = file("$$project.buildDir/apk/${package_name}.apk")
}
}
}

@ -16,7 +16,7 @@ fi
# Copy base class library from wlauto dist
libs_dir=app/libs
base_class=`python3 -c "import os, wa; print(os.path.join(os.path.dirname(wa.__file__), 'framework', 'uiauto', 'uiauto.aar'))"`
base_class=`python -c "import os, wa; print os.path.join(os.path.dirname(wa.__file__), 'framework', 'uiauto', 'uiauto.aar')"`
mkdir -p $$libs_dir
cp $$base_class $$libs_dir
@ -31,8 +31,8 @@ fi
# If successful move APK file to workload folder (overwrite previous)
rm -f ../$package_name
if [[ -f app/build/outputs/apk/debug/$package_name.apk ]]; then
cp app/build/outputs/apk/debug/$package_name.apk ../$package_name.apk
if [[ -f app/build/apk/$package_name.apk ]]; then
cp app/build/apk/$package_name.apk ../$package_name.apk
else
echo 'ERROR: UiAutomator apk could not be found!'
exit 9

@ -3,10 +3,9 @@
buildscript {
repositories {
jcenter()
google()
}
dependencies {
classpath 'com.android.tools.build:gradle:7.2.1'
classpath 'com.android.tools.build:gradle:2.3.1'
// NOTE: Do not place your application dependencies here; they belong
// in the individual module build.gradle files
@ -16,7 +15,6 @@ buildscript {
allprojects {
repositories {
jcenter()
google()
}
}

@ -3,4 +3,4 @@ distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-7.3.3-all.zip
distributionUrl=https\://services.gradle.org/distributions/gradle-3.3-all.zip

@ -65,6 +65,7 @@ class SubCommand(object):
options to the command's parser). ``context`` is always ``None``.
"""
pass
def execute(self, state, args):
"""

@ -13,7 +13,6 @@
# limitations under the License.
import os
import logging
from copy import copy, deepcopy
from collections import OrderedDict, defaultdict
@ -37,8 +36,6 @@ Status = enum(['UNKNOWN', 'NEW', 'PENDING',
'STARTED', 'CONNECTED', 'INITIALIZED', 'RUNNING',
'OK', 'PARTIAL', 'FAILED', 'ABORTED', 'SKIPPED'])
logger = logging.getLogger('config')
##########################
### CONFIG POINT TYPES ###
@ -58,11 +55,10 @@ class RebootPolicy(object):
executing the first workload spec.
:each_spec: The device will be rebooted before running a new workload spec.
:each_iteration: The device will be rebooted before each new iteration.
:run_completion: The device will be rebooted after the run has been completed.
"""
valid_policies = ['never', 'as_needed', 'initial', 'each_spec', 'each_job', 'run_completion']
valid_policies = ['never', 'as_needed', 'initial', 'each_spec', 'each_job']
@staticmethod
def from_pod(pod):
@ -93,10 +89,6 @@ class RebootPolicy(object):
def reboot_on_each_spec(self):
return self.policy == 'each_spec'
@property
def reboot_on_run_completion(self):
return self.policy == 'run_completion'
def __str__(self):
return self.policy
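A minimal sketch of how the new policy is consumed (assuming, as for the other policies, that the policy name is passed to the constructor):
policy = RebootPolicy('run_completion')
assert policy.reboot_on_run_completion   # checked by the runner after the last job
assert not policy.reboot_on_each_spec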
@ -200,8 +192,7 @@ class ConfigurationPoint(object):
constraint=None,
merge=False,
aliases=None,
global_alias=None,
deprecated=False):
global_alias=None):
"""
Create a new Parameter object.
@ -252,12 +243,10 @@ class ConfigurationPoint(object):
:param global_alias: An alias for this parameter that can be specified at
the global level. A global_alias can map onto many
ConfigurationPoints.
:param deprecated: Specify that this parameter is deprecated and its
config should be ignored. If supplied WA will display
a warning to the user however will continue execution.
"""
self.name = identifier(name)
kind = KIND_MAP.get(kind, kind)
if kind in KIND_MAP:
kind = KIND_MAP[kind]
if kind is not None and not callable(kind):
raise ValueError('Kind must be callable.')
self.kind = kind
@ -277,7 +266,6 @@ class ConfigurationPoint(object):
self.merge = merge
self.aliases = aliases or []
self.global_alias = global_alias
self.deprecated = deprecated
if self.default is not None:
try:
@ -293,11 +281,6 @@ class ConfigurationPoint(object):
return False
def set_value(self, obj, value=None, check_mandatory=True):
if self.deprecated:
if value is not None:
msg = 'Deprecated parameter supplied for "{}" in "{}". The value will be ignored.'
logger.warning(msg.format(self.name, obj.name))
return
if value is None:
if self.default is not None:
value = self.kind(self.default)
@ -319,8 +302,6 @@ class ConfigurationPoint(object):
setattr(obj, self.name, value)
def validate(self, obj, check_mandatory=True):
if self.deprecated:
return
value = getattr(obj, self.name, None)
if value is not None:
self.validate_value(obj.name, value)
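A minimal sketch of the deprecation behaviour above (the plugin object and parameter name are hypothetical): a value supplied for a deprecated point is logged and dropped rather than set on the object.
cp = ConfigurationPoint('old_knob', kind=str, deprecated=True)
cp.set_value(plugin, value='anything')  # warns, returns without calling setattr
cp.validate(plugin)                     # also a no-op for deprecated points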
@ -469,7 +450,6 @@ class MetaConfiguration(Configuration):
description="""
The local mount point for the filer hosting WA assets.
""",
default=''
),
ConfigurationPoint(
'logging',
@ -486,6 +466,7 @@ class MetaConfiguration(Configuration):
contain bash color escape codes. Set this to ``False`` if
console output will be piped somewhere that does not know
how to handle those.
""",
),
ConfigurationPoint(
@ -542,10 +523,6 @@ class MetaConfiguration(Configuration):
def target_info_cache_file(self):
return os.path.join(self.cache_directory, 'targets.json')
@property
def apk_info_cache_file(self):
return os.path.join(self.cache_directory, 'apk_info.json')
def __init__(self, environ=None):
super(MetaConfiguration, self).__init__()
if environ is None:
@ -667,18 +644,15 @@ class RunConfiguration(Configuration):
``"each_spec"``
The device will be rebooted before running a new workload spec.
.. note:: This acts the same as ``each_job`` when execution order
.. note:: this acts the same as each_job when execution order
is set to by_iteration
``"run_completion"``
The device will be rebooted after the run has been completed.
'''),
ConfigurationPoint(
'device',
kind=str,
default='generic_android',
description='''
This setting defines what specific ``Device`` subclass will be used to
This setting defines what specific Device subclass will be used to
interact with the connected device. Obviously, this must match your
setup.
''',
@ -732,17 +706,6 @@ class RunConfiguration(Configuration):
failed, but continue attempting to run others.
'''
),
ConfigurationPoint(
'bail_on_job_failure',
kind=bool,
default=False,
description='''
When a job fails during its run phase, WA will attempt to retry the
job, then continue with remaining jobs after. Setting this to
``True`` means WA will skip remaining jobs and end the run if a job
has retried the maximum number of times, and still fails.
'''
),
ConfigurationPoint(
'allow_phone_home',
kind=bool, default=True,
@ -830,12 +793,12 @@ class JobSpec(Configuration):
description='''
The name of the workload to run.
'''),
ConfigurationPoint('workload_parameters', kind=obj_dict, merge=True,
ConfigurationPoint('workload_parameters', kind=obj_dict,
aliases=["params", "workload_params", "parameters"],
description='''
Parameter to be passed to the workload
'''),
ConfigurationPoint('runtime_parameters', kind=obj_dict, merge=True,
ConfigurationPoint('runtime_parameters', kind=obj_dict,
aliases=["runtime_params"],
description='''
Runtime parameters to be set prior to running

@ -24,7 +24,7 @@ from wa.framework.configuration.core import (MetaConfiguration, RunConfiguration
JobGenerator, settings)
from wa.framework.configuration.parsers import ConfigParser
from wa.framework.configuration.plugin_cache import PluginCache
from wa.framework.exception import NotFoundError, ConfigError
from wa.framework.exception import NotFoundError
from wa.framework.job import Job
from wa.utils import log
from wa.utils.serializer import Podable
@ -148,9 +148,6 @@ class ConfigManager(object):
def generate_jobs(self, context):
job_specs = self.jobs_config.generate_job_specs(context.tm)
if not job_specs:
msg = 'No jobs available for running.'
raise ConfigError(msg)
exec_order = self.run_config.execution_order
log.indent()
for spec, i in permute_iterations(job_specs, exec_order):

@ -238,47 +238,20 @@ def _load_file(filepath, error_name):
return raw, includes
def _config_values_from_includes(filepath, include_path, error_name):
source_dir = os.path.dirname(filepath)
included_files = []
if isinstance(include_path, str):
include_path = os.path.expanduser(os.path.join(source_dir, include_path))
replace_value, includes = _load_file(include_path, error_name)
included_files.append(include_path)
included_files.extend(includes)
elif isinstance(include_path, list):
replace_value = {}
for path in include_path:
include_path = os.path.expanduser(os.path.join(source_dir, path))
sub_replace_value, includes = _load_file(include_path, error_name)
for key, val in sub_replace_value.items():
replace_value[key] = merge_config_values(val, replace_value.get(key, None))
included_files.append(include_path)
included_files.extend(includes)
else:
message = "{} does not contain a valid {} structure; value for 'include#' must be a string or a list"
raise ConfigError(message.format(filepath, error_name))
return replace_value, included_files
def _process_includes(raw, filepath, error_name):
if not raw:
return []
source_dir = os.path.dirname(filepath)
included_files = []
replace_value = None
if hasattr(raw, 'items'):
for key, value in raw.items():
if key == 'include#':
replace_value, includes = _config_values_from_includes(filepath, value, error_name)
include_path = os.path.expanduser(os.path.join(source_dir, value))
included_files.append(include_path)
replace_value, includes = _load_file(include_path, error_name)
included_files.extend(includes)
elif hasattr(value, 'items') or isiterable(value):
includes = _process_includes(value, filepath, error_name)
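Concretely, the two shapes accepted for the 'include#' value (file names hypothetical), as they would appear once the YAML is parsed:
raw_single = {'include#': 'common.yaml'}                    # single file
raw_list = {'include#': ['common.yaml', 'overrides.yaml']}  # each file loaded,
                                                            # values merged per key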
@ -324,7 +297,7 @@ def merge_augmentations(raw):
raise ConfigError(msg.format(value, n, exc))
# Make sure none of the specified aliases conflict with each other
to_check = list(entries)
to_check = [e for e in entries]
while len(to_check) > 1:
check_entry = to_check.pop()
for e in to_check:

@ -84,9 +84,9 @@ class PluginCache(object):
'defined in a config file, move the entry content into the top level'
raise ConfigError(msg.format((plugin_name)))
if (not self.loader.has_plugin(plugin_name)
and plugin_name not in self.targets
and plugin_name not in GENERIC_CONFIGS):
if (not self.loader.has_plugin(plugin_name) and
plugin_name not in self.targets and
plugin_name not in GENERIC_CONFIGS):
msg = 'configuration provided for unknown plugin "{}"'
raise ConfigError(msg.format(plugin_name))
@ -95,8 +95,8 @@ class PluginCache(object):
raise ConfigError(msg.format(plugin_name, repr(values), type(values)))
for name, value in values.items():
if (plugin_name not in GENERIC_CONFIGS
and name not in self.get_plugin_parameters(plugin_name)):
if (plugin_name not in GENERIC_CONFIGS and
name not in self.get_plugin_parameters(plugin_name)):
msg = "'{}' is not a valid parameter for '{}'"
raise ConfigError(msg.format(name, plugin_name))

@ -33,7 +33,6 @@ class JobSpecSource(object):
def id(self):
return self.config['id']
@property
def name(self):
raise NotImplementedError()

@ -16,25 +16,19 @@
import sys
import argparse
import locale
import logging
import os
import warnings
import devlib
try:
from devlib.utils.version import version as installed_devlib_version
except ImportError:
installed_devlib_version = None
from wa.framework import pluginloader
from wa.framework.command import init_argument_parser
from wa.framework.configuration import settings
from wa.framework.configuration.execution import ConfigManager
from wa.framework.host import init_user_directory, init_config
from wa.framework.exception import ConfigError, HostError
from wa.framework.version import (get_wa_version_with_commit, format_version,
required_devlib_version)
from wa.framework.exception import ConfigError
from wa.framework.version import get_wa_version_with_commit
from wa.utils import log
from wa.utils.doc import format_body
@ -70,27 +64,6 @@ def split_joined_options(argv):
return output
# Instead of presenting an obscure error due to a version mismatch, explicitly warn the user.
def check_devlib_version():
if not installed_devlib_version or installed_devlib_version[:-1] <= required_devlib_version[:-1]:
# Check the 'dev' field separately to account for comparing with release versions.
if installed_devlib_version.dev and installed_devlib_version.dev < required_devlib_version.dev:
msg = 'WA requires Devlib version >={}. Please update the currently installed version {}'
raise HostError(msg.format(format_version(required_devlib_version), devlib.__version__))
# If the default encoding is not UTF-8 warn the user as this may cause compatibility issues
# when parsing files.
def check_system_encoding():
system_encoding = locale.getpreferredencoding()
msg = 'System Encoding: {}'.format(system_encoding)
if 'UTF-8' not in system_encoding:
logger.warning(msg)
logger.warning('To prevent encoding issues please use a locale setting which supports UTF-8')
else:
logger.debug(msg)
def main():
if not os.path.exists(settings.user_directory):
init_user_directory()
@ -129,8 +102,6 @@ def main():
logger.debug('Version: {}'.format(get_wa_version_with_commit()))
logger.debug('devlib version: {}'.format(devlib.__full_version__))
logger.debug('Command Line: {}'.format(' '.join(sys.argv)))
check_devlib_version()
check_system_encoding()
# each command will add its own subparser
subparsers = parser.add_subparsers(dest='command')

@ -30,49 +30,60 @@ class WAError(Exception):
class NotFoundError(WAError):
"""Raised when the specified item is not found."""
pass
class ValidationError(WAError):
"""Raised on failure to validate an extension."""
pass
class ExecutionError(WAError):
"""Error encountered by the execution framework."""
pass
class WorkloadError(WAError):
"""General Workload error."""
pass
class JobError(WAError):
"""Job execution error."""
pass
class InstrumentError(WAError):
"""General Instrument error."""
pass
class OutputProcessorError(WAError):
"""General OutputProcessor error."""
pass
class ResourceError(WAError):
"""General Resolver error."""
pass
class CommandError(WAError):
"""Raised by commands when they have encountered an error condition
during execution."""
pass
class ToolError(WAError):
"""Raised by tools when they have encountered an error condition
during execution."""
pass
class ConfigError(WAError):
"""Raised when configuration provided is invalid. This error suggests that
the user should modify their config and try again."""
pass
class SerializerSyntaxError(Exception):

@ -25,7 +25,7 @@ from datetime import datetime
import wa.framework.signal as signal
from wa.framework import instrument as instrumentation
from wa.framework.configuration.core import Status
from wa.framework.exception import TargetError, HostError, WorkloadError, ExecutionError
from wa.framework.exception import TargetError, HostError, WorkloadError
from wa.framework.exception import TargetNotRespondingError, TimeoutError # pylint: disable=redefined-builtin
from wa.framework.job import Job
from wa.framework.output import init_job_output
@ -128,8 +128,8 @@ class ExecutionContext(object):
self.run_state.status = status
self.run_output.status = status
self.run_output.info.end_time = datetime.utcnow()
self.run_output.info.duration = (self.run_output.info.end_time
- self.run_output.info.start_time)
self.run_output.info.duration = (self.run_output.info.end_time -
self.run_output.info.start_time)
self.write_output()
def finalize(self):
@ -141,24 +141,21 @@ class ExecutionContext(object):
self.current_job = self.job_queue.pop(0)
job_output = init_job_output(self.run_output, self.current_job)
self.current_job.set_output(job_output)
self.update_job_state(self.current_job)
return self.current_job
def end_job(self):
if not self.current_job:
raise RuntimeError('No jobs in progress')
self.completed_jobs.append(self.current_job)
self.update_job_state(self.current_job)
self.output.write_result()
self.current_job = None
def set_status(self, status, force=False, write=True):
def set_status(self, status, force=False):
if not self.current_job:
raise RuntimeError('No jobs in progress')
self.set_job_status(self.current_job, status, force, write)
def set_job_status(self, job, status, force=False, write=True):
job.set_status(status, force)
if write:
self.run_output.write_state()
self.current_job.set_status(status, force)
def extract_results(self):
self.tm.extract_results(self)
@ -166,8 +163,13 @@ class ExecutionContext(object):
def move_failed(self, job):
self.run_output.move_failed(job.output)
def update_job_state(self, job):
self.run_state.update_job(job)
self.run_output.write_state()
def skip_job(self, job):
self.set_job_status(job, Status.SKIPPED, force=True)
job.status = Status.SKIPPED
self.run_state.update_job(job)
self.completed_jobs.append(job)
def skip_remaining_jobs(self):
@ -247,11 +249,6 @@ class ExecutionContext(object):
def add_event(self, message):
self.output.add_event(message)
def add_classifier(self, name, value, overwrite=False):
self.output.add_classifier(name, value, overwrite)
if self.current_job:
self.current_job.add_classifier(name, value, overwrite)
def add_metadata(self, key, *args, **kwargs):
self.output.add_metadata(key, *args, **kwargs)
@ -291,7 +288,7 @@ class ExecutionContext(object):
try:
job.initialize(self)
except WorkloadError as e:
self.set_job_status(job, Status.FAILED, write=False)
job.set_status(Status.FAILED)
log.log_error(e, self.logger)
failed_ids.append(job.id)
@ -301,7 +298,6 @@ class ExecutionContext(object):
new_queue.append(job)
self.job_queue = new_queue
self.write_state()
def _load_resource_getters(self):
self.logger.debug('Loading resource discoverers')
@ -337,7 +333,7 @@ class Executor(object):
returning.
The initial context set up involves combining configuration from various
sources, loading of required workloads, loading and installation of
sources, loading of requided workloads, loading and installation of
instruments and output processors, etc. Static validation of the combined
configuration is also performed.
@ -353,7 +349,7 @@ class Executor(object):
def execute(self, config_manager, output):
"""
Execute the run specified by an agenda. Optionally, selectors may be
used to only execute a subset of the specified agenda.
used to only selecute a subset of the specified agenda.
Params::
@ -403,7 +399,7 @@ class Executor(object):
attempts = context.cm.run_config.max_retries
while attempts:
try:
self.target_manager.reboot(context)
self.target_manager.reboot()
except TargetError as e:
if attempts:
attempts -= 1
@ -449,7 +445,7 @@ class Executor(object):
for status in reversed(Status.levels):
if status in counter:
parts.append('{} {}'.format(counter[status], status))
self.logger.info('{}{}'.format(status_summary, ', '.join(parts)))
self.logger.info(status_summary + ', '.join(parts))
self.logger.info('Results can be found in {}'.format(output.basepath))
@ -537,9 +533,6 @@ class Runner(object):
self.pm.process_run_output(self.context)
self.pm.export_run_output(self.context)
self.pm.finalize(self.context)
if self.context.reboot_policy.reboot_on_run_completion:
self.logger.info('Rebooting target on run completion.')
self.context.tm.reboot(self.context)
signal.disconnect(self._error_signalled_callback, signal.ERROR_LOGGED)
signal.disconnect(self._warning_signalled_callback, signal.WARNING_LOGGED)
@ -559,15 +552,15 @@ class Runner(object):
with signal.wrap('JOB', self, context):
context.tm.start()
self.do_run_job(job, context)
context.set_job_status(job, Status.OK)
job.set_status(Status.OK)
except (Exception, KeyboardInterrupt) as e: # pylint: disable=broad-except
log.log_error(e, self.logger)
if isinstance(e, KeyboardInterrupt):
context.run_interrupted = True
context.set_job_status(job, Status.ABORTED)
job.set_status(Status.ABORTED)
raise e
else:
context.set_job_status(job, Status.FAILED)
job.set_status(Status.FAILED)
if isinstance(e, TargetNotRespondingError):
raise e
elif isinstance(e, TargetError):
@ -590,7 +583,7 @@ class Runner(object):
self.context.skip_job(job)
return
context.set_job_status(job, Status.RUNNING)
job.set_status(Status.RUNNING)
self.send(signal.JOB_STARTED)
job.configure_augmentations(context, self.pm)
@ -601,7 +594,7 @@ class Runner(object):
try:
job.setup(context)
except Exception as e:
context.set_job_status(job, Status.FAILED)
job.set_status(Status.FAILED)
log.log_error(e, self.logger)
if isinstance(e, (TargetError, TimeoutError)):
context.tm.verify_target_responsive(context)
@ -614,10 +607,10 @@ class Runner(object):
job.run(context)
except KeyboardInterrupt:
context.run_interrupted = True
context.set_job_status(job, Status.ABORTED)
job.set_status(Status.ABORTED)
raise
except Exception as e:
context.set_job_status(job, Status.FAILED)
job.set_status(Status.FAILED)
log.log_error(e, self.logger)
if isinstance(e, (TargetError, TimeoutError)):
context.tm.verify_target_responsive(context)
@ -630,7 +623,7 @@ class Runner(object):
self.pm.process_job_output(context)
self.pm.export_job_output(context)
except Exception as e:
context.set_job_status(job, Status.PARTIAL)
job.set_status(Status.PARTIAL)
if isinstance(e, (TargetError, TimeoutError)):
context.tm.verify_target_responsive(context)
self.context.record_ui_state('output-error')
@ -638,7 +631,7 @@ class Runner(object):
except KeyboardInterrupt:
context.run_interrupted = True
context.set_status(Status.ABORTED)
job.set_status(Status.ABORTED)
raise
finally:
# If setup was successfully completed, teardown must
@ -660,9 +653,6 @@ class Runner(object):
self.logger.error(msg.format(job.id, job.iteration, job.status))
self.context.failed_jobs += 1
self.send(signal.JOB_FAILED)
if rc.bail_on_job_failure:
raise ExecutionError('Job {} failed, bailing.'.format(job.id))
else: # status not in retry_on_status
self.logger.info('Job completed with status {}'.format(job.status))
if job.status != 'ABORTED':
@ -674,9 +664,8 @@ class Runner(object):
def retry_job(self, job):
retry_job = Job(job.spec, job.iteration, self.context)
retry_job.workload = job.workload
retry_job.state = job.state
retry_job.retries = job.retries + 1
self.context.set_job_status(retry_job, Status.PENDING, force=True)
retry_job.set_status(Status.PENDING)
self.context.job_queue.insert(0, retry_job)
self.send(signal.JOB_RESTARTED)

@ -31,7 +31,7 @@ import requests
from wa import Parameter, settings, __file__ as _base_filepath
from wa.framework.resource import ResourceGetter, SourcePriority, NO_ONE
from wa.framework.exception import ResourceError
from wa.utils.misc import (ensure_directory_exists as _d, atomic_write_path,
from wa.utils.misc import (ensure_directory_exists as _d,
ensure_file_directory_exists as _f, sha256, urljoin)
from wa.utils.types import boolean, caseless_string
@ -78,20 +78,15 @@ def get_path_matches(resource, files):
return matches
# pylint: disable=too-many-return-statements
def get_from_location(basepath, resource):
if resource.kind == 'file':
path = os.path.join(basepath, resource.path)
if os.path.exists(path):
return path
elif resource.kind == 'executable':
bin_dir = os.path.join(basepath, 'bin', resource.abi)
if not os.path.exists(bin_dir):
return None
for entry in os.listdir(bin_dir):
path = os.path.join(bin_dir, entry)
if resource.match(path):
return path
path = os.path.join(basepath, 'bin', resource.abi, resource.filename)
if os.path.exists(path):
return path
elif resource.kind == 'revent':
path = os.path.join(basepath, 'revent_files')
if os.path.exists(path):
@ -239,19 +234,21 @@ class Http(ResourceGetter):
index_url = urljoin(self.url, 'index.json')
response = self.geturl(index_url)
if response.status_code != http.client.OK:
message = 'Could not fetch "{}"; received "{} {}"'
message = 'Could not fetch "{}"; recieved "{} {}"'
self.logger.error(message.format(index_url,
response.status_code,
response.reason))
return {}
content = response.content.decode('utf-8')
if sys.version_info[0] == 3:
content = response.content.decode('utf-8')
else:
content = response.content
return json.loads(content)
def download_asset(self, asset, owner_name):
url = urljoin(self.url, owner_name, asset['path'])
local_path = _f(os.path.join(settings.dependencies_directory, '__remote',
owner_name, asset['path'].replace('/', os.sep)))
if os.path.exists(local_path) and not self.always_fetch:
local_sha = sha256(local_path)
if local_sha == asset['sha256']:
@ -260,15 +257,14 @@ class Http(ResourceGetter):
self.logger.debug('Downloading {}'.format(url))
response = self.geturl(url, stream=True)
if response.status_code != http.client.OK:
message = 'Could not download asset "{}"; received "{} {}"'
message = 'Could not download asset "{}"; recieved "{} {}"'
self.logger.warning(message.format(url,
response.status_code,
response.reason))
return
with atomic_write_path(local_path) as at_path:
with open(at_path, 'wb') as wfh:
for chunk in response.iter_content(chunk_size=self.chunk_size):
wfh.write(chunk)
with open(local_path, 'wb') as wfh:
for chunk in response.iter_content(chunk_size=self.chunk_size):
wfh.write(chunk)
return local_path
def geturl(self, url, stream=False):
@ -326,8 +322,7 @@ class Filer(ResourceGetter):
"""
parameters = [
Parameter('remote_path', global_alias='remote_assets_path',
default=settings.assets_repository,
Parameter('remote_path', global_alias='remote_assets_path', default='',
description="""
Path, on the local system, where the assets are located.
"""),

@ -50,7 +50,6 @@ def init_user_directory(overwrite_existing=False): # pylint: disable=R0914
# If running with sudo on POSIX, change the ownership to the real user.
real_user = os.getenv('SUDO_USER')
if real_user:
# pylint: disable=import-outside-toplevel
import pwd # done here as module won't import on win32
user_entry = pwd.getpwnam(real_user)
uid, gid = user_entry.pw_uid, user_entry.pw_gid

@ -98,12 +98,13 @@ and the code to clear these files goes in the teardown method. ::
"""
import sys
import logging
import inspect
from collections import OrderedDict
from wa.framework import signal
from wa.framework.plugin import TargetedPlugin
from wa.framework.plugin import Plugin
from wa.framework.exception import (TargetNotRespondingError, TimeoutError, # pylint: disable=redefined-builtin
WorkloadError, TargetError)
from wa.utils.log import log_error
@ -324,7 +325,10 @@ def install(instrument, context):
if not callable(attr):
msg = 'Attribute {} not callable in {}.'
raise ValueError(msg.format(attr_name, instrument))
argspec = inspect.getfullargspec(attr)
if sys.version_info[0] == 3:
argspec = inspect.getfullargspec(attr)
else:
argspec = inspect.getargspec(attr) # pylint: disable=deprecated-method
arg_num = len(argspec.args)
# Instrument callbacks will be passed exactly two arguments: self
# (the instrument instance to which the callback is bound) and
@ -417,13 +421,14 @@ def get_disabled():
return [i for i in installed if not i.is_enabled]
class Instrument(TargetedPlugin):
class Instrument(Plugin):
"""
Base class for instrument implementations.
"""
kind = "instrument"
def __init__(self, *args, **kwargs):
super(Instrument, self).__init__(*args, **kwargs)
def __init__(self, target, **kwargs):
super(Instrument, self).__init__(**kwargs)
self.target = target
self.is_enabled = True
self.is_broken = False
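A minimal sketch (names hypothetical) of the callback shape that the argspec check in install() above enforces: every mapped callback must accept exactly two arguments, self and the execution context.
class SketchInstrument(Instrument):
    name = 'sketch'
    def setup(self, context):  # self + context, as install() expects
        pass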

@ -23,7 +23,6 @@ from datetime import datetime
from wa.framework import pluginloader, signal, instrument
from wa.framework.configuration.core import Status
from wa.utils.log import indentcontext
from wa.framework.run import JobState
class Job(object):
@ -38,29 +37,24 @@ class Job(object):
def label(self):
return self.spec.label
@property
def classifiers(self):
return self.spec.classifiers
@property
def status(self):
return self.state.status
return self._status
@property
def has_been_initialized(self):
return self._has_been_initialized
@property
def retries(self):
return self.state.retries
@status.setter
def status(self, value):
self.state.status = value
self.state.timestamp = datetime.utcnow()
self._status = value
if self.output:
self.output.status = value
@retries.setter
def retries(self, value):
self.state.retries = value
def __init__(self, spec, iteration, context):
self.logger = logging.getLogger('job')
self.spec = spec
@ -69,13 +63,13 @@ class Job(object):
self.workload = None
self.output = None
self.run_time = None
self.classifiers = copy(self.spec.classifiers)
self.retries = 0
self._has_been_initialized = False
self.state = JobState(self.id, self.label, self.iteration, Status.NEW)
self._status = Status.NEW
def load(self, target, loader=pluginloader):
self.logger.info('Loading job {}'.format(self))
if self.id not in self._workload_cache:
if self.iteration == 1:
self.workload = loader.get_workload(self.spec.workload_name,
target,
**self.spec.workload_parameters)
@ -97,6 +91,7 @@ class Job(object):
self.workload.initialize(context)
self.set_status(Status.PENDING)
self._has_been_initialized = True
context.update_job_state(self)
def configure_augmentations(self, context, pm):
self.logger.info('Configuring augmentations')
@ -186,11 +181,6 @@ class Job(object):
if force or self.status < status:
self.status = status
def add_classifier(self, name, value, overwrite=False):
if name in self.classifiers and not overwrite:
raise ValueError('Cannot overwrite "{}" classifier.'.format(name))
self.classifiers[name] = value
def __str__(self):
return '{} ({}) [{}]'.format(self.id, self.label, self.iteration)
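For example, the overwrite guard above makes re-classification explicit:
job.add_classifier('thread', 'big')
job.add_classifier('thread', 'little')                  # raises ValueError
job.add_classifier('thread', 'little', overwrite=True)  # allowed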

@ -23,8 +23,6 @@ except ImportError:
import logging
import os
import shutil
import tarfile
import tempfile
from collections import OrderedDict, defaultdict
from copy import copy, deepcopy
from datetime import datetime
@ -39,8 +37,7 @@ from wa.framework.run import RunState, RunInfo
from wa.framework.target.info import TargetInfo
from wa.framework.version import get_wa_version_with_commit
from wa.utils.doc import format_simple_table
from wa.utils.misc import (touch, ensure_directory_exists, isiterable,
format_ordered_dict, safe_extract)
from wa.utils.misc import touch, ensure_directory_exists, isiterable
from wa.utils.postgres import get_schema_versions
from wa.utils.serializer import write_pod, read_pod, Podable, json
from wa.utils.types import enum, numeric
@ -148,10 +145,9 @@ class Output(object):
if not os.path.exists(path):
msg = 'Attempting to add non-existing artifact: {}'
raise HostError(msg.format(path))
is_dir = os.path.isdir(path)
path = os.path.relpath(path, self.basepath)
self.result.add_artifact(name, path, kind, description, classifiers, is_dir)
self.result.add_artifact(name, path, kind, description, classifiers)
def add_event(self, message):
self.result.add_event(message)
@ -166,9 +162,6 @@ class Output(object):
artifact = self.get_artifact(name)
return self.get_path(artifact.path)
def add_classifier(self, name, value, overwrite=False):
self.result.add_classifier(name, value, overwrite)
def add_metadata(self, key, *args, **kwargs):
self.result.add_metadata(key, *args, **kwargs)
@ -269,8 +262,8 @@ class RunOutput(Output, RunOutputCommon):
self._combined_config = None
self.jobs = []
self.job_specs = []
if (not os.path.isfile(self.statefile)
or not os.path.isfile(self.infofile)):
if (not os.path.isfile(self.statefile) or
not os.path.isfile(self.infofile)):
msg = '"{}" does not exist or is not a valid WA output directory.'
raise ValueError(msg.format(self.basepath))
self.reload()
@ -353,13 +346,6 @@ class JobOutput(Output):
self.spec = None
self.reload()
@property
def augmentations(self):
job_augs = set([])
for aug in self.spec.augmentations:
job_augs.add(aug)
return list(job_augs)
class Result(Podable):
@ -392,10 +378,9 @@ class Result(Podable):
logger.debug('Adding metric: {}'.format(metric))
self.metrics.append(metric)
def add_artifact(self, name, path, kind, description=None, classifiers=None,
is_dir=False):
def add_artifact(self, name, path, kind, description=None, classifiers=None):
artifact = Artifact(name, path, kind, description=description,
classifiers=classifiers, is_dir=is_dir)
classifiers=classifiers)
logger.debug('Adding artifact: {}'.format(artifact))
self.artifacts.append(artifact)
@ -414,21 +399,6 @@ class Result(Podable):
return artifact
raise HostError('Artifact "{}" not found'.format(name))
def add_classifier(self, name, value, overwrite=False):
if name in self.classifiers and not overwrite:
raise ValueError('Cannot overwrite "{}" classifier.'.format(name))
self.classifiers[name] = value
for metric in self.metrics:
if name in metric.classifiers and not overwrite:
raise ValueError('Cannot overwrite "{}" classifier; clashes with {}.'.format(name, metric))
metric.classifiers[name] = value
for artifact in self.artifacts:
if name in artifact.classifiers and not overwrite:
raise ValueError('Cannot overwrite "{}" classifier; clashes with {}.'.format(name, artifact))
artifact.classifiers[name] = value
def add_metadata(self, key, *args, **kwargs):
force = kwargs.pop('force', False)
if kwargs:
@ -546,7 +516,7 @@ class Artifact(Podable):
"""
_pod_serialization_version = 2
_pod_serialization_version = 1
@staticmethod
def from_pod(pod):
@ -555,11 +525,9 @@ class Artifact(Podable):
pod['kind'] = ArtifactType(pod['kind'])
instance = Artifact(**pod)
instance._pod_version = pod_version # pylint: disable =protected-access
instance.is_dir = pod.pop('is_dir')
return instance
def __init__(self, name, path, kind, description=None, classifiers=None,
is_dir=False):
def __init__(self, name, path, kind, description=None, classifiers=None):
""""
:param name: Name that uniquely identifies this artifact.
:param path: The *relative* path of the artifact. Depending on the
@ -575,6 +543,7 @@ class Artifact(Podable):
:param classifiers: A set of key-value pairs to further classify this
metric beyond current iteration (e.g. this can be
used to identify sub-tests).
"""
super(Artifact, self).__init__()
self.name = name
@ -586,13 +555,11 @@ class Artifact(Podable):
raise ValueError(msg.format(kind, ARTIFACT_TYPES))
self.description = description
self.classifiers = classifiers or {}
self.is_dir = is_dir
def to_pod(self):
pod = super(Artifact, self).to_pod()
pod.update(self.__dict__)
pod['kind'] = str(self.kind)
pod['is_dir'] = self.is_dir
return pod
@staticmethod
@ -600,17 +567,11 @@ class Artifact(Podable):
pod['_pod_version'] = pod.get('_pod_version', 1)
return pod
@staticmethod
def _pod_upgrade_v2(pod):
pod['is_dir'] = pod.get('is_dir', False)
return pod
def __str__(self):
return self.path
def __repr__(self):
ft = 'dir' if self.is_dir else 'file'
return '{} ({}) ({}): {}'.format(self.name, ft, self.kind, self.path)
return '{} ({}): {}'.format(self.name, self.kind, self.path)
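A minimal sketch of the directory support being introduced here (name, path and kind hypothetical):
art = Artifact('sysfs-dump', 'sysfs/', 'raw', description='sysfs snapshot',
               is_dir=True)
repr(art)  # "sysfs-dump (dir) (raw): sysfs/"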
class Metric(Podable):
@ -641,12 +602,6 @@ class Metric(Podable):
instance._pod_version = pod_version # pylint: disable =protected-access
return instance
@property
def label(self):
parts = ['{}={}'.format(n, v) for n, v in self.classifiers.items()]
parts.insert(0, self.name)
return '/'.join(parts)
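For instance, the label derived by the property above for a metric with a single classifier:
m = Metric('score', 42, classifiers={'test': 'warm'})
assert m.label == 'score/test=warm'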
def __init__(self, name, value, units=None, lower_is_better=False,
classifiers=None):
super(Metric, self).__init__()
@ -680,7 +635,7 @@ class Metric(Podable):
def __repr__(self):
text = self.__str__()
if self.classifiers:
return '<{} {}>'.format(text, format_ordered_dict(self.classifiers))
return '<{} {}>'.format(text, self.classifiers)
else:
return '<{}>'.format(text)
@ -777,13 +732,9 @@ def init_job_output(run_output, job):
def discover_wa_outputs(path):
# Use topdown=True to allow pruning dirs
for root, dirs, _ in os.walk(path, topdown=True):
for root, dirs, _ in os.walk(path):
if '__meta' in dirs:
yield RunOutput(root)
# Avoid recursing into the artifacts, as this can be very lengthy if a
# large number of files is present (sysfs dump)
dirs.clear()
def _save_raw_config(meta_dir, state):
@ -847,19 +798,6 @@ class DatabaseOutput(Output):
def get_artifact_path(self, name):
artifact = self.get_artifact(name)
if artifact.is_dir:
return self._read_dir_artifact(artifact)
else:
return self._read_file_artifact(artifact)
def _read_dir_artifact(self, artifact):
artifact_path = tempfile.mkdtemp(prefix='wa_')
with tarfile.open(fileobj=self.conn.lobject(int(artifact.path), mode='b'), mode='r|gz') as tar_file:
safe_extract(tar_file, artifact_path)
self.conn.commit()
return artifact_path
def _read_file_artifact(self, artifact):
artifact = StringIO(self.conn.lobject(int(artifact.path)).read())
self.conn.commit()
return artifact
@ -948,15 +886,13 @@ class DatabaseOutput(Output):
def _get_artifacts(self):
columns = ['artifacts.name', 'artifacts.description', 'artifacts.kind',
('largeobjects.lo_oid', 'path'), 'artifacts.oid', 'artifacts.is_dir',
('largeobjects.lo_oid', 'path'), 'artifacts.oid',
'artifacts._pod_version', 'artifacts._pod_serialization_version']
tables = ['largeobjects', 'artifacts']
joins = [('classifiers', 'classifiers.artifact_oid = artifacts.oid')]
conditions = ['artifacts.{}_oid = \'{}\''.format(self.kind, self.oid),
'artifacts.large_object_uuid = largeobjects.oid']
# If retrieving run level artifacts we want those that don't also belong to a job
if self.kind == 'run':
conditions.append('artifacts.job_oid IS NULL')
'artifacts.large_object_uuid = largeobjects.oid',
'artifacts.job_oid IS NULL']
pod = self._read_db(columns, tables, conditions, joins)
for artifact in pod:
artifact['path'] = str(artifact['path'])
@ -971,9 +907,8 @@ class DatabaseOutput(Output):
def kernel_config_from_db(raw):
kernel_config = {}
if raw:
for k, v in zip(raw[0], raw[1]):
kernel_config[k] = v
for k, v in zip(raw[0], raw[1]):
kernel_config[k] = v
return kernel_config
@ -1007,10 +942,9 @@ class RunDatabaseOutput(DatabaseOutput, RunOutputCommon):
@property
def _db_targetfile(self):
columns = ['os', 'is_rooted', 'target', 'modules', 'abi', 'cpus', 'os_version',
columns = ['os', 'is_rooted', 'target', 'abi', 'cpus', 'os_version',
'hostid', 'hostname', 'kernel_version', 'kernel_release',
'kernel_sha1', 'kernel_config', 'sched_features', 'page_size_kb',
'system_id', 'screen_resolution', 'prop', 'android_id',
'kernel_sha1', 'kernel_config', 'sched_features',
'_pod_version', '_pod_serialization_version']
tables = ['targets']
conditions = ['targets.run_oid = \'{}\''.format(self.oid)]
@ -1063,7 +997,6 @@ class RunDatabaseOutput(DatabaseOutput, RunOutputCommon):
jobs = self._read_db(columns, tables, conditions)
for job in jobs:
job['augmentations'] = self._get_job_augmentations(job['oid'])
job['workload_parameters'] = workload_params.pop(job['oid'], {})
job['runtime_parameters'] = runtime_params.pop(job['oid'], {})
job.pop('oid')
@ -1227,15 +1160,6 @@ class RunDatabaseOutput(DatabaseOutput, RunOutputCommon):
logger.debug('Failed to deserialize job_oid:{}-"{}":"{}"'.format(job_oid, k, v))
return parm_dict
def _get_job_augmentations(self, job_oid):
columns = ['jobs_augs.augmentation_oid', 'augmentations.name',
'augmentations.oid', 'jobs_augs.job_oid']
tables = ['jobs_augs', 'augmentations']
conditions = ['jobs_augs.job_oid = \'{}\''.format(job_oid),
'jobs_augs.augmentation_oid = augmentations.oid']
augmentations = self._read_db(columns, tables, conditions)
return [aug['name'] for aug in augmentations]
def _list_runs(self):
columns = ['runs.run_uuid', 'runs.run_name', 'runs.project',
'runs.project_stage', 'runs.status', 'runs.start_time', 'runs.end_time']
@ -1287,11 +1211,3 @@ class JobDatabaseOutput(DatabaseOutput):
def __str__(self):
return '{}-{}-{}'.format(self.id, self.label, self.iteration)
@property
def augmentations(self):
job_augs = set([])
if self.spec:
for aug in self.spec.augmentations:
job_augs.add(aug)
return list(job_augs)

@ -18,6 +18,8 @@
import os
import sys
import inspect
import imp
import string
import logging
from collections import OrderedDict, defaultdict
from itertools import chain
@ -30,10 +32,16 @@ from wa.framework.exception import (NotFoundError, PluginLoaderError, TargetErro
ValidationError, ConfigError, HostError)
from wa.utils import log
from wa.utils.misc import (ensure_directory_exists as _d, walk_modules, load_class,
merge_dicts_simple, get_article, import_path)
merge_dicts_simple, get_article)
from wa.utils.types import identifier
if sys.version_info[0] == 3:
MODNAME_TRANS = str.maketrans(':/\\.', '____')
else:
MODNAME_TRANS = string.maketrans(':/\\.', '____')
class AttributeCollection(object):
"""
Accumulator for plugin attribute objects (such as Parameters or Artifacts).
@ -149,7 +157,6 @@ class Alias(object):
raise ConfigError(msg.format(param, self.name, ext.name))
# pylint: disable=bad-mcs-classmethod-argument
class PluginMeta(type):
"""
This basically adds some magic to plugins to make implementing new plugins,
@ -239,7 +246,7 @@ class Plugin(with_metaclass(PluginMeta, object)):
@classmethod
def get_default_config(cls):
return {p.name: p.default for p in cls.parameters if not p.deprecated}
return {p.name: p.default for p in cls.parameters}
@property
def dependencies_directory(self):
@ -360,7 +367,7 @@ class Plugin(with_metaclass(PluginMeta, object)):
self._modules.append(module)
def __str__(self):
return str(self.name)
return self.name
def __repr__(self):
params = []
@ -376,22 +383,12 @@ class TargetedPlugin(Plugin):
"""
supported_targets = []
parameters = [
Parameter('cleanup_assets', kind=bool,
global_alias='cleanup_assets',
aliases=['clean_up'],
default=True,
description="""
If ``True``, assets that are deployed or created by the
plugin will be removed again from the device.
"""),
]
suppoted_targets = []
@classmethod
def check_compatible(cls, target):
if cls.supported_targets:
if target.os not in cls.supported_targets:
if cls.suppoted_targets:
if target.os not in cls.suppoted_targets:
msg = 'Incompatible target OS "{}" for {}'
raise TargetError(msg.format(target.os, cls.name))
@ -614,30 +611,24 @@ class PluginLoader(object):
self.logger.debug('Checking path %s', path)
if os.path.isfile(path):
self._discover_from_file(path)
elif os.path.exists(path):
for root, _, files in os.walk(path, followlinks=True):
should_skip = False
for igpath in ignore_paths:
if root.startswith(igpath):
should_skip = True
break
if should_skip:
for root, _, files in os.walk(path, followlinks=True):
should_skip = False
for igpath in ignore_paths:
if root.startswith(igpath):
should_skip = True
break
if should_skip:
continue
for fname in files:
if os.path.splitext(fname)[1].lower() != '.py':
continue
for fname in files:
if os.path.splitext(fname)[1].lower() != '.py':
continue
filepath = os.path.join(root, fname)
self._discover_from_file(filepath)
elif not os.path.isabs(path):
try:
for module in walk_modules(path):
self._discover_in_module(module)
except Exception: # NOQA pylint: disable=broad-except
pass
filepath = os.path.join(root, fname)
self._discover_from_file(filepath)
def _discover_from_file(self, filepath):
try:
module = import_path(filepath)
modname = os.path.splitext(filepath[1:])[0].translate(MODNAME_TRANS)
module = imp.load_source(modname, filepath)
self._discover_in_module(module)
except (SystemExit, ImportError) as e:
if self.keep_going:

@ -35,7 +35,6 @@ class __LoaderWrapper(object):
def reset(self):
# These imports cannot be done at top level, because of
# sys.modules manipulation below
# pylint: disable=import-outside-toplevel
from wa.framework.plugin import PluginLoader
from wa.framework.configuration.core import settings
self._loader = PluginLoader(settings.plugin_packages,

@ -16,14 +16,15 @@ import logging
import os
import re
from devlib.utils.android import ApkInfo
from wa.framework import pluginloader
from wa.framework.plugin import Plugin
from wa.framework.exception import ResourceError
from wa.framework.configuration import settings
from wa.utils import log
from wa.utils.android import get_cacheable_apk_info
from wa.utils.misc import get_object_name
from wa.utils.types import enum, list_or_string, prioritylist, version_tuple
from wa.utils.types import enum, list_or_string, prioritylist
SourcePriority = enum(['package', 'remote', 'lan', 'local',
@ -141,12 +142,10 @@ class ApkFile(Resource):
def __init__(self, owner, variant=None, version=None,
package=None, uiauto=False, exact_abi=False,
supported_abi=None, min_version=None, max_version=None):
supported_abi=None):
super(ApkFile, self).__init__(owner)
self.variant = variant
self.version = version
self.max_version = max_version
self.min_version = min_version
self.package = package
self.uiauto = uiauto
self.exact_abi = exact_abi
@ -159,25 +158,21 @@ class ApkFile(Resource):
def match(self, path):
name_matches = True
version_matches = True
version_range_matches = True
package_matches = True
abi_matches = True
uiauto_matches = uiauto_test_matches(path, self.uiauto)
if self.version:
if self.version is not None:
version_matches = apk_version_matches(path, self.version)
if self.max_version or self.min_version:
version_range_matches = apk_version_matches_range(path, self.min_version,
self.max_version)
if self.variant:
if self.variant is not None:
name_matches = file_name_matches(path, self.variant)
if self.package:
if self.package is not None:
package_matches = package_name_matches(path, self.package)
if self.supported_abi:
if self.supported_abi is not None:
abi_matches = apk_abi_matches(path, self.supported_abi,
self.exact_abi)
return name_matches and version_matches and \
version_range_matches and uiauto_matches \
and package_matches and abi_matches
uiauto_matches and package_matches and \
abi_matches
def __str__(self):
text = '<{}\'s apk'.format(self.owner)
@ -278,40 +273,15 @@ class ResourceResolver(object):
def apk_version_matches(path, version):
version = list_or_string(version)
info = get_cacheable_apk_info(path)
for v in version:
if v in (info.version_name, info.version_code):
return True
if loose_version_matching(v, info.version_name):
return True
return False
def apk_version_matches_range(path, min_version=None, max_version=None):
info = get_cacheable_apk_info(path)
return range_version_matching(info.version_name, min_version, max_version)
def range_version_matching(apk_version, min_version=None, max_version=None):
if not apk_version:
return False
apk_version = version_tuple(apk_version)
if max_version:
max_version = version_tuple(max_version)
if apk_version > max_version:
return False
if min_version:
min_version = version_tuple(min_version)
if apk_version < min_version:
return False
return True
info = ApkInfo(path)
if info.version_name == version or info.version_code == version:
return True
return loose_version_matching(version, info.version_name)
def loose_version_matching(config_version, apk_version):
config_version = version_tuple(config_version)
apk_version = version_tuple(apk_version)
config_version = config_version.split('.')
apk_version = apk_version.split('.')
if len(apk_version) < len(config_version):
return False # More specific version requested than available
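Worked examples of the two matchers (assuming version_tuple yields comparable numeric tuples):
range_version_matching('5.1.0', min_version='4.4', max_version='5.2')  # True
range_version_matching('5.1.0', max_version='5.0')                     # False
loose_version_matching('5.1', '5.1.0')   # True: config prefix matches the APK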
@ -332,18 +302,18 @@ def file_name_matches(path, pattern):
def uiauto_test_matches(path, uiauto):
info = get_cacheable_apk_info(path)
info = ApkInfo(path)
return uiauto == ('com.arm.wa.uiauto' in info.package)
def package_name_matches(path, package):
info = get_cacheable_apk_info(path)
info = ApkInfo(path)
return info.package == package
def apk_abi_matches(path, supported_abi, exact_abi=False):
supported_abi = list_or_string(supported_abi)
info = get_cacheable_apk_info(path)
info = ApkInfo(path)
# If no native code present, suitable for all devices.
if not info.native_code:
return True

@ -102,7 +102,13 @@ class RunState(Podable):
self.timestamp = datetime.utcnow()
def add_job(self, job):
self.jobs[(job.state.id, job.state.iteration)] = job.state
job_state = JobState(job.id, job.label, job.iteration, job.status)
self.jobs[(job_state.id, job_state.iteration)] = job_state
def update_job(self, job):
state = self.jobs[(job.id, job.iteration)]
state.status = job.status
state.timestamp = datetime.utcnow()
def get_status_counts(self):
counter = Counter()
@ -157,7 +163,7 @@ class JobState(Podable):
pod['label'] = self.label
pod['iteration'] = self.iteration
pod['status'] = self.status.to_pod()
pod['retries'] = self.retries
pod['retries'] = 0
pod['timestamp'] = self.timestamp
return pod

@ -15,7 +15,7 @@
"""
This module wraps louie signalling mechanism. It relies on modified version of louie
This module wraps louie signalling mechanism. It relies on modified version of loiue
that has prioritization added to handler invocation.
"""
@ -23,9 +23,8 @@ import sys
import logging
from contextlib import contextmanager
from louie import dispatcher, saferef # pylint: disable=wrong-import-order
from louie.dispatcher import _remove_receiver
import wrapt
from louie import dispatcher # pylint: disable=wrong-import-order
from wa.utils.types import prioritylist, enum
@ -243,8 +242,8 @@ def connect(handler, signal, sender=dispatcher.Any, priority=0):
receivers = signals[signal]
else:
receivers = signals[signal] = _prioritylist_wrapper()
receivers.add(handler, priority)
dispatcher.connect(handler, signal, sender)
receivers.add(saferef.safe_ref(handler, on_delete=_remove_receiver), priority)
def disconnect(handler, signal, sender=dispatcher.Any):
@ -269,7 +268,7 @@ def send(signal, sender=dispatcher.Anonymous, *args, **kwargs):
"""
Sends a signal, causing connected handlers to be invoked.
Parameters:
Paramters:
:signal: Signal to be sent. This must be an instance of :class:`wa.core.signal.Signal`
or its subclasses.

@ -21,11 +21,9 @@ import tempfile
import threading
import time
from wa.framework.exception import WorkerThreadError
from wa.framework.plugin import Parameter
from wa.utils.android import LogcatParser
from wa.framework.exception import WorkerThreadError
from wa.utils.misc import touch
import wa.framework.signal as signal
class LinuxAssistant(object):
@ -35,9 +33,6 @@ class LinuxAssistant(object):
def __init__(self, target):
self.target = target
def initialize(self):
pass
def start(self):
pass
@ -47,9 +42,6 @@ class LinuxAssistant(object):
def stop(self):
pass
def finalize(self):
pass
class AndroidAssistant(object):
@ -74,111 +66,40 @@ class AndroidAssistant(object):
temporary location on the host. Setting the value of the poll
period enables this behavior.
"""),
Parameter('stay_on_mode', kind=int,
constraint=lambda x: 0 <= x <= 7,
description="""
Specify whether the screen should stay on while the device is
charging:
0: never stay on
1: with AC charger
2: with USB charger
4: with wireless charger
Values can be OR-ed together to produce combinations, for
instance ``7`` will cause the screen to stay on when charging
under any method.
"""),
]
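For example, to keep the screen on with AC or USB chargers but not wireless, the flags 1 and 2 from the list above are OR-ed (target construction hypothetical):
assistant = AndroidAssistant(target, stay_on_mode=1 | 2)  # == 3; 7 covers all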
def __init__(self, target, logcat_poll_period=None, disable_selinux=True, stay_on_mode=None):
def __init__(self, target, logcat_poll_period=None, disable_selinux=True):
self.target = target
self.logcat_poll_period = logcat_poll_period
self.disable_selinux = disable_selinux
self.stay_on_mode = stay_on_mode
self.orig_stay_on_mode = self.target.get_stay_on_mode() if stay_on_mode is not None else None
self.logcat_poller = None
self.logger = logging.getLogger('logcat')
self._logcat_marker_msg = None
self._logcat_marker_tag = None
signal.connect(self._before_workload, signal.BEFORE_WORKLOAD_EXECUTION)
if self.logcat_poll_period:
signal.connect(self._after_workload, signal.AFTER_WORKLOAD_EXECUTION)
def initialize(self):
if self.target.is_rooted and self.disable_selinux:
self.do_disable_selinux()
if self.stay_on_mode is not None:
self.target.set_stay_on_mode(self.stay_on_mode)
def start(self):
if self.logcat_poll_period:
self.logcat_poller = LogcatPoller(self.target, self.logcat_poll_period)
self.logcat_poller.start()
else:
if not self._logcat_marker_msg:
self._logcat_marker_msg = 'WA logcat marker for wrap detection'
self._logcat_marker_tag = 'WAlog'
def stop(self):
if self.logcat_poller:
self.logcat_poller.stop()
def finalize(self):
if self.stay_on_mode is not None:
self.target.set_stay_on_mode(self.orig_stay_on_mode)
def extract_results(self, context):
logcat_file = os.path.join(context.output_directory, 'logcat.log')
self.dump_logcat(logcat_file)
context.add_artifact('logcat', logcat_file, kind='log')
self.clear_logcat()
if not self._check_logcat_nowrap(logcat_file):
self.logger.warning('The main logcat buffer wrapped and lost data;'
' results that rely on this buffer may be'
' inaccurate or incomplete.'
)
def dump_logcat(self, outfile):
if self.logcat_poller:
self.logcat_poller.write_log(outfile)
else:
self.target.dump_logcat(outfile, logcat_format='threadtime')
self.target.dump_logcat(outfile)
def clear_logcat(self):
if self.logcat_poller:
self.logcat_poller.clear_buffer()
else:
self.target.clear_logcat()
def _before_workload(self, _):
if self.logcat_poller:
self.logcat_poller.start_logcat_wrap_detect()
else:
self.insert_logcat_marker()
def _after_workload(self, _):
self.logcat_poller.stop_logcat_wrap_detect()
def _check_logcat_nowrap(self, outfile):
if self.logcat_poller:
return self.logcat_poller.check_logcat_nowrap(outfile)
else:
parser = LogcatParser()
for event in parser.parse(outfile):
if (event.tag == self._logcat_marker_tag
and event.message == self._logcat_marker_msg):
return True
return False
def insert_logcat_marker(self):
self.logger.debug('Inserting logcat marker')
self.target.execute(
'log -t "{}" "{}"'.format(
self._logcat_marker_tag, self._logcat_marker_msg
)
)
def do_disable_selinux(self):
# SELinux was added in Android 4.3 (API level 18). Trying to
@ -198,21 +119,15 @@ class LogcatPoller(threading.Thread):
self.period = period
self.timeout = timeout
self.stop_signal = threading.Event()
self.lock = threading.RLock()
self.lock = threading.Lock()
self.buffer_file = tempfile.mktemp()
self.last_poll = 0
self.daemon = True
self.exc = None
self._logcat_marker_tag = 'WALog'
self._logcat_marker_msg = 'WA logcat marker for wrap detection:{}'
self._marker_count = 0
self._start_marker = None
self._end_marker = None
def run(self):
self.logger.debug('Starting polling')
try:
self.insert_logcat_marker()
while True:
if self.stop_signal.is_set():
break
@ -220,7 +135,6 @@ class LogcatPoller(threading.Thread):
current_time = time.time()
if (current_time - self.last_poll) >= self.period:
self.poll()
self.insert_logcat_marker()
time.sleep(0.5)
except Exception: # pylint: disable=W0703
self.exc = WorkerThreadError(self.name, sys.exc_info())
@ -256,49 +170,9 @@ class LogcatPoller(threading.Thread):
def poll(self):
self.last_poll = time.time()
self.target.dump_logcat(self.buffer_file, append=True, timeout=self.timeout, logcat_format='threadtime')
self.target.dump_logcat(self.buffer_file, append=True, timeout=self.timeout)
self.target.clear_logcat()
def insert_logcat_marker(self):
self.logger.debug('Inserting logcat marker')
with self.lock:
self.target.execute(
'log -t "{}" "{}"'.format(
self._logcat_marker_tag,
self._logcat_marker_msg.format(self._marker_count)
)
)
self._marker_count += 1
def check_logcat_nowrap(self, outfile):
parser = LogcatParser()
counter = self._start_marker
for event in parser.parse(outfile):
message = self._logcat_marker_msg.split(':')[0]
if not (event.tag == self._logcat_marker_tag
and event.message.split(':')[0] == message):
continue
number = int(event.message.split(':')[1])
if number > counter:
return False
elif number == counter:
counter += 1
if counter == self._end_marker:
return True
return False
def start_logcat_wrap_detect(self):
with self.lock:
self._start_marker = self._marker_count
self.insert_logcat_marker()
def stop_logcat_wrap_detect(self):
with self.lock:
self._end_marker = self._marker_count
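The wrap detection shown above works by periodically logging a marker whose message carries an increasing counter, then scanning the captured log to confirm that every counter value between the start and end markers appears in order; a skipped value means the fixed-size logcat buffer wrapped and dropped lines. A minimal self-contained sketch of that check (the event format and names are invented for illustration; the implementation above parses real logcat output with devlib's LogcatParser):

MARKER_TAG = 'WALog'
MARKER_PREFIX = 'WA logcat marker for wrap detection'

def logcat_did_not_wrap(events, start_marker, end_marker):
    """events: iterable of (tag, message) pairs parsed from a logcat dump."""
    counter = start_marker
    for tag, message in events:
        if tag != MARKER_TAG or not message.startswith(MARKER_PREFIX):
            continue
        number = int(message.split(':')[1])
        if number > counter:
            return False      # a marker was skipped -> the buffer wrapped
        elif number == counter:
            counter += 1
            if counter == end_marker:
                return True   # every expected marker was seen
    return False

# Marker 4 is missing from the captured log, so a wrap is reported:
events = [(MARKER_TAG, MARKER_PREFIX + ':3'), (MARKER_TAG, MARKER_PREFIX + ':5')]
assert not logcat_did_not_wrap(events, start_marker=3, end_marker=6)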
class ChromeOsAssistant(LinuxAssistant):

@ -14,13 +14,14 @@
#
import inspect
from collections import OrderedDict
from copy import copy
from devlib import (LinuxTarget, AndroidTarget, LocalLinuxTarget,
ChromeOsTarget, Platform, Juno, TC2, Gem5SimulationPlatform,
AdbConnection, SshConnection, LocalConnection,
TelnetConnection, Gem5Connection)
Gem5Connection)
from devlib.target import DEFAULT_SHELL_PROMPT
from devlib.utils.ssh import DEFAULT_SSH_SUDO_COMMAND
from wa.framework import pluginloader
from wa.framework.configuration.core import get_config_point_map
@ -68,14 +69,11 @@ def instantiate_target(tdesc, params, connect=None, extra_platform_params=None):
for name, value in params.items():
if name in target_params:
if not target_params[name].deprecated:
tp[name] = value
tp[name] = value
elif name in platform_params:
if not platform_params[name].deprecated:
pp[name] = value
pp[name] = value
elif name in conn_params:
if not conn_params[name].deprecated:
cp[name] = value
cp[name] = value
elif name in assistant_params:
pass
else:
@ -131,8 +129,7 @@ class TargetDescription(object):
config = {}
for pattr in param_attrs:
for p in getattr(self, pattr):
if not p.deprecated:
config[p.name] = p.default
config[p.name] = p.default
return config
def _set(self, attr, vals):
@ -198,12 +195,6 @@ COMMON_TARGET_PARAMS = [
description='''
A regex that matches the shell prompt on the target.
'''),
Parameter('max_async', kind=int, default=50,
description='''
The maximum number of concurrent asynchronous connections to the
target maintained at any time.
'''),
]
COMMON_PLATFORM_PARAMS = [
@ -271,6 +262,7 @@ VEXPRESS_PLATFORM_PARAMS = [
``dtr``: toggle the DTR line on the serial connection
``reboottxt``: create ``reboot.txt`` in the root of the VEMSD mount.
'''),
]
@ -308,48 +300,6 @@ CONNECTION_PARAMS = {
description="""
ADB server to connect to.
"""),
Parameter(
'adb_port', kind=int,
description="""
ADB port to connect to.
"""),
Parameter(
'poll_transfers', kind=bool,
default=True,
description="""
File transfers will be polled for activity. Inactive
file transfers are cancelled.
"""),
Parameter(
'start_transfer_poll_delay', kind=int,
default=30,
description="""
How long to wait (s) for a transfer to complete
before polling transfer activity. Requires ``poll_transfers``
to be set.
"""),
Parameter(
'total_transfer_timeout', kind=int,
default=3600,
description="""
The total time allowed to elapse before a transfer is cancelled, regardless
of its activity. Requires ``poll_transfers`` to be set.
"""),
Parameter(
'transfer_poll_period', kind=int,
default=30,
description="""
The period at which transfer activity is sampled. Requires
``poll_transfers`` to be set. Too small values may cause
the destination size to appear the same over one or more sample
periods, causing improper transfer cancellation.
"""),
Parameter(
'adb_as_root', kind=bool,
default=False,
description="""
Specify whether the adb server should be started in root mode.
""")
],
SshConnection: [
Parameter(
@ -366,8 +316,6 @@ CONNECTION_PARAMS = {
'password', kind=str,
description="""
Password to use.
(When connecting to a passwordless machine, set this to an
empty string to prevent attempting SSH key authentication.)
"""),
Parameter(
'keyfile', kind=str,
@ -376,101 +324,14 @@ CONNECTION_PARAMS = {
"""),
Parameter(
'port', kind=int,
default=22,
description="""
The port on which the SSH server is listening on the target.
"""),
Parameter(
'strict_host_check', kind=bool, default=False,
'telnet', kind=bool, default=False,
description="""
Specify whether devices should be connected to if
their host key does not match the system's known host keys. """),
Parameter(
'sudo_cmd', kind=str,
default=DEFAULT_SSH_SUDO_COMMAND,
description="""
Sudo command to use. Must have ``{}`` specified
somewhere in the string to indicate where the command
to be run via sudo is to go.
"""),
Parameter(
'use_scp', kind=bool,
default=False,
description="""
Allow using SCP as the method of file transfer instead
of the default SFTP.
"""),
Parameter(
'poll_transfers', kind=bool,
default=True,
description="""
File transfers will be polled for activity. Inactive
file transfers are cancelled.
"""),
Parameter(
'start_transfer_poll_delay', kind=int,
default=30,
description="""
How long to wait (s) for a transfer to complete
before polling transfer activity. Requires ``poll_transfers``
to be set.
"""),
Parameter(
'total_transfer_timeout', kind=int,
default=3600,
description="""
The total time allowed to elapse before a transfer is cancelled, regardless
of its activity. Requires ``poll_transfers`` to be set.
"""),
Parameter(
'transfer_poll_period', kind=int,
default=30,
description="""
The period at which transfer activity is sampled. Requires
``poll_transfers`` to be set. Too small values may cause
the destination size to appear the same over one or more sample
periods, causing improper transfer cancellation.
"""),
# Deprecated Parameters
Parameter(
'telnet', kind=str,
description="""
Original shell prompt to expect.
""",
deprecated=True),
Parameter(
'password_prompt', kind=str,
description="""
Password prompt to expect
""",
deprecated=True),
Parameter(
'original_prompt', kind=str,
description="""
Original shell prompt to expect.
""",
deprecated=True),
],
TelnetConnection: [
Parameter(
'host', kind=str, mandatory=True,
description="""
Host name or IP address of the target.
"""),
Parameter(
'username', kind=str, mandatory=True,
description="""
User name to connect with
"""),
Parameter(
'password', kind=str,
description="""
Password to use.
"""),
Parameter(
'port', kind=int,
description="""
The port on which the SSH server is listening on the target.
If set to ``True``, a Telnet connection, rather than
SSH will be used.
"""),
Parameter(
'password_prompt', kind=str,
@ -550,16 +411,16 @@ CONNECTION_PARAMS['ChromeOsConnection'] = \
CONNECTION_PARAMS[AdbConnection] + CONNECTION_PARAMS[SshConnection]
# name --> ((target_class, conn_class, unsupported_platforms), params_list, defaults)
# name --> ((target_class, conn_class), params_list, defaults)
TARGETS = {
'linux': ((LinuxTarget, SshConnection, []), COMMON_TARGET_PARAMS, None),
'android': ((AndroidTarget, AdbConnection, []), COMMON_TARGET_PARAMS +
'linux': ((LinuxTarget, SshConnection), COMMON_TARGET_PARAMS, None),
'android': ((AndroidTarget, AdbConnection), COMMON_TARGET_PARAMS +
[Parameter('package_data_directory', kind=str, default='/data/data',
description='''
Directory containing Android data
'''),
], None),
'chromeos': ((ChromeOsTarget, 'ChromeOsConnection', []), COMMON_TARGET_PARAMS +
'chromeos': ((ChromeOsTarget, 'ChromeOsConnection'), COMMON_TARGET_PARAMS +
[Parameter('package_data_directory', kind=str, default='/data/data',
description='''
Directory containing Android data
@ -580,8 +441,7 @@ TARGETS = {
the need for privilege elevation.
'''),
], None),
'local': ((LocalLinuxTarget, LocalConnection, [Juno, Gem5SimulationPlatform, TC2]),
COMMON_TARGET_PARAMS, None),
'local': ((LocalLinuxTarget, LocalConnection), COMMON_TARGET_PARAMS, None),
}
# name --> assistant
@ -592,87 +452,31 @@ ASSISTANTS = {
'chromeos': ChromeOsAssistant
}
# Platform specific parameter overrides.
JUNO_PLATFORM_OVERRIDES = [
Parameter('baudrate', kind=int, default=115200,
description='''
Baud rate for the serial connection.
'''),
Parameter('vemsd_mount', kind=str, default='/media/JUNO',
description='''
VExpress MicroSD card mount location. This is a MicroSD card in
the VExpress device that is mounted on the host via USB. The card
contains configuration files for the platform and firmware and
kernel images to be flashed.
'''),
Parameter('bootloader', kind=str, default='u-boot',
allowed_values=['uefi', 'uefi-shell', 'u-boot', 'bootmon'],
description='''
Selects the bootloader mechanism used by the board. Depending on
firmware version, a number of possible boot mechanisms may be used.
Please see ``devlib`` documentation for descriptions.
'''),
Parameter('hard_reset_method', kind=str, default='dtr',
allowed_values=['dtr', 'reboottxt'],
description='''
There are a couple of ways to reset a VersatileExpress board if the
software running on the board becomes unresponsive. Both require
configuration to be enabled (please see ``devlib`` documentation).
``dtr``: toggle the DTR line on the serial connection
``reboottxt``: create ``reboot.txt`` in the root of the VEMSD mount.
'''),
]
TC2_PLATFORM_OVERRIDES = [
Parameter('baudrate', kind=int, default=38400,
description='''
Baud rate for the serial connection.
'''),
Parameter('vemsd_mount', kind=str, default='/media/VEMSD',
description='''
VExpress MicroSD card mount location. This is a MicroSD card in
the VExpress device that is mounted on the host via USB. The card
contains configuration files for the platform and firmware and
kernel images to be flashed.
'''),
Parameter('bootloader', kind=str, default='bootmon',
allowed_values=['uefi', 'uefi-shell', 'u-boot', 'bootmon'],
description='''
Selects the bootloader mechanism used by the board. Depending on
firmware version, a number of possible boot mechanisms may be used.
Please see ``devlib`` documentation for descriptions.
'''),
Parameter('hard_reset_method', kind=str, default='reboottxt',
allowed_values=['dtr', 'reboottxt'],
description='''
There are a couple of ways to reset a VersatileExpress board if the
software running on the board becomes unresponsive. Both require
configuration to be enabled (please see ``devlib`` documentation).
``dtr``: toggle the DTR line on the serial connection
``reboottxt``: create ``reboot.txt`` in the root of the VEMSD mount.
'''),
]
# name --> ((platform_class, conn_class, conn_overrides), params_list, defaults, target_overrides)
# name --> ((platform_class, conn_class), params_list, defaults, target_defaults)
# Note: normally, connection is defined by the Target name, but
# platforms may choose to override it
# Note: the target_overrides allows you to override common target_params for a
# Note: the target_defaults allows you to override common target_params for a
# particular platform. Parameters you can override are in COMMON_TARGET_PARAMS
# Example of overriding one of the target parameters: Replace last `None` with
# a list of `Parameter` objects to be used instead.
# Example of overriding one of the target parameters: Replace last None with:
# {'shell_prompt': CUSTOM__SHELL_PROMPT}
PLATFORMS = {
'generic': ((Platform, None, None), COMMON_PLATFORM_PARAMS, None, None),
'juno': ((Juno, None, [
Parameter('host', kind=str, mandatory=False,
description="Host name or IP address of the target."),
]
), COMMON_PLATFORM_PARAMS + VEXPRESS_PLATFORM_PARAMS, JUNO_PLATFORM_OVERRIDES, None),
'tc2': ((TC2, None, None), COMMON_PLATFORM_PARAMS + VEXPRESS_PLATFORM_PARAMS,
TC2_PLATFORM_OVERRIDES, None),
'gem5': ((Gem5SimulationPlatform, Gem5Connection, None), GEM5_PLATFORM_PARAMS, None, None),
'generic': ((Platform, None), COMMON_PLATFORM_PARAMS, None, None),
'juno': ((Juno, None), COMMON_PLATFORM_PARAMS + VEXPRESS_PLATFORM_PARAMS,
{
'vemsd_mount': '/media/JUNO',
'baudrate': 115200,
'bootloader': 'u-boot',
'hard_reset_method': 'dtr',
},
None),
'tc2': ((TC2, None), COMMON_PLATFORM_PARAMS + VEXPRESS_PLATFORM_PARAMS,
{
'vemsd_mount': '/media/VEMSD',
'baudrate': 38400,
'bootloader': 'bootmon',
'hard_reset_method': 'reboottxt',
}, None),
'gem5': ((Gem5SimulationPlatform, Gem5Connection), GEM5_PLATFORM_PARAMS, None, None),
}
@ -692,17 +496,16 @@ class DefaultTargetDescriptor(TargetDescriptor):
# pylint: disable=attribute-defined-outside-init,too-many-locals
result = []
for target_name, target_tuple in TARGETS.items():
(target, conn, unsupported_platforms), target_params = self._get_item(target_tuple)
(target, conn), target_params = self._get_item(target_tuple)
assistant = ASSISTANTS[target_name]
conn_params = CONNECTION_PARAMS[conn]
for platform_name, platform_tuple in PLATFORMS.items():
platform_target_defaults = platform_tuple[-1]
platform_tuple = platform_tuple[0:-1]
(platform, plat_conn, conn_defaults), platform_params = self._get_item(platform_tuple)
if platform in unsupported_platforms:
continue
(platform, plat_conn), platform_params = self._get_item(platform_tuple)
# Add target defaults specified in the Platform tuple
target_params = self._override_params(target_params, platform_target_defaults)
target_params = self._apply_param_defaults(target_params,
platform_target_defaults)
name = '{}_{}'.format(platform_name, target_name)
td = TargetDescription(name, self)
td.target = target
@ -714,31 +517,31 @@ class DefaultTargetDescriptor(TargetDescriptor):
if plat_conn:
td.conn = plat_conn
td.conn_params = self._override_params(CONNECTION_PARAMS[plat_conn],
conn_defaults)
td.conn_params = CONNECTION_PARAMS[plat_conn]
else:
td.conn = conn
td.conn_params = self._override_params(conn_params, conn_defaults)
td.conn_params = conn_params
result.append(td)
return result
def _override_params(self, params, overrides): # pylint: disable=no-self-use
''' Returns a new list of parameters replacing any parameter with the
corresponding parameter in overrides'''
if not overrides:
def _apply_param_defaults(self, params, defaults): # pylint: disable=no-self-use
'''Adds parameters in the defaults dict to the params list.
Returns updated params as a list (idempotent function).'''
if not defaults:
return params
param_map = {p.name: p for p in params}
for override in overrides:
if override.name in param_map:
param_map[override.name] = override
# Return the list of overridden parameters
param_map = OrderedDict((p.name, copy(p)) for p in params)
for name, value in defaults.items():
if name not in param_map:
raise ValueError('Unexpected default "{}"'.format(name))
param_map[name].default = value
# Convert the OrderedDict to a list to return the same type
return list(param_map.values())
def _get_item(self, item_tuple):
cls_tuple, params, defaults = item_tuple
updated_params = self._override_params(params, defaults)
return cls_tuple, updated_params
cls, params, defaults = item_tuple
updated_params = self._apply_param_defaults(params, defaults)
return cls, updated_params
_adhoc_target_descriptions = []
@ -781,7 +584,7 @@ def _get_target_defaults(target):
def add_description_for_target(target, description=None, **kwargs):
(base_name, ((_, base_conn, _), base_params, _)) = _get_target_defaults(target)
(base_name, ((_, base_conn), base_params, _)) = _get_target_defaults(target)
if 'target_params' not in kwargs:
kwargs['target_params'] = base_params
@ -789,7 +592,7 @@ def add_description_for_target(target, description=None, **kwargs):
if 'platform' not in kwargs:
kwargs['platform'] = Platform
if 'platform_params' not in kwargs:
for (plat, conn, _), params, _, _ in PLATFORMS.values():
for (plat, conn), params, _, _ in PLATFORMS.values():
if plat == kwargs['platform']:
kwargs['platform_params'] = params
if conn is not None and kwargs['conn'] is None:

@ -23,7 +23,6 @@ from devlib.utils.android import AndroidProperties
from wa.framework.configuration.core import settings
from wa.framework.exception import ConfigError
from wa.utils.serializer import read_pod, write_pod, Podable
from wa.utils.misc import atomic_write_path
def cpuinfo_from_pod(pod):
@ -54,9 +53,9 @@ def kernel_version_from_pod(pod):
def kernel_config_from_pod(pod):
config = KernelConfig('')
config.typed_config._config = pod['kernel_config']
config._config = pod['kernel_config']
lines = []
for key, value in config.items():
for key, value in config._config.items():
if value == 'n':
lines.append('# {} is not set'.format(key))
else:
@ -222,7 +221,6 @@ class CpuInfo(Podable):
def get_target_info(target):
info = TargetInfo()
info.target = target.__class__.__name__
info.modules = target.modules
info.os = target.os
info.os_version = target.os_version
info.system_id = target.system_id
@ -230,15 +228,16 @@ def get_target_info(target):
info.is_rooted = target.is_rooted
info.kernel_version = target.kernel_version
info.kernel_config = target.config
info.hostname = target.hostname
info.hostid = target.hostid
try:
info.sched_features = target.read_value('/sys/kernel/debug/sched_features').split()
except TargetError:
# best effort -- debugfs might not be mounted
pass
hostid_string = target.execute('{} hostid'.format(target.busybox)).strip()
info.hostid = int(hostid_string, 16)
info.hostname = target.execute('{} hostname'.format(target.busybox)).strip()
for i, name in enumerate(target.cpuinfo.cpu_names):
cpu = CpuInfo()
cpu.id = i
@ -286,13 +285,11 @@ def read_target_info_cache():
def write_target_info_cache(cache):
if not os.path.exists(settings.cache_directory):
os.makedirs(settings.cache_directory)
with atomic_write_path(settings.target_info_cache_file) as at_path:
write_pod(cache, at_path)
write_pod(cache, settings.target_info_cache_file)
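The master side funnels the cache write through wa.utils.misc.atomic_write_path, so a crash mid-write cannot leave a truncated cache file behind. A stand-in sketch of the underlying write-to-temporary-then-rename pattern (not WA's actual implementation):

import os
import tempfile
from contextlib import contextmanager

@contextmanager
def atomic_write_path(path):
    fd, tmp = tempfile.mkstemp(dir=os.path.dirname(path) or '.')
    os.close(fd)
    try:
        yield tmp              # the caller writes to the temporary path
        os.replace(tmp, path)  # atomic rename on POSIX
    finally:
        if os.path.exists(tmp):  # clean up if the write failed part-way
            os.remove(tmp)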
def get_target_info_from_cache(system_id, cache=None):
if cache is None:
cache = read_target_info_cache()
def get_target_info_from_cache(system_id):
cache = read_target_info_cache()
pod = cache.get(system_id, None)
if not pod:
@ -306,9 +303,8 @@ def get_target_info_from_cache(system_id, cache=None):
return TargetInfo.from_pod(pod)
def cache_target_info(target_info, overwrite=False, cache=None):
if cache is None:
cache = read_target_info_cache()
def cache_target_info(target_info, overwrite=False):
cache = read_target_info_cache()
if target_info.system_id in cache and not overwrite:
raise ValueError('TargetInfo for {} is already in cache.'.format(target_info.system_id))
cache[target_info.system_id] = target_info.to_pod()
@ -317,13 +313,12 @@ def cache_target_info(target_info, overwrite=False, cache=None):
class TargetInfo(Podable):
_pod_serialization_version = 5
_pod_serialization_version = 2
@staticmethod
def from_pod(pod):
instance = super(TargetInfo, TargetInfo).from_pod(pod)
instance.target = pod['target']
instance.modules = pod['modules']
instance.abi = pod['abi']
instance.cpus = [CpuInfo.from_pod(c) for c in pod['cpus']]
instance.os = pod['os']
@ -348,7 +343,6 @@ class TargetInfo(Podable):
def __init__(self):
super(TargetInfo, self).__init__()
self.target = None
self.modules = []
self.cpus = []
self.os = None
self.os_version = None
@ -368,7 +362,6 @@ class TargetInfo(Podable):
def to_pod(self):
pod = super(TargetInfo, self).to_pod()
pod['target'] = self.target
pod['modules'] = self.modules
pod['abi'] = self.abi
pod['cpus'] = [c.to_pod() for c in self.cpus]
pod['os'] = self.os
@ -408,20 +401,3 @@ class TargetInfo(Podable):
pod['page_size_kb'] = pod.get('page_size_kb')
pod['_pod_version'] = pod.get('format_version', 0)
return pod
@staticmethod
def _pod_upgrade_v3(pod):
config = {}
for key, value in pod['kernel_config'].items():
config[key.upper()] = value
pod['kernel_config'] = config
return pod
@staticmethod
def _pod_upgrade_v4(pod):
return TargetInfo._pod_upgrade_v3(pod)
@staticmethod
def _pod_upgrade_v5(pod):
pod['modules'] = pod.get('modules') or []
return pod

@ -24,10 +24,8 @@ from wa.framework.plugin import Parameter
from wa.framework.target.descriptor import (get_target_description,
instantiate_target,
instantiate_assistant)
from wa.framework.target.info import (get_target_info, get_target_info_from_cache,
cache_target_info, read_target_info_cache)
from wa.framework.target.info import get_target_info, get_target_info_from_cache, cache_target_info
from wa.framework.target.runtime_parameter_manager import RuntimeParameterManager
from wa.utils.types import module_name_set
class TargetManager(object):
@ -57,7 +55,6 @@ class TargetManager(object):
def initialize(self):
self._init_target()
self.assistant.initialize()
# If the target supports hotplugging, online all CPUs before performing
# discovery and restore the original configuration once completed.
@ -78,8 +75,6 @@ class TargetManager(object):
def finalize(self):
if not self.target:
return
if self.assistant:
self.assistant.finalize()
if self.disconnect or isinstance(self.target.platform, Gem5SimulationPlatform):
self.logger.info('Disconnecting from the device')
with signal.wrap('TARGET_DISCONNECT'):
@ -96,20 +91,10 @@ class TargetManager(object):
@memoized
def get_target_info(self):
cache = read_target_info_cache()
info = get_target_info_from_cache(self.target.system_id, cache=cache)
info = get_target_info_from_cache(self.target.system_id)
if info is None:
info = get_target_info(self.target)
cache_target_info(info, cache=cache)
else:
# If the module configuration has changed from when the target info
# was previously cached, additional info may be
# available, so the cache should be re-generated.
if module_name_set(info.modules) != module_name_set(self.target.modules):
info = get_target_info(self.target)
cache_target_info(info, overwrite=True, cache=cache)
cache_target_info(info)
return info
def reboot(self, context, hard=False):

@ -178,7 +178,7 @@ class HotplugRuntimeConfig(RuntimeConfig):
raise TargetError('Target does not appear to support hotplug')
def validate_parameters(self):
if self.num_cores and len(self.num_cores) == self.target.number_of_cpus:
if len(self.num_cores) == self.target.number_of_cpus:
if all(v is False for v in list(self.num_cores.values())):
raise ValueError('Cannot set number of all cores to 0')
@ -694,7 +694,7 @@ class CpufreqRuntimeConfig(RuntimeConfig):
else:
common_freqs = common_freqs.intersection(self.supported_cpu_freqs.get(cpu) or set())
all_freqs = all_freqs.union(self.supported_cpu_freqs.get(cpu) or set())
common_gov = common_gov.intersection(self.supported_cpu_governors.get(cpu) or set())
common_gov = common_gov.intersection(self.supported_cpu_governors.get(cpu))
return all_freqs, common_freqs, common_gov
@ -732,7 +732,7 @@ class IdleStateValue(object):
'''Checks passed state and converts to its ID'''
value = caseless_string(value)
for s_id, s_name, s_desc in self.values:
if value in (s_id, s_name, s_desc):
if value == s_id or value == s_name or value == s_desc:
return s_id
msg = 'Invalid IdleState: "{}"; Must be in {}'
raise ValueError(msg.format(value, self.values))
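Behaviourally, the check above is a case-insensitive lookup of an idle state by its ID, name, or description. An equivalent sketch using plain lowercasing in place of WA's caseless_string (state values are invented):

def idle_state_id(value, states):
    """states: list of (id, name, description) tuples."""
    v = value.lower()
    for s_id, s_name, s_desc in states:
        if v in (s_id.lower(), s_name.lower(), s_desc.lower()):
            return s_id
    raise ValueError('Invalid IdleState: "{}"; Must be in {}'.format(value, states))

states = [('state0', 'WFI', 'clock gated'), ('state1', 'SPC', 'power gated')]
print(idle_state_id('wfi', states))  # state0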
@ -878,11 +878,6 @@ class AndroidRuntimeConfig(RuntimeConfig):
if value is not None:
obj.config['screen_on'] = value
@staticmethod
def set_unlock_screen(obj, value):
if value is not None:
obj.config['unlock_screen'] = value
def __init__(self, target):
self.config = defaultdict(dict)
super(AndroidRuntimeConfig, self).__init__(target)
@ -935,16 +930,6 @@ class AndroidRuntimeConfig(RuntimeConfig):
Specify whether the device screen should be on
""")
param_name = 'unlock_screen'
self._runtime_params[param_name] = \
RuntimeParameter(
param_name, kind=str,
default=None,
setter=self.set_unlock_screen,
description="""
Specify how the device screen should be unlocked (e.g., vertical)
""")
def check_target(self):
if self.target.os != 'android' and self.target.os != 'chromeos':
raise ConfigError('Target does not appear to be running Android')
@ -955,7 +940,6 @@ class AndroidRuntimeConfig(RuntimeConfig):
pass
def commit(self):
# pylint: disable=too-many-branches
if 'airplane_mode' in self.config:
new_airplane_mode = self.config['airplane_mode']
old_airplane_mode = self.target.get_airplane_mode()
@ -980,20 +964,13 @@ class AndroidRuntimeConfig(RuntimeConfig):
if 'brightness' in self.config:
self.target.set_brightness(self.config['brightness'])
if 'rotation' in self.config:
self.target.set_rotation(self.config['rotation'])
if 'screen_on' in self.config:
if self.config['screen_on']:
self.target.ensure_screen_is_on()
else:
self.target.ensure_screen_is_off()
if self.config.get('unlock_screen'):
self.target.ensure_screen_is_on()
if self.target.is_screen_locked():
self.target.swipe_to_unlock(self.config['unlock_screen'])
def clear(self):
self.config = {}

@ -22,7 +22,6 @@ from wa.framework.target.runtime_config import (SysfileValuesRuntimeConfig,
CpuidleRuntimeConfig,
AndroidRuntimeConfig)
from wa.utils.types import obj_dict, caseless_string
from wa.framework import pluginloader
class RuntimeParameterManager(object):
@ -38,15 +37,8 @@ class RuntimeParameterManager(object):
def __init__(self, target):
self.target = target
self.runtime_params = {}
try:
for rt_cls in pluginloader.list_plugins(kind='runtime-config'):
if rt_cls not in self.runtime_config_cls:
self.runtime_config_cls.append(rt_cls)
except ValueError:
pass
self.runtime_configs = [cls(self.target) for cls in self.runtime_config_cls]
self.runtime_params = {}
runtime_parameter = namedtuple('RuntimeParameter', 'cfg_point, rt_config')
for cfg in self.runtime_configs:

@ -1,18 +1,18 @@
apply plugin: 'com.android.library'
android {
compileSdkVersion 28
buildToolsVersion '28.0.3'
compileSdkVersion 25
buildToolsVersion '25.0.3'
defaultConfig {
minSdkVersion 18
targetSdkVersion 28
targetSdkVersion 25
testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner"
}
}
dependencies {
implementation fileTree(include: ['*.jar'], dir: 'libs')
implementation 'com.android.support.test:runner:0.5'
implementation 'com.android.support.test:rules:0.5'
implementation 'com.android.support.test.uiautomator:uiautomator-v18:2.1.2'
compile fileTree(include: ['*.jar'], dir: 'libs')
compile 'com.android.support.test:runner:0.5'
compile 'com.android.support.test:rules:0.5'
compile 'com.android.support.test.uiautomator:uiautomator-v18:2.1.2'
}

@ -45,7 +45,7 @@ public class BaseUiAutomation {
public enum FindByCriteria { BY_ID, BY_TEXT, BY_DESC };
public enum Direction { UP, DOWN, LEFT, RIGHT, NULL };
public enum ScreenOrientation { RIGHT, NATURAL, LEFT, PORTRAIT, LANDSCAPE };
public enum ScreenOrientation { RIGHT, NATURAL, LEFT };
public enum PinchType { IN, OUT, NULL };
// Time in milliseconds
@ -176,8 +176,6 @@ public class BaseUiAutomation {
}
public void setScreenOrientation(ScreenOrientation orientation) throws Exception {
int width = mDevice.getDisplayWidth();
int height = mDevice.getDisplayHeight();
switch (orientation) {
case RIGHT:
mDevice.setOrientationRight();
@ -188,30 +186,6 @@ public class BaseUiAutomation {
case LEFT:
mDevice.setOrientationLeft();
break;
case LANDSCAPE:
if (mDevice.isNaturalOrientation()){
if (height > width){
mDevice.setOrientationRight();
}
}
else {
if (height > width){
mDevice.setOrientationNatural();
}
}
break;
case PORTRAIT:
if (mDevice.isNaturalOrientation()){
if (height < width){
mDevice.setOrientationRight();
}
}
else {
if (height < width){
mDevice.setOrientationNatural();
}
}
break;
default:
throw new Exception("No orientation specified");
}
@ -573,29 +547,9 @@ public class BaseUiAutomation {
}
}
// If an app is not designed to run on the latest version of Android
// (currently Q), an additional screen can pop up asking to confirm permissions.
public void dismissAndroidPermissionPopup() throws Exception {
UiObject permissionAccess =
mDevice.findObject(new UiSelector().textMatches(
".*Choose what to allow .* to access"));
UiObject continueButton =
mDevice.findObject(new UiSelector().resourceId("com.android.permissioncontroller:id/continue_button")
.textContains("Continue"));
if (permissionAccess.exists() && continueButton.exists()) {
continueButton.click();
}
}
// If an app is not designed to run on the latest version of Android
// (currently Q), dismiss the warning popup if present.
public void dismissAndroidVersionPopup() throws Exception {
// Ensure we have dismissed any permission screens before looking for the version popup
dismissAndroidPermissionPopup();
UiObject warningText =
mDevice.findObject(new UiSelector().textContains(
"This app was built for an older version of Android"));
@ -608,29 +562,6 @@ public class BaseUiAutomation {
}
// If Chrome is a fresh install then these popups may be presented;
// dismiss them if visible.
public void dismissChromePopup() throws Exception {
UiObject accept =
mDevice.findObject(new UiSelector().resourceId("com.android.chrome:id/terms_accept")
.className("android.widget.Button"));
if (accept.waitForExists(3000)){
accept.click();
UiObject negative =
mDevice.findObject(new UiSelector().resourceId("com.android.chrome:id/negative_button")
.className("android.widget.Button"));
if (negative.waitForExists(10000)) {
negative.click();
}
}
UiObject lite =
mDevice.findObject(new UiSelector().resourceId("com.android.chrome:id/button_secondary")
.className("android.widget.Button"));
if (lite.exists()){
lite.click();
}
}
// Override getParams function to decode a url encoded parameter bundle before
// passing it to workloads.
public Bundle getParams() {

@ -3,10 +3,9 @@
buildscript {
repositories {
jcenter()
google()
}
dependencies {
classpath 'com.android.tools.build:gradle:7.2.1'
classpath 'com.android.tools.build:gradle:2.3.2'
// NOTE: Do not place your application dependencies here; they belong
@ -17,7 +16,6 @@ buildscript {
allprojects {
repositories {
jcenter()
google()
}
}

@ -3,4 +3,4 @@ distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-7.3.3-all.zip
distributionUrl=https\://services.gradle.org/distributions/gradle-3.3-all.zip

Binary file not shown.

@ -19,23 +19,15 @@ from collections import namedtuple
from subprocess import Popen, PIPE
VersionTuple = namedtuple('Version', ['major', 'minor', 'revision', 'dev'])
VersionTuple = namedtuple('Version', ['major', 'minor', 'revision'])
version = VersionTuple(3, 4, 0, 'dev1')
required_devlib_version = VersionTuple(1, 4, 0, 'dev3')
def format_version(v):
version_string = '{}.{}.{}'.format(
v.major, v.minor, v.revision)
if v.dev:
version_string += '.{}'.format(v.dev)
return version_string
version = VersionTuple(3, 1, 0)
def get_wa_version():
return format_version(version)
version_string = '{}.{}.{}'.format(
version.major, version.minor, version.revision)
return version_string
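As a quick illustration of the two version-string schemes above (master appends a '.devN' suffix for development builds; release builds have none):

from collections import namedtuple

Version = namedtuple('Version', ['major', 'minor', 'revision', 'dev'])

def fmt(v):
    s = '{}.{}.{}'.format(v.major, v.minor, v.revision)
    if v.dev:
        s += '.{}'.format(v.dev)
    return s

print(fmt(Version(3, 4, 0, 'dev1')))  # 3.4.0.dev1
print(fmt(Version(3, 1, 0, '')))      # 3.1.0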
def get_wa_version_with_commit():
@ -48,13 +40,13 @@ def get_wa_version_with_commit():
def get_commit():
try:
p = Popen(['git', 'rev-parse', 'HEAD'],
cwd=os.path.dirname(__file__), stdout=PIPE, stderr=PIPE)
except FileNotFoundError:
return None
p = Popen(['git', 'rev-parse', 'HEAD'],
cwd=os.path.dirname(__file__), stdout=PIPE, stderr=PIPE)
std, _ = p.communicate()
p.wait()
if p.returncode:
return None
return std[:8].decode(sys.stdout.encoding or 'utf-8')
if sys.version_info[0] == 3 and isinstance(std, bytes):
return std[:8].decode(sys.stdout.encoding or 'utf-8')
else:
return std[:8]

@ -1,4 +1,4 @@
# Copyright 2014-2019 ARM Limited
# Copyright 2014-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -14,25 +14,17 @@
#
import logging
import os
import threading
import time
try:
from shlex import quote
except ImportError:
from pipes import quote
from devlib.utils.android import ApkInfo
from wa.utils.android import get_cacheable_apk_info, build_apk_launch_command
from wa.framework.plugin import TargetedPlugin, Parameter
from wa.framework.resource import (ApkFile, ReventFile,
File, loose_version_matching,
range_version_matching)
File, loose_version_matching)
from wa.framework.exception import WorkloadError, ConfigError
from wa.utils.types import ParameterDict, list_or_string, version_tuple
from wa.utils.types import ParameterDict
from wa.utils.revent import ReventRecorder
from wa.utils.exec_control import once_per_instance
from wa.utils.misc import atomic_write_path
class Workload(TargetedPlugin):
@ -45,12 +37,14 @@ class Workload(TargetedPlugin):
kind = 'workload'
parameters = [
Parameter('uninstall', kind=bool,
Parameter('cleanup_assets', kind=bool,
global_alias='cleanup_assets',
aliases=['clean_up'],
default=True,
description="""
If ``True``, executables that are installed to the device
as part of the workload will be uninstalled again.
"""),
If ``True``, if assets are deployed as part of the workload they
will be removed again from the device as part of finalize.
""")
]
# Set this to True to mark that this workload poses a risk of exposing
@ -79,7 +73,7 @@ class Workload(TargetedPlugin):
supported_platforms = getattr(self, 'supported_platforms', [])
if supported_platforms and self.target.os not in supported_platforms:
msg = 'Supported platforms for "{}" are "{}", attempting to run on "{}"'
msg = 'Supported platforms for "{}" are "{}", attemping to run on "{}"'
raise WorkloadError(msg.format(self.name, ' '.join(self.supported_platforms),
self.target.os))
@ -124,11 +118,13 @@ class Workload(TargetedPlugin):
Execute the workload. This is the method that performs the actual
"work" of the workload.
"""
pass
def extract_results(self, context):
"""
Extract results on the target
"""
pass
def update_output(self, context):
"""
@ -136,9 +132,11 @@ class Workload(TargetedPlugin):
metrics and artifacts for this workload iteration.
"""
pass
def teardown(self, context):
""" Perform any final clean up for the Workload. """
pass
@once_per_instance
def finalize(self, context):
@ -176,11 +174,8 @@ class ApkWorkload(Workload):
# Times are in seconds
loading_time = 10
package_names = []
supported_versions = []
activity = None
view = None
clear_data_on_reset = True
apk_arguments = {}
# Set this to True to mark that this workload requires the target apk to be run
# for initialisation purposes before the main run is performed.
@ -203,16 +198,6 @@ class ApkWorkload(Workload):
description="""
The version of the package to be used.
"""),
Parameter('max_version', kind=str,
default=None,
description="""
The maximum version of the package to be used.
"""),
Parameter('min_version', kind=str,
default=None,
description="""
The minimum version of the package to be used.
"""),
Parameter('variant', kind=str,
default=None,
description="""
@ -232,7 +217,6 @@ class ApkWorkload(Workload):
"""),
Parameter('uninstall', kind=bool,
default=False,
override=True,
description="""
If ``True``, the workload's APK will be uninstalled as part of teardown.
"""),
@ -251,12 +235,6 @@ class ApkWorkload(Workload):
will fall back to the version on the target if available. If
``False`` then the version on the target is preferred instead.
"""),
Parameter('view', kind=str, default=None, merge=True,
description="""
Manually override the 'View' of the workload for use with
instruments such as the ``fps`` instrument. If not specified,
a workload-dependent 'View' will be automatically generated.
"""),
]
@property
@ -271,41 +249,22 @@ class ApkWorkload(Workload):
raise ConfigError('Target does not appear to support Android')
super(ApkWorkload, self).__init__(target, **kwargs)
if self.activity is not None and '.' not in self.activity:
# If we're receiving just the activity name, it's taken relative to
# the package namespace:
self.activity = '.' + self.activity
self.apk = PackageHandler(self,
package_name=self.package_name,
variant=self.variant,
strict=self.strict,
version=self.version or self.supported_versions,
version=self.version,
force_install=self.force_install,
install_timeout=self.install_timeout,
uninstall=self.uninstall,
exact_abi=self.exact_abi,
prefer_host_package=self.prefer_host_package,
clear_data_on_reset=self.clear_data_on_reset,
activity=self.activity,
min_version=self.min_version,
max_version=self.max_version,
apk_arguments=self.apk_arguments)
def validate(self):
if self.min_version and self.max_version:
if version_tuple(self.min_version) > version_tuple(self.max_version):
msg = 'Cannot specify min version ({}) greater than max version ({})'
raise ConfigError(msg.format(self.min_version, self.max_version))
clear_data_on_reset=self.clear_data_on_reset)
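The min/max check in validate() above compares versions as tuples so that, for example, '4.10' correctly sorts after '4.9'. A rough stand-in for wa.utils.types.version_tuple (the real helper is more forgiving about formats):

def version_tuple(v):
    # '4.10'.split('.') -> ['4', '10'] -> (4, 10); non-numeric parts are dropped
    return tuple(int(p) for p in v.split('.') if p.isdigit())

min_version, max_version = '4.10', '4.9'
if version_tuple(min_version) > version_tuple(max_version):
    print('Cannot specify min version ({}) greater than max version ({})'
          .format(min_version, max_version))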
@once_per_instance
def initialize(self, context):
super(ApkWorkload, self).initialize(context)
self.apk.initialize(context)
# pylint: disable=access-member-before-definition, attribute-defined-outside-init
if self.version is None:
self.version = self.apk.apk_info.version_name
if self.view is None:
self.view = 'SurfaceView - {}/{}'.format(self.apk.package,
self.apk.activity)
@ -323,6 +282,7 @@ class ApkWorkload(Workload):
Perform the setup necessary to rerun the workload. Only called if
``requires_rerun`` is set.
"""
pass
def teardown(self, context):
super(ApkWorkload, self).teardown(context)
@ -367,8 +327,7 @@ class ApkUIWorkload(ApkWorkload):
@once_per_instance
def finalize(self, context):
super(ApkUIWorkload, self).finalize(context)
if self.cleanup_assets:
self.gui.remove()
self.gui.remove()
class ApkUiautoWorkload(ApkUIWorkload):
@ -406,6 +365,7 @@ class ApkReventWorkload(ApkUIWorkload):
def __init__(self, target, **kwargs):
super(ApkReventWorkload, self).__init__(target, **kwargs)
self.apk = PackageHandler(self)
self.gui = ReventGUI(self, target,
self.setup_timeout,
self.run_timeout,
@ -447,8 +407,7 @@ class UIWorkload(Workload):
@once_per_instance
def finalize(self, context):
super(UIWorkload, self).finalize(context)
if self.cleanup_assets:
self.gui.remove()
self.gui.remove()
class UiautoWorkload(UIWorkload):
@ -520,7 +479,7 @@ class UiAutomatorGUI(object):
def init_resources(self, resolver):
self.uiauto_file = resolver.get(ApkFile(self.owner, uiauto=True))
if not self.uiauto_package:
uiauto_info = get_cacheable_apk_info(self.uiauto_file)
uiauto_info = ApkInfo(self.uiauto_file)
self.uiauto_package = uiauto_info.package
def init_commands(self):
@ -644,12 +603,12 @@ class ReventGUI(object):
if self.revent_teardown_file:
self.revent_recorder.replay(self.on_target_teardown_revent,
timeout=self.teardown_timeout)
def remove(self):
self.target.remove(self.on_target_setup_revent)
self.target.remove(self.on_target_run_revent)
self.target.remove(self.on_target_extract_results_revent)
self.target.remove(self.on_target_teardown_revent)
def remove(self):
self.revent_recorder.remove()
def _check_revent_files(self):
@ -678,24 +637,18 @@ class PackageHandler(object):
@property
def activity(self):
if self._activity:
return self._activity
if self.apk_info is None:
return None
return self.apk_info.activity
# pylint: disable=too-many-locals
def __init__(self, owner, install_timeout=300, version=None, variant=None,
package_name=None, strict=False, force_install=False, uninstall=False,
exact_abi=False, prefer_host_package=True, clear_data_on_reset=True,
activity=None, min_version=None, max_version=None, apk_arguments=None):
exact_abi=False, prefer_host_package=True, clear_data_on_reset=True):
self.logger = logging.getLogger('apk')
self.owner = owner
self.target = self.owner.target
self.install_timeout = install_timeout
self.version = version
self.min_version = min_version
self.max_version = max_version
self.variant = variant
self.package_name = package_name
self.strict = strict
@ -704,21 +657,18 @@ class PackageHandler(object):
self.exact_abi = exact_abi
self.prefer_host_package = prefer_host_package
self.clear_data_on_reset = clear_data_on_reset
self._activity = activity
self.supported_abi = self.target.supported_abi
self.apk_file = None
self.apk_info = None
self.apk_version = None
self.logcat_log = None
self.error_msg = None
self.apk_arguments = apk_arguments
def initialize(self, context):
self.resolve_package(context)
def setup(self, context):
context.update_metadata('app_version', self.apk_info.version_name)
context.update_metadata('app_name', self.apk_info.package)
self.initialize_package(context)
self.start_activity()
self.target.execute('am kill-all') # kill all *background* activities
@ -740,7 +690,7 @@ class PackageHandler(object):
self.resolve_package_from_host(context)
if self.apk_file:
self.apk_info = get_cacheable_apk_info(self.apk_file)
self.apk_info = ApkInfo(self.apk_file)
else:
if self.error_msg:
raise WorkloadError(self.error_msg)
@ -764,9 +714,7 @@ class PackageHandler(object):
version=self.version,
package=self.package_name,
exact_abi=self.exact_abi,
supported_abi=self.supported_abi,
min_version=self.min_version,
max_version=self.max_version),
supported_abi=self.supported_abi),
strict=self.strict)
else:
available_packages = []
@ -776,57 +724,47 @@ class PackageHandler(object):
version=self.version,
package=package,
exact_abi=self.exact_abi,
supported_abi=self.supported_abi,
min_version=self.min_version,
max_version=self.max_version),
supported_abi=self.supported_abi),
strict=self.strict)
if apk_file:
available_packages.append(apk_file)
if len(available_packages) == 1:
self.apk_file = available_packages[0]
elif len(available_packages) > 1:
self.error_msg = self._get_package_error_msg('host')
msg = 'Multiple matching packages found for "{}" on host: {}'
self.error_msg = msg.format(self.owner, available_packages)
def resolve_package_from_target(self): # pylint: disable=too-many-branches
self.logger.debug('Resolving package on target')
found_package = None
if self.package_name:
if not self.target.package_is_installed(self.package_name):
return
else:
installed_versions = [self.package_name]
else:
installed_versions = []
for package in self.owner.package_names:
if self.target.package_is_installed(package):
installed_versions.append(package)
if self.version or self.min_version or self.max_version:
matching_packages = []
for package in installed_versions:
package_version = self.target.get_package_version(package)
if self.version:
for v in list_or_string(self.version):
if loose_version_matching(v, package_version):
matching_packages.append(package)
else:
if range_version_matching(package_version, self.min_version,
self.max_version):
if self.version:
matching_packages = []
for package in installed_versions:
package_version = self.target.get_package_version(package)
if loose_version_matching(self.version, package_version):
matching_packages.append(package)
if len(matching_packages) == 1:
self.package_name = matching_packages[0]
elif len(matching_packages) > 1:
msg = 'Multiple matches for version "{}" found on device.'
self.error_msg = msg.format(self.version)
else:
if len(installed_versions) == 1:
self.package_name = installed_versions[0]
elif len(installed_versions) > 1:
self.error_msg = 'Package version not set and multiple versions found on device.'
if len(matching_packages) == 1:
found_package = matching_packages[0]
elif len(matching_packages) > 1:
self.error_msg = self._get_package_error_msg('device')
else:
if len(installed_versions) == 1:
found_package = installed_versions[0]
elif len(installed_versions) > 1:
self.error_msg = 'Package version not set and multiple versions found on device.'
if found_package:
if self.package_name:
self.logger.debug('Found matching package on target; Pulling to host.')
self.apk_file = self.pull_apk(found_package)
self.package_name = found_package
self.apk_file = self.pull_apk(self.package_name)
def initialize_package(self, context):
installed_version = self.target.get_package_version(self.apk_info.package)
@ -856,10 +794,11 @@ class PackageHandler(object):
self.apk_version = host_version
def start_activity(self):
cmd = build_apk_launch_command(self.apk_info.package, self.activity,
self.apk_arguments)
if not self.apk_info.activity:
cmd = 'am start -W {}'.format(self.apk_info.package)
else:
cmd = 'am start -W -n {}/{}'.format(self.apk_info.package,
self.apk_info.activity)
output = self.target.execute(cmd)
if 'Error:' in output:
# this will dismiss any error dialogs
@ -894,93 +833,12 @@ class PackageHandler(object):
message = 'Cannot retrieve "{}" as not installed on Target'
raise WorkloadError(message.format(package))
package_info = self.target.get_package_info(package)
apk_name = self._get_package_name(package_info.apk_path)
host_path = os.path.join(self.owner.dependencies_directory, apk_name)
with atomic_write_path(host_path) as at_path:
self.target.pull(package_info.apk_path, at_path,
timeout=self.install_timeout)
return host_path
self.target.pull(package_info.apk_path, self.owner.dependencies_directory,
timeout=self.install_timeout)
apk_name = self.target.path.basename(package_info.apk_path)
return os.path.join(self.owner.dependencies_directory, apk_name)
def teardown(self):
self.target.execute('am force-stop {}'.format(self.apk_info.package))
if self.uninstall:
self.target.uninstall_package(self.apk_info.package)
def _get_package_name(self, apk_path):
return self.target.path.basename(apk_path)
def _get_package_error_msg(self, location):
if self.version:
msg = 'Multiple matches for "{version}" found on {location}.'
elif self.min_version and self.max_version:
msg = 'Multiple matches between versions "{min_version}" and "{max_version}" found on {location}.'
elif self.max_version:
msg = 'Multiple matches less than or equal to "{max_version}" found on {location}.'
elif self.min_version:
msg = 'Multiple matches greater than or equal to "{min_version}" found on {location}.'
else:
msg = ''
return msg.format(version=self.version, min_version=self.min_version,
max_version=self.max_version, location=location)
class TestPackageHandler(PackageHandler):
"""Class wrapping an APK used through ``am instrument``.
"""
def __init__(self, owner, instrument_args=None, raw_output=False,
instrument_wait=True, no_hidden_api_checks=False,
*args, **kwargs):
if instrument_args is None:
instrument_args = {}
super(TestPackageHandler, self).__init__(owner, *args, **kwargs)
self.raw = raw_output
self.args = instrument_args
self.wait = instrument_wait
self.no_checks = no_hidden_api_checks
self.cmd = ''
self.instrument_thread = None
self._instrument_output = None
def setup(self, context):
self.initialize_package(context)
words = ['am', 'instrument', '--user', '0']
if self.raw:
words.append('-r')
if self.wait:
words.append('-w')
if self.no_checks:
words.append('--no-hidden-api-checks')
for k, v in self.args.items():
words.extend(['-e', str(k), str(v)])
words.append(str(self.apk_info.package))
if self.apk_info.activity:
words[-1] += '/{}'.format(self.apk_info.activity)
self.cmd = ' '.join(quote(x) for x in words)
self.instrument_thread = threading.Thread(target=self._start_instrument)
def start_activity(self):
self.instrument_thread.start()
def wait_instrument_over(self):
self.instrument_thread.join()
if 'Error:' in self._instrument_output:
cmd = 'am force-stop {}'.format(self.apk_info.package)
self.target.execute(cmd)
raise WorkloadError(self._instrument_output)
def _start_instrument(self):
self._instrument_output = self.target.execute(self.cmd)
self.logger.debug(self._instrument_output)
def _get_package_name(self, apk_path):
return 'test_{}'.format(self.target.path.basename(apk_path))
@property
def instrument_output(self):
if self.instrument_thread.is_alive():
self.instrument_thread.join() # writes self._instrument_output
return self._instrument_output
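TestPackageHandler.setup() above assembles a standard ``am instrument`` invocation. A standalone sketch of the same string composition (package, runner, and argument names are made up):

from shlex import quote

def build_instrument_cmd(package, activity=None, args=None,
                         raw=False, wait=True, no_hidden_api_checks=False):
    words = ['am', 'instrument', '--user', '0']
    if raw:
        words.append('-r')
    if wait:
        words.append('-w')
    if no_hidden_api_checks:
        words.append('--no-hidden-api-checks')
    for k, v in (args or {}).items():
        words.extend(['-e', str(k), str(v)])
    words.append(package if not activity else '{}/{}'.format(package, activity))
    return ' '.join(quote(x) for x in words)

print(build_instrument_cmd('com.example.test',
                           'androidx.test.runner.AndroidJUnitRunner',
                           {'class': 'com.example.MyTest'}))
# am instrument --user 0 -w -e class com.example.MyTest com.example.test/androidx.test.runner.AndroidJUnitRunner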

@ -20,7 +20,6 @@ import time
from wa import Instrument, Parameter
from wa.framework.exception import ConfigError, InstrumentError
from wa.framework.instrument import extremely_slow
from wa.utils.types import identifier
class DelayInstrument(Instrument):
@ -33,7 +32,7 @@ class DelayInstrument(Instrument):
The delay may be specified as either a fixed period or a temperature
threshold that must be reached.
Optionally, if an active cooling solution is available on the device to
Optionally, if an active cooling solution is available on the device tqgitq
speed up temperature drop between runs, it may be controlled using this
instrument.
@ -201,16 +200,16 @@ class DelayInstrument(Instrument):
reading = self.target.read_int(self.temperature_file)
def validate(self):
if (self.temperature_between_specs is not None
and self.fixed_between_specs is not None):
if (self.temperature_between_specs is not None and
self.fixed_between_specs is not None):
raise ConfigError('Both fixed delay and thermal threshold specified for specs.')
if (self.temperature_between_jobs is not None
and self.fixed_between_jobs is not None):
if (self.temperature_between_jobs is not None and
self.fixed_between_jobs is not None):
raise ConfigError('Both fixed delay and thermal threshold specified for jobs.')
if (self.temperature_before_start is not None
and self.fixed_before_start is not None):
if (self.temperature_before_start is not None and
self.fixed_before_start is not None):
raise ConfigError('Both fixed delay and thermal threshold specified before start.')
if not any([self.temperature_between_specs, self.fixed_between_specs,
@ -223,7 +222,7 @@ class DelayInstrument(Instrument):
for module in self.active_cooling_modules:
if self.target.has(module):
if not cooling_module:
cooling_module = getattr(self.target, identifier(module))
cooling_module = getattr(self.target, module)
else:
msg = 'Multiple cooling modules found "{}" "{}".'
raise InstrumentError(msg.format(cooling_module.name, module))
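The master-side change resolves the cooling module through identifier(), since module names such as 'mbed-fan' are exposed as target attributes with underscores. A rough stand-in for wa.utils.types.identifier:

import re

def identifier(text):
    # make a name usable with getattr(): 'mbed-fan' -> 'mbed_fan'
    return re.sub(r'\W+', '_', text)

print(identifier('mbed-fan'))  # mbed_fan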

Some files were not shown because too many files have changed in this diff.