mirror of
https://github.com/ARM-software/workload-automation.git
synced 2025-07-11 17:43:35 +01:00
Compare commits
119 Commits
Author | SHA1 | Date | |
---|---|---|---|
a826b661f4 | |||
43f4e52995 | |||
23b3b165d5 | |||
2f87e126f0 | |||
59d74b6273 | |||
7b92f355c8 | |||
982069be32 | |||
63ff8987ea | |||
f276d4e39f | |||
1811a8b733 | |||
0ae03e2c54 | |||
c423a8b4bc | |||
c207a34872 | |||
2cb40d3da6 | |||
18d1f9f649 | |||
17ce8d0fe9 | |||
ac03c9bab4 | |||
8bdffe6f9c | |||
2ff13089fd | |||
772346507c | |||
0fc88a84be | |||
6e4f6af942 | |||
c87daa510e | |||
5e1c9694e7 | |||
a9a42164a3 | |||
0d50fe9b77 | |||
e5c228bab2 | |||
7ccac87b93 | |||
24a2afb5b9 | |||
9652801cce | |||
881b7514e2 | |||
17fe6c9a5b | |||
f02b6d5fd9 | |||
eaf4d02aea | |||
56a4d52995 | |||
ec5c149df5 | |||
c0f32237e3 | |||
5a1c8c7a7e | |||
46cd26e774 | |||
544c498eb6 | |||
5ad75dd0b8 | |||
b2248413b7 | |||
9296bafbd9 | |||
8abf39762d | |||
87cbce4244 | |||
ef61f16896 | |||
e96450d226 | |||
2cf08cf448 | |||
59cfd7c757 | |||
d3c7f11f2d | |||
187fd70077 | |||
fe7f98a98b | |||
66c18fcd31 | |||
5773da0d08 | |||
d581f1f329 | |||
f165969d61 | |||
8dc24bd327 | |||
59066cb46d | |||
6c4d88ff57 | |||
a40542d57b | |||
697aefc7bb | |||
8bc71bb810 | |||
91210f26e9 | |||
44a49db04d | |||
0bfa4bff3c | |||
73aa590056 | |||
985b249a24 | |||
f5e138bed0 | |||
b6c0e2e4fd | |||
df8ef6be6b | |||
8a3186e1c8 | |||
68043f2a52 | |||
95bbce77a2 | |||
ec85f9f8a0 | |||
82e4998092 | |||
48259d872b | |||
8d13e1f341 | |||
33ef949507 | |||
68714e0e55 | |||
9ee1666a76 | |||
8dcdc9afe1 | |||
724f6e590e | |||
507090515b | |||
1dfbe9e44c | |||
d303ab2b50 | |||
b17ae78d6b | |||
391b0b01fc | |||
20861f0ee4 | |||
ff5f48b7e7 | |||
9a301175b0 | |||
712c79020d | |||
12dfbef76b | |||
b1f607ef70 | |||
107e8414bb | |||
4f8b7e9f59 | |||
a077e7df3c | |||
a2257fe1e2 | |||
50353d0b8f | |||
0f5621ff66 | |||
2eca77fb02 | |||
3de5b5fe0b | |||
499a9f4082 | |||
3043506d86 | |||
7db904b359 | |||
5abeb7aac2 | |||
e04691afb9 | |||
15ced50640 | |||
1a2e1fdf75 | |||
3531dd6d07 | |||
cf55f317f8 | |||
79554a2dbc | |||
06c232545a | |||
11184750ec | |||
77b221fc5a | |||
20cd6a9c18 | |||
34d7e7055a | |||
0c1e01cad4 | |||
a68e46eb0a | |||
203a3f7d07 |
.github
.gitignore.readthedocs.ymlMANIFEST.inREADME.rstdev_scripts
doc
Makefilebuild_extension_docs.pybuild_instrumentation_method_map.pybuild_plugin_docs.pymake.batrequirements.txt
source
WA-logo-white.svg
_static
_templates
additional_topics.rstagenda.rstapi.rstapi
changes.rstconf.pyconfiguration.rstcontributing.rstconventions.rstdaq-wiring.pngdaq_device_setup.rstdeveloper_information.rstdeveloper_information
developer_guide.rst
device_setup.rstexecution_model.rstfaq.rstglossary.rstindex.rstinstallation.rstinstrument_method_map.templateinstrumentation_method_map.rstinstrumentation_method_map.templateinvocation.rstmigration_guide.rstplugins.rstquickstart.rstresources.rstrevent.rstuser_information.rstdeveloper_guide
developer_reference.rstdeveloper_reference
WA_Execution.svgWA_Signal_Dispatch.svgcontributing.rstframework_overview.rstplugins.rstrevent.rstserialization.rst
how_to.rsthow_tos
user_information
wa-execution.pngwriting_extensions.rstextras
pytest.inirequirements.txtscripts
setup.pytests
ci
data
bad-syntax-agenda.yaml
test_agenda_parser.pytest_config.pytest_exec_control.pytest_execution.pytest_plugin.pytest_runtime_param_utils.pytest_signal.pytest_utils.pyincludes
wa
__init__.py
assets
commands
create.pylist.py
postgres_schemas
postgres_schema.sqlpostgres_schema_update_v1.2.sqlpostgres_schema_update_v1.3.sqlpostgres_schema_update_v1.4.sqlpostgres_schema_update_v1.5.sqlpostgres_schema_update_v1.6.sql
process.pyreport.pyrevent.pyrun.pyschema_changelog.rstshow.pytemplates
framework
command.py
configuration
entrypoint.pyexecution.pygetters.pyhost.pyjob.pyoutput.pyoutput_processor.pyplugin.pypluginloader.pyresource.pyrun.pysignal.pytarget
uiauto
version.pyworkload.pyinstruments
delay.pyenergy_measurement.pyfps.pyhwmon.pymisc.pyperf.pyperfetto.py
poller
proc_stat
screencap.pyserialmon.pytrace_cmd.pyoutput_processors
tools
revent
utils
workloads
adobereader
__init__.pycom.arm.wa.uiauto.adobereader.apk
uiauto
aitutu
__init__.pycom.arm.wa.uiauto.aitutu.apk
uiauto
androbench
__init__.pycom.arm.wa.uiauto.androbench.apk
uiauto
antutu
__init__.pycom.arm.wa.uiauto.antutu.apk
apache.pyuiauto
applaunch
__init__.pycom.arm.wa.uiauto.applaunch.apk
uiauto
benchmarkpi
com.arm.wa.uiauto.benchmarkpi.apk
uiauto
chrome
__init__.pycom.arm.wa.uiauto.chrome.apk
uiauto
deepbench
dhrystone
drarm
exoplayer
geekbench
__init__.pycom.arm.wa.uiauto.geekbench.apk
uiauto
gfxbench
__init__.pycom.arm.wa.uiauto.gfxbench.apk
uiauto
glbenchmark
gmail
__init__.pycom.arm.wa.uiauto.gmail.apk
uiauto
googlemaps
__init__.pycom.arm.wa.uiauto.googlemaps.apk
uiauto
googlephotos
__init__.pycom.arm.wa.uiauto.googlephotos.apk
uiauto
googleplaybooks
__init__.pycom.arm.wa.uiauto.googleplaybooks.apk
uiauto
googleslides
__init__.pycom.arm.wa.uiauto.googleslides.apk
uiauto
hackbench
honorofkings
idle.pyjankbench
manual
meabo
memcpy
mongoperf
motionmark
__init__.pycom.arm.wa.uiauto.motionmark.apk
uiauto
openssl
pcmark
__init__.pycom.arm.wa.uiauto.pcmark.apk
uiauto
recentfling
rt_app
bin
arm64
armeabi
ppc64le
x86
x86_64
use_cases
schbench
speedometer
stress_ng
sysbench
bin
arm64
uibench
uibenchjanktests
vellamo
youtube
__init__.pycom.arm.wa.uiauto.youtube.apk
uiauto
youtube_playback
wlauto
__init__.pyagenda-example-biglittle.yamlagenda-example-tutorial.yaml
commands
common
config_example.pycore
__init__.pyagenda.pybootstrap.pycommand.pyconfiguration.pydevice.pyentry_point.pyexecution.pyextension.pyextension_loader.pyexttype.pyinstrumentation.pyresolver.pyresource.pyresult.pysignal.pyversion.pyworkload.py
devices
__init__.py
exceptions.pyandroid
linux
external
README
bbench_server
daq_server
louie
LICENSE__init__.pydispatcher.pyerror.pyplugin.pyprioritylist.pyrobustapply.pysaferef.pysender.pysignal.py
test
__init__.pyconftest.pyfixture.pytest_dispatcher.pytest_plugin.pytest_prioritydispatcher.pytest_prioritylist.pytest_robustapply.pytest_saferef.py
version.pypmu_logger
readenergy
revent
sqlite
uiauto
instrumentation
__init__.py
coreutil
daq
delay
dmesg
energy_model
energy_probe
fps
freqsweep
hwmon
juno_energy
misc
netstats
perf
pmu_logger
poller
screenon
servo_power_monitors
streamline
systrace
trace_cmd
modules
resource_getters
result_processors
tests
README__init__.py
data
test_agenda.pytest_config.pytest_device.pytest_diff.pytest_execution.pytest_extension.pytest_extension_loader.pytest_instrumentation.pytest_results_manager.pytest_utils.pytools
utils
__init__.pyandroid.pycli.pycpuinfo.pycros_sdk.pydoc.pyformatter.pyhwmon.pyipython.pylog.pymisc.pynetio.pypower.pyserial_port.pyssh.pyterminalsize.pytrace_cmd.pytypes.pyuboot.pyuefi.py
workloads
__init__.py
andebench
androbench
angrybirds
angrybirds_rio
anomaly2
antutu
apklaunch
applaunch
audio
autotest
bbench
benchmarkpi
blogbench
caffeinemark
cameracapture
camerarecord
castlebuilder
castlemaster
cfbench
citadel
cyclictest
dex2oat
dhrystone
dungeondefenders
ebizzy
facebook
geekbench
glbcorp
glbenchmark
googlemap
gunbros2
hackbench
homescreen
hwuitest
idle
iozone
ironman
krazykart
linpack
linpack_cli
lmbench
manual
memcpy
nenamark
peacekeeper
power_loadtest
quadrant
real_linpack
realracing3
recentfling
rt_app
LICENSE__init__.py
bin
use_cases
browser-long.jsonbrowser-short.jsonmp3-long.jsonmp3-short.jsonspreading-tasks.jsontaskset.jsonvideo-long.jsonvideo-short.json
workgenshellscript
skypevideo
smartbench
spec2000
sqlite
stream
stress_ng
sysbench
telemetry
templerun
thechase
truckerparking3d
vellamo
video
videostreaming
16
.github/ISSUE_TEMPLATE/bug_report.md
vendored
16
.github/ISSUE_TEMPLATE/bug_report.md
vendored
@ -1,16 +0,0 @@
|
||||
---
|
||||
name: Bug report
|
||||
about: Create a report to help resolve an issue.
|
||||
title: ''
|
||||
labels: bug
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
**Describe the issue**
|
||||
A clear and concise description of what the bug is.
|
||||
|
||||
**Run Log**
|
||||
Please attach your `run.log` detailing the issue.
|
||||
|
||||
**Other comments (optional)**
|
17
.github/ISSUE_TEMPLATE/feature_request.md
vendored
17
.github/ISSUE_TEMPLATE/feature_request.md
vendored
@ -1,17 +0,0 @@
|
||||
---
|
||||
name: Feature request
|
||||
about: Suggest an idea for this project
|
||||
title: ''
|
||||
labels: enhancement
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
**Is your feature request related to a problem? Please describe.**
|
||||
A clear and concise description of what the problem is.
|
||||
|
||||
**Describe the solution you'd like**
|
||||
A clear and concise description of what you want to happen.
|
||||
|
||||
**Additional context**
|
||||
Add any other context about the feature request here.
|
10
.github/ISSUE_TEMPLATE/question---support-.md
vendored
10
.github/ISSUE_TEMPLATE/question---support-.md
vendored
@ -1,10 +0,0 @@
|
||||
---
|
||||
name: 'Question / Support '
|
||||
about: Ask a question or reqeust support
|
||||
title: ''
|
||||
labels: question
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
**
|
11
.github/ISSUE_TEMPLATE/question.md
vendored
11
.github/ISSUE_TEMPLATE/question.md
vendored
@ -1,11 +0,0 @@
|
||||
---
|
||||
name: Question
|
||||
about: Ask a question
|
||||
title: ''
|
||||
labels: question
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
**Describe you query**
|
||||
What would you like to know / what are you trying to achieve?
|
92
.github/workflows/main.yml
vendored
92
.github/workflows/main.yml
vendored
@ -1,92 +0,0 @@
|
||||
name: WA Test Suite
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master ]
|
||||
pull_request:
|
||||
branches: [ master ]
|
||||
types: [opened, synchronize, reopened, ready_for_review]
|
||||
schedule:
|
||||
- cron: 0 2 * * *
|
||||
# Allows runing this workflow manually from the Actions tab
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
Run-Linters-and-Tests:
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Set up Python 3.8.18
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.8.18
|
||||
- name: git-bash
|
||||
uses: pkg-src/github-action-git-bash@v1.1
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
cd /tmp && git clone https://github.com/ARM-software/devlib.git && cd devlib && pip install .
|
||||
cd $GITHUB_WORKSPACE && pip install .[test]
|
||||
python -m pip install pylint==2.6.2 pep8 flake8 mock nose
|
||||
- name: Run pylint
|
||||
run: |
|
||||
cd $GITHUB_WORKSPACE && ./dev_scripts/pylint wa/
|
||||
- name: Run PEP8
|
||||
run: |
|
||||
cd $GITHUB_WORKSPACE && ./dev_scripts/pep8 wa
|
||||
- name: Run nose tests
|
||||
run: |
|
||||
nosetests
|
||||
|
||||
Execute-Test-Workload-and-Process:
|
||||
runs-on: ubuntu-22.04
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: [3.7.17, 3.8.18, 3.9.21, 3.10.16, 3.13.2]
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
- name: git-bash
|
||||
uses: pkg-src/github-action-git-bash@v1.1
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
cd /tmp && git clone https://github.com/ARM-software/devlib.git && cd devlib && pip install .
|
||||
cd $GITHUB_WORKSPACE && pip install .
|
||||
- name: Run test workload
|
||||
run: |
|
||||
cd /tmp && wa run $GITHUB_WORKSPACE/tests/ci/idle_agenda.yaml -v -d idle_workload
|
||||
- name: Test Process Command
|
||||
run: |
|
||||
cd /tmp && wa process -f -p csv idle_workload
|
||||
|
||||
Test-WA-Commands:
|
||||
runs-on: ubuntu-22.04
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: [3.7.17, 3.8.18, 3.9.21, 3.10.16, 3.13.2]
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
- name: git-bash
|
||||
uses: pkg-src/github-action-git-bash@v1.1
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
cd /tmp && git clone https://github.com/ARM-software/devlib.git && cd devlib && pip install .
|
||||
cd $GITHUB_WORKSPACE && pip install .
|
||||
- name: Test Show Command
|
||||
run: |
|
||||
wa show dhrystone && wa show generic_android && wa show trace-cmd && wa show csv
|
||||
- name: Test List Command
|
||||
run: |
|
||||
wa list all
|
||||
- name: Test Create Command
|
||||
run: |
|
||||
wa create agenda dhrystone generic_android csv trace_cmd && wa create package test && wa create workload test
|
24
.gitignore
vendored
24
.gitignore
vendored
@ -3,7 +3,6 @@
|
||||
*.bak
|
||||
*.o
|
||||
*.cmd
|
||||
*.iml
|
||||
Module.symvers
|
||||
modules.order
|
||||
*~
|
||||
@ -12,23 +11,20 @@ build/
|
||||
dist/
|
||||
.ropeproject/
|
||||
wa_output/
|
||||
doc/source/plugins/
|
||||
doc/source/api/
|
||||
doc/source/extensions/
|
||||
MANIFEST
|
||||
wlauto/external/uiautomator/bin/
|
||||
wlauto/external/uiautomator/*.properties
|
||||
wlauto/external/uiautomator/build.xml
|
||||
*.orig
|
||||
local.properties
|
||||
wlauto/external/revent/libs/
|
||||
wlauto/external/revent/obj/
|
||||
wlauto/external/bbench_server/libs/
|
||||
wlauto/external/bbench_server/obj/
|
||||
pmu_logger.mod.c
|
||||
.tmp_versions
|
||||
obj/
|
||||
libs/armeabi
|
||||
**/uiauto/**/build/
|
||||
**/uiauto/**/.gradle
|
||||
**/uiauto/**/.idea
|
||||
**/uiauto/**/proguard-rules.pro
|
||||
**/uiauto/app/libs/
|
||||
**/uiauto/*.properties
|
||||
**/uiauto/**/.project
|
||||
**/uiauto/**/.settings
|
||||
**/uiauto/**/.classpath
|
||||
doc/source/developer_information/developer_guide/instrument_method_map.rst
|
||||
doc/source/run_config/
|
||||
.eggs
|
||||
wlauto/workloads/*/uiauto/bin/
|
||||
|
@ -1,28 +0,0 @@
|
||||
# .readthedocs.yml
|
||||
# Read the Docs configuration file
|
||||
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
|
||||
|
||||
# Required
|
||||
version: 2
|
||||
|
||||
# Build documentation in the docs/ directory with Sphinx
|
||||
sphinx:
|
||||
builder: html
|
||||
configuration: doc/source/conf.py
|
||||
|
||||
# Build the docs in additional formats such as PDF and ePub
|
||||
formats: all
|
||||
|
||||
|
||||
# Configure the build environment
|
||||
build:
|
||||
os: ubuntu-22.04
|
||||
tools:
|
||||
python: "3.11"
|
||||
|
||||
# Ensure doc dependencies are installed before building
|
||||
python:
|
||||
install:
|
||||
- requirements: doc/requirements.txt
|
||||
- method: pip
|
||||
path: .
|
@ -1,3 +1,2 @@
|
||||
recursive-include scripts *
|
||||
recursive-include doc *
|
||||
recursive-include wa *
|
||||
|
40
README.rst
40
README.rst
@ -2,23 +2,23 @@ Workload Automation
|
||||
+++++++++++++++++++
|
||||
|
||||
Workload Automation (WA) is a framework for executing workloads and collecting
|
||||
measurements on Android and Linux devices. WA includes automation for nearly 40
|
||||
workloads and supports some common instrumentation (ftrace, hwmon) along with a
|
||||
number of output formats.
|
||||
measurements on Android and Linux devices. WA includes automation for nearly 50
|
||||
workloads (mostly Android), some common instrumentation (ftrace, ARM
|
||||
Streamline, hwmon). A number of output formats are supported.
|
||||
|
||||
WA is designed primarily as a developer tool/framework to facilitate data driven
|
||||
development by providing a method of collecting measurements from a device in a
|
||||
repeatable way.
|
||||
Workload Automation is designed primarily as a developer tool/framework to
|
||||
facilitate data driven development by providing a method of collecting
|
||||
measurements from a device in a repeatable way.
|
||||
|
||||
WA is highly extensible. Most of the concrete functionality is implemented via
|
||||
plug-ins, and it is easy to write new plug-ins to support new device types,
|
||||
workloads, instruments or output processing.
|
||||
Workload Automation is highly extensible. Most of the concrete functionality is
|
||||
implemented via plug-ins, and it is easy to write new plug-ins to support new
|
||||
device types, workloads, instrumentation or output processing.
|
||||
|
||||
|
||||
Requirements
|
||||
============
|
||||
|
||||
- Python 3.5+
|
||||
- Python 2.7
|
||||
- Linux (should work on other Unixes, but untested)
|
||||
- Latest Android SDK (ANDROID_HOME must be set) for Android devices, or
|
||||
- SSH for Linux devices
|
||||
@ -29,28 +29,24 @@ Installation
|
||||
|
||||
To install::
|
||||
|
||||
git clone git@github.com:ARM-software/workload-automation.git workload-automation
|
||||
sudo -H python setup [install|develop]
|
||||
python setup.py sdist
|
||||
sudo pip install dist/wlauto-*.tar.gz
|
||||
|
||||
Note: A `requirements.txt` is included however this is designed to be used as a
|
||||
reference for known working versions rather than as part of a standard
|
||||
installation.
|
||||
|
||||
Please refer to the `installation section <http://workload-automation.readthedocs.io/en/latest/user_information.html#install>`_
|
||||
Please refer to the `installation section <./doc/source/installation.rst>`_
|
||||
in the documentation for more details.
|
||||
|
||||
|
||||
Basic Usage
|
||||
===========
|
||||
|
||||
Please see the `Quickstart <http://workload-automation.readthedocs.io/en/latest/user_information.html#user-guide>`_
|
||||
section of the documentation.
|
||||
Please see the `Quickstart <./doc/source/quickstart.rst>`_ section of the
|
||||
documentation.
|
||||
|
||||
|
||||
Documentation
|
||||
=============
|
||||
|
||||
You can view pre-built HTML documentation `here <http://workload-automation.readthedocs.io/en/latest/>`_.
|
||||
You can view pre-built HTML documentation `here <http://pythonhosted.org/wlauto/>`_.
|
||||
|
||||
Documentation in reStructuredText format may be found under ``doc/source``. To
|
||||
compile it into cross-linked HTML, make sure you have `Sphinx
|
||||
@ -65,11 +61,11 @@ License
|
||||
|
||||
Workload Automation is distributed under `Apache v2.0 License
|
||||
<http://www.apache.org/licenses/LICENSE-2.0>`_. Workload automation includes
|
||||
binaries distributed under different licenses (see LICENSE files in specific
|
||||
binaries distributed under differnt licenses (see LICENSE files in specfic
|
||||
directories).
|
||||
|
||||
|
||||
Feedback, Contributions and Support
|
||||
Feedback, Contrubutions and Support
|
||||
===================================
|
||||
|
||||
- Please use the GitHub Issue Tracker associated with this repository for
|
||||
|
@ -15,15 +15,9 @@ Scripts
|
||||
:get_apk_versions: Prints out a table of APKs and their versons found under the
|
||||
path specified as the argument.
|
||||
|
||||
:pep8: Runs flake8 (formerly called "pep8") code checker (must be
|
||||
installed) over wa/ with the correct settings for WA.
|
||||
:pep8: Runs pep8 code checker (must be installed) over wlauto with the correct
|
||||
settings for WA.
|
||||
|
||||
:pylint: Runs pylint (must be installed) over wlauto with the correct settings
|
||||
for WA.
|
||||
|
||||
:rebuild_all_uiauto: Rebuild UIAutomator APKs for workloads that have them. This
|
||||
is useful to make sure they're all using the latest
|
||||
uiauto.arr after the latter has been updated.
|
||||
|
||||
:update_copyrights: Checks and updates the year of the copyright in source files,
|
||||
adding a copyright header if it's not already there.
|
||||
|
@ -1,16 +1,17 @@
|
||||
#!/bin/bash
|
||||
|
||||
DEFAULT_DIRS=(
|
||||
wa
|
||||
wlauto
|
||||
wlauto/external/daq_server/src/daqpower
|
||||
)
|
||||
|
||||
EXCLUDE=wa/tests,wa/framework/target/descriptor.py
|
||||
EXCLUDE_COMMA=
|
||||
IGNORE=E501,E265,E266,W391,E401,E402,E731,W503,W605,F401
|
||||
EXCLUDE=wlauto/external/,wlauto/tests
|
||||
EXCLUDE_COMMA=wlauto/core/bootstrap.py,wlauto/workloads/geekbench/__init__.py
|
||||
IGNORE=E501,E265,E266,W391,E401,E402,E731
|
||||
|
||||
if ! hash flake8 2>/dev/null; then
|
||||
echo "flake8 not found in PATH"
|
||||
echo "you can install it with \"sudo pip install flake8\""
|
||||
if ! hash pep8 2>/dev/null; then
|
||||
echo "pep8 not found in PATH"
|
||||
echo "you can install it with \"sudo pip install pep8\""
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@ -18,11 +19,11 @@ if [[ "$1" == "" ]]; then
|
||||
THIS_DIR="`dirname \"$0\"`"
|
||||
pushd $THIS_DIR/.. > /dev/null
|
||||
for dir in "${DEFAULT_DIRS[@]}"; do
|
||||
flake8 --exclude=$EXCLUDE,$EXCLUDE_COMMA --ignore=$IGNORE $dir
|
||||
pep8 --exclude=$EXCLUDE,$EXCLUDE_COMMA --ignore=$IGNORE $dir
|
||||
done
|
||||
flake8 --exclude=$EXCLUDE --ignore=$IGNORE,E241 $(echo "$EXCLUDE_COMMA" | sed 's/,/ /g')
|
||||
pep8 --exclude=$EXCLUDE --ignore=$IGNORE,E241 $(echo "$EXCLUDE_COMMA" | sed 's/,/ /g')
|
||||
popd > /dev/null
|
||||
else
|
||||
flake8 --exclude=$EXCLUDE,$EXCLUDE_COMMA --ignore=$IGNORE $1
|
||||
pep8 --exclude=$EXCLUDE,$EXCLUDE_COMMA --ignore=$IGNORE $1
|
||||
fi
|
||||
|
||||
|
@ -1,6 +1,8 @@
|
||||
#!/bin/bash
|
||||
|
||||
DEFAULT_DIRS=(
|
||||
wa
|
||||
wlauto
|
||||
wlauto/external/daq_server/src/daqpower
|
||||
)
|
||||
|
||||
target=$1
|
||||
@ -32,34 +34,21 @@ compare_versions() {
|
||||
return 0
|
||||
}
|
||||
|
||||
pylint_version=$(python -c 'from pylint.__pkginfo__ import version; print(version)' 2>/dev/null)
|
||||
if [ "x$pylint_version" == "x" ]; then
|
||||
pylint_version=$(python3 -c 'from pylint.__pkginfo__ import version; print(version)' 2>/dev/null)
|
||||
fi
|
||||
if [ "x$pylint_version" == "x" ]; then
|
||||
pylint_version=$(python3 -c 'from pylint import version; print(version)' 2>/dev/null)
|
||||
fi
|
||||
if [ "x$pylint_version" == "x" ]; then
|
||||
echo "ERROR: no pylint verison found; is it installed?"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
compare_versions $pylint_version "1.9.2"
|
||||
pylint_version=$(python -c 'from pylint.__pkginfo__ import version; print version')
|
||||
compare_versions $pylint_version "1.5.1"
|
||||
result=$?
|
||||
if [ "$result" == "2" ]; then
|
||||
echo "ERROR: pylint version must be at least 1.9.2; found $pylint_version"
|
||||
echo "ERROR: pylint version must be at least 1.5.1; found $pylint_version"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
set -e
|
||||
THIS_DIR="`dirname \"$0\"`"
|
||||
CWD=$PWD
|
||||
pushd $THIS_DIR > /dev/null
|
||||
if [[ "$target" == "" ]]; then
|
||||
pushd $THIS_DIR/.. > /dev/null
|
||||
for dir in "${DEFAULT_DIRS[@]}"; do
|
||||
PYTHONPATH=. pylint --rcfile ../extras/pylintrc --load-plugins pylint_plugins ../$dir
|
||||
pylint --rcfile extras/pylintrc $dir
|
||||
done
|
||||
popd > /dev/null
|
||||
else
|
||||
PYTHONPATH=. pylint --rcfile ../extras/pylintrc --load-plugins pylint_plugins $CWD/$target
|
||||
pylint --rcfile $THIS_DIR/../extras/pylintrc $target
|
||||
fi
|
||||
popd > /dev/null
|
||||
|
@ -1,48 +0,0 @@
|
||||
import sys
|
||||
|
||||
from astroid import MANAGER
|
||||
from astroid import scoped_nodes
|
||||
|
||||
|
||||
IGNORE_ERRORS = {
|
||||
('attribute-defined-outside-init', ): [
|
||||
'wa.workloads',
|
||||
'wa.instruments',
|
||||
'wa.output_procesors',
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
def register(linter):
|
||||
pass
|
||||
|
||||
|
||||
def transform(mod):
|
||||
for errors, paths in IGNORE_ERRORS.items():
|
||||
for path in paths:
|
||||
if path in mod.name:
|
||||
text = mod.stream().read()
|
||||
if not text.strip():
|
||||
return
|
||||
|
||||
text = text.split(b'\n')
|
||||
# NOTE: doing it this way because the "correct" approach below does not
|
||||
# work. We can get away with this, because in well-formated WA files,
|
||||
# the initial line is the copyright header's blank line.
|
||||
if b'pylint:' in text[0]:
|
||||
msg = 'pylint directive found on the first line of {}; please move to below copyright header'
|
||||
raise RuntimeError(msg.format(mod.name))
|
||||
char = chr(text[0][0])
|
||||
if text[0].strip() and char != '#':
|
||||
msg = 'first line of {} is not a comment; is the copyright header missing?'
|
||||
raise RuntimeError(msg.format(mod.name))
|
||||
text[0] = '# pylint: disable={}'.format(','.join(errors)).encode('utf-8')
|
||||
mod.file_bytes = b'\n'.join(text)
|
||||
|
||||
# This is what *should* happen, but doesn't work.
|
||||
# text.insert(0, '# pylint: disable=attribute-defined-outside-init')
|
||||
# mod.file_bytes = '\n'.join(text)
|
||||
# mod.tolineno += 1
|
||||
|
||||
|
||||
MANAGER.register_transform(scoped_nodes.Module, transform)
|
@ -1,24 +0,0 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# This script rebuilds all uiauto APKs as well as the base uiauto.arr. This is
|
||||
# useful when changes have been made to the base uiautomation classes and so
|
||||
# all automation needs to be rebuilt to link against the updated uiauto.arr.
|
||||
set -e
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
BASE_DIR="$SCRIPT_DIR/../wa/framework/uiauto"
|
||||
WORKLOADS_DIR="$SCRIPT_DIR/../wa/workloads"
|
||||
|
||||
pushd $BASE_DIR > /dev/null
|
||||
echo "building $(pwd)"
|
||||
./build.sh
|
||||
popd > /dev/null
|
||||
|
||||
for uiauto_dir in $(find $WORKLOADS_DIR -type d -name uiauto); do
|
||||
pushd $uiauto_dir > /dev/null
|
||||
if [ -f build.sh ]; then
|
||||
echo "building $(pwd)"
|
||||
./build.sh
|
||||
fi
|
||||
popd > /dev/null
|
||||
done
|
@ -1,212 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
#
|
||||
# Script to put copyright headers into source files.
|
||||
#
|
||||
import argparse
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import string
|
||||
import subprocess
|
||||
from datetime import datetime
|
||||
|
||||
SOURCE_EXTENSIONS = {
|
||||
'.py': ('#', '#', '#'),
|
||||
'.sh': ('#', '#', '#'),
|
||||
'.java': ('/*', '*/', ' *'),
|
||||
'.c': ('/*', '*/', ' *'),
|
||||
'.h': ('/*', '*/', ' *'),
|
||||
'.cpp': ('/*', '*/', ' *'),
|
||||
}
|
||||
|
||||
OLD_HEADER_TEMPLATE = string.Template(
|
||||
"""${begin_symbol} $$Copyright:
|
||||
${symbol} ----------------------------------------------------------------
|
||||
${symbol} This confidential and proprietary software may be used only as
|
||||
${symbol} authorised by a licensing agreement from ARM Limited
|
||||
${symbol} (C) COPYRIGHT ${year} ARM Limited
|
||||
${symbol} ALL RIGHTS RESERVED
|
||||
${symbol} The entire notice above must be reproduced on all authorised
|
||||
${symbol} copies and copies may only be made to the extent permitted
|
||||
${symbol} by a licensing agreement from ARM Limited.
|
||||
${symbol} ----------------------------------------------------------------
|
||||
${symbol} File: ${file}
|
||||
${symbol} ----------------------------------------------------------------
|
||||
${symbol} $$
|
||||
${end_symbol}
|
||||
"""
|
||||
)
|
||||
|
||||
HEADER_TEMPLATE = string.Template(
|
||||
"""${begin_symbol} Copyright ${year} ARM Limited
|
||||
${symbol}
|
||||
${symbol} Licensed under the Apache License, Version 2.0 (the "License");
|
||||
${symbol} you may not use this file except in compliance with the License.
|
||||
${symbol} You may obtain a copy of the License at
|
||||
${symbol}
|
||||
${symbol} http://www.apache.org/licenses/LICENSE-2.0
|
||||
${symbol}
|
||||
${symbol} Unless required by applicable law or agreed to in writing, software
|
||||
${symbol} distributed under the License is distributed on an "AS IS" BASIS,
|
||||
${symbol} WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
${symbol} See the License for the specific language governing permissions and
|
||||
${symbol} limitations under the License.
|
||||
${end_symbol}
|
||||
"""
|
||||
)
|
||||
|
||||
# Minimum length, in characters, of a copy right header.
|
||||
MIN_HEADER_LENGTH = 500
|
||||
|
||||
OLD_COPYRIGHT_REGEX = re.compile(r'\(C\) COPYRIGHT\s+(?:(\d+)-)?(\d+)')
|
||||
COPYRIGHT_REGEX = re.compile(r'Copyright\s+(?:(\d+)\s*[-,]\s*)?(\d+) ARM Limited')
|
||||
|
||||
DEFAULT_EXCLUDE_PATHS = [
|
||||
os.path.join('wa', 'commands', 'templates'),
|
||||
]
|
||||
|
||||
|
||||
logging.basicConfig(level=logging.INFO, format='%(levelname)-8s %(message)s')
|
||||
|
||||
|
||||
def remove_old_copyright(filepath):
|
||||
begin_symbol, end_symbol, symbol = SOURCE_EXTENSIONS[ext.lower()]
|
||||
header = HEADER_TEMPLATE.substitute(begin_symbol=begin_symbol,
|
||||
end_symbol=end_symbol,
|
||||
symbol=symbol,
|
||||
year='0',
|
||||
file=os.path.basename(filepath))
|
||||
header_line_count = len(header.splitlines())
|
||||
with open(filepath) as fh:
|
||||
lines = fh.readlines()
|
||||
for i, line in enumerate(lines):
|
||||
if OLD_COPYRIGHT_REGEX.search(line):
|
||||
start_line = i -4
|
||||
break
|
||||
lines = lines[0:start_line] + lines[start_line + header_line_count:]
|
||||
return ''.join(lines)
|
||||
|
||||
|
||||
def add_copyright_header(filepath, year):
|
||||
_, ext = os.path.splitext(filepath)
|
||||
begin_symbol, end_symbol, symbol = SOURCE_EXTENSIONS[ext.lower()]
|
||||
with open(filepath) as fh:
|
||||
text = fh.read()
|
||||
match = OLD_COPYRIGHT_REGEX.search(text)
|
||||
if match:
|
||||
_, year = update_year(text, year, copyright_regex=OLD_COPYRIGHT_REGEX)
|
||||
text = remove_old_copyright(filepath)
|
||||
header = HEADER_TEMPLATE.substitute(begin_symbol=begin_symbol,
|
||||
end_symbol=end_symbol,
|
||||
symbol=symbol,
|
||||
year=year)
|
||||
if text.strip().startswith('#!') or text.strip().startswith('# -*-'):
|
||||
first_line, rest = text.split('\n', 1)
|
||||
updated_text = '\n'.join([first_line, header, rest])
|
||||
else:
|
||||
updated_text = '\n'.join([header, text])
|
||||
with open(filepath, 'w') as wfh:
|
||||
wfh.write(updated_text)
|
||||
|
||||
|
||||
def update_year(text, year, copyright_regex=COPYRIGHT_REGEX, match=None):
|
||||
if match is None:
|
||||
match = copyright_regex.search(text)
|
||||
old_year = match.group(1) or match.group(2)
|
||||
updated_year_text = 'Copyright {}-{} ARM Limited'.format(old_year, year)
|
||||
if old_year == year:
|
||||
ret_year = '{}'.format(year)
|
||||
else:
|
||||
ret_year = '{}-{}'.format(old_year, year)
|
||||
return (text.replace(match.group(0), updated_year_text), ret_year)
|
||||
|
||||
|
||||
def get_git_year(path):
|
||||
info = subprocess.check_output('git log -n 1 {}'.format(os.path.basename(path)),
|
||||
shell=True, cwd=os.path.dirname(path))
|
||||
if not info.strip():
|
||||
return None
|
||||
|
||||
i = 1
|
||||
while 'copyright' in info.lower():
|
||||
info = subprocess.check_output('git log -n 1 --skip {} {}'.format(i, os.path.basename(path)),
|
||||
shell=True, cwd=os.path.dirname(path))
|
||||
if not info.strip():
|
||||
return None
|
||||
|
||||
info_split_lines = info.split('\n')
|
||||
info_split_words = info_split_lines[2].split()
|
||||
return int(info_split_words[5])
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('path', help='Location to add copyrights to source files in.')
|
||||
parser.add_argument('-n', '--update-no-ext', action='store_true',
|
||||
help='Will update files with on textension using # as the comment symbol.')
|
||||
parser.add_argument('-x', '--exclude', action='append',
|
||||
help='Exclude this directory form the scan. May be used multiple times.')
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.update_no_ext:
|
||||
SOURCE_EXTENSIONS[''] = ('#', '#', '#')
|
||||
|
||||
exclude_paths = DEFAULT_EXCLUDE_PATHS + (args.exclude or [])
|
||||
|
||||
current_year = datetime.now().year
|
||||
for root, dirs, files in os.walk(args.path):
|
||||
should_skip = False
|
||||
for exclude_path in exclude_paths:
|
||||
if exclude_path in os.path.realpath(root):
|
||||
should_skip = True
|
||||
break
|
||||
if should_skip:
|
||||
logging.info('Skipping {}'.format(root))
|
||||
continue
|
||||
|
||||
logging.info('Checking {}'.format(root))
|
||||
for entry in files:
|
||||
_, ext = os.path.splitext(entry)
|
||||
if ext.lower() in SOURCE_EXTENSIONS:
|
||||
filepath = os.path.join(root, entry)
|
||||
should_skip = False
|
||||
for exclude_path in exclude_paths:
|
||||
if exclude_path in os.path.realpath(filepath):
|
||||
should_skip = True
|
||||
break
|
||||
if should_skip:
|
||||
logging.info('\tSkipping {}'.format(entry))
|
||||
continue
|
||||
with open(filepath) as fh:
|
||||
text = fh.read()
|
||||
if not text.strip():
|
||||
logging.info('\tSkipping empty {}'.format(entry))
|
||||
continue
|
||||
|
||||
year_modified = get_git_year(filepath) or current_year
|
||||
if len(text) < MIN_HEADER_LENGTH:
|
||||
logging.info('\tAdding header to {}'.format(entry))
|
||||
add_copyright_header(filepath, year_modified)
|
||||
else:
|
||||
first_chunk = text[:MIN_HEADER_LENGTH]
|
||||
match = COPYRIGHT_REGEX.search(first_chunk)
|
||||
if not match:
|
||||
if OLD_COPYRIGHT_REGEX.search(first_chunk):
|
||||
logging.warn('\tOld copyright message detected and replaced in {}'.format(entry))
|
||||
add_copyright_header(filepath, year_modified)
|
||||
elif '(c)' in first_chunk or '(C)' in first_chunk:
|
||||
logging.warn('\tAnother copyright header appears to be in {}'.format(entry))
|
||||
else:
|
||||
logging.info('\tAdding header to {}'.format(entry))
|
||||
add_copyright_header(filepath, current_year)
|
||||
else:
|
||||
# Found an existing copyright header. Update the
|
||||
# year if needed, otherwise, leave it alone.
|
||||
last_year = int(match.group(2))
|
||||
if year_modified > last_year:
|
||||
logging.info('\tUpdating year in {}'.format(entry))
|
||||
text, _ = update_year(text, year_modified, COPYRIGHT_REGEX, match)
|
||||
with open(filepath, 'w') as wfh:
|
||||
wfh.write(text)
|
||||
else:
|
||||
logging.info('\t{}: OK'.format(entry))
|
63
doc/Makefile
63
doc/Makefile
@ -10,14 +10,15 @@ BUILDDIR = build
|
||||
SPHINXAPI = sphinx-apidoc
|
||||
SPHINXAPIOPTS =
|
||||
|
||||
WAEXT = ./build_plugin_docs.py
|
||||
WAEXTOPTS = source/plugins ../wa ../wa/tests ../wa/framework
|
||||
WAEXT = ./build_extension_docs.py
|
||||
WAEXTOPTS = source/extensions ../wlauto ../wlauto/external ../wlauto/tests
|
||||
|
||||
|
||||
# Internal variables.
|
||||
PAPEROPT_a4 = -D latex_paper_size=a4
|
||||
PAPEROPT_letter = -D latex_paper_size=letter
|
||||
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
|
||||
ALLSPHINXAPIOPTS = -f $(SPHINXAPIOPTS) -o source/api ../wlauto
|
||||
# the i18n builder cannot share the environment and doctrees with the others
|
||||
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
|
||||
|
||||
@ -48,47 +49,61 @@ help:
|
||||
|
||||
clean:
|
||||
rm -rf $(BUILDDIR)/*
|
||||
rm -rf source/plugins/*
|
||||
rm -rf source/developer_guide/instrument_method_map.rst
|
||||
rm -rf source/run_config/*
|
||||
rm -rf source/api/*
|
||||
rm -rf source/extensions/*
|
||||
rm -rf source/instrumentation_method_map.rst
|
||||
|
||||
coverage:
|
||||
$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
|
||||
@echo
|
||||
@echo "Build finished. The coverage reports are in $(BUILDDIR)/coverage."
|
||||
|
||||
html:
|
||||
api: ../wlauto
|
||||
rm -rf source/api/*
|
||||
$(SPHINXAPI) $(ALLSPHINXAPIOPTS)
|
||||
|
||||
waext: ../wlauto
|
||||
rm -rf source/extensions
|
||||
mkdir -p source/extensions
|
||||
$(WAEXT) $(WAEXTOPTS)
|
||||
|
||||
|
||||
sigtab: ../wlauto/core/instrumentation.py source/instrumentation_method_map.template
|
||||
rm -rf source/instrumentation_method_map.rst
|
||||
./build_instrumentation_method_map.py source/instrumentation_method_map.rst
|
||||
|
||||
html: api waext sigtab
|
||||
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
|
||||
@echo
|
||||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
|
||||
|
||||
dirhtml:
|
||||
dirhtml: api waext sigtab
|
||||
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
|
||||
@echo
|
||||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
|
||||
|
||||
singlehtml:
|
||||
singlehtml: api waext sigtab
|
||||
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
|
||||
@echo
|
||||
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
|
||||
|
||||
pickle:
|
||||
pickle: api waext sigtab
|
||||
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
|
||||
@echo
|
||||
@echo "Build finished; now you can process the pickle files."
|
||||
|
||||
json:
|
||||
json: api waext sigtab
|
||||
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
|
||||
@echo
|
||||
@echo "Build finished; now you can process the JSON files."
|
||||
|
||||
htmlhelp:
|
||||
htmlhelp: api waext sigtab
|
||||
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
|
||||
@echo
|
||||
@echo "Build finished; now you can run HTML Help Workshop with the" \
|
||||
".hhp project file in $(BUILDDIR)/htmlhelp."
|
||||
|
||||
qthelp:
|
||||
qthelp: api waext sigtab
|
||||
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
|
||||
@echo
|
||||
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
|
||||
@ -97,7 +112,7 @@ qthelp:
|
||||
@echo "To view the help file:"
|
||||
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/WorkloadAutomation2.qhc"
|
||||
|
||||
devhelp:
|
||||
devhelp: api
|
||||
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
|
||||
@echo
|
||||
@echo "Build finished."
|
||||
@ -106,64 +121,64 @@ devhelp:
|
||||
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/WorkloadAutomation2"
|
||||
@echo "# devhelp"
|
||||
|
||||
epub:
|
||||
epub: api waext sigtab
|
||||
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
|
||||
@echo
|
||||
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
|
||||
|
||||
latex:
|
||||
latex: api waext sigtab
|
||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
||||
@echo
|
||||
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
|
||||
@echo "Run \`make' in that directory to run these through (pdf)latex" \
|
||||
"(use \`make latexpdf' here to do that automatically)."
|
||||
|
||||
latexpdf:
|
||||
latexpdf: api waext sigtab
|
||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
||||
@echo "Running LaTeX files through pdflatex..."
|
||||
$(MAKE) -C $(BUILDDIR)/latex all-pdf
|
||||
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
|
||||
|
||||
text:
|
||||
text: api waext sigtab
|
||||
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
|
||||
@echo
|
||||
@echo "Build finished. The text files are in $(BUILDDIR)/text."
|
||||
|
||||
man:
|
||||
man: api waext sigtab
|
||||
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
|
||||
@echo
|
||||
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
|
||||
|
||||
texinfo:
|
||||
texinfo: api waext sigtab
|
||||
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
|
||||
@echo
|
||||
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
|
||||
@echo "Run \`make' in that directory to run these through makeinfo" \
|
||||
"(use \`make info' here to do that automatically)."
|
||||
|
||||
info:
|
||||
info: api waext sigtab
|
||||
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
|
||||
@echo "Running Texinfo files through makeinfo..."
|
||||
make -C $(BUILDDIR)/texinfo info
|
||||
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
|
||||
|
||||
gettext:
|
||||
gettext: api waext sigtab
|
||||
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
|
||||
@echo
|
||||
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
|
||||
|
||||
changes:
|
||||
changes: api waext sigtab
|
||||
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
|
||||
@echo
|
||||
@echo "The overview file is in $(BUILDDIR)/changes."
|
||||
|
||||
linkcheck:
|
||||
linkcheck: api waext sigtab
|
||||
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
|
||||
@echo
|
||||
@echo "Link check complete; look for any errors in the above output " \
|
||||
"or in $(BUILDDIR)/linkcheck/output.txt."
|
||||
|
||||
doctest:
|
||||
doctest: api waext sigtab
|
||||
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
|
||||
@echo "Testing of doctests in the sources finished, look at the " \
|
||||
"results in $(BUILDDIR)/doctest/output.txt."
|
||||
|
46
doc/build_extension_docs.py
Executable file
46
doc/build_extension_docs.py
Executable file
@ -0,0 +1,46 @@
|
||||
#!/usr/bin/env python
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
from wlauto import ExtensionLoader
|
||||
from wlauto.utils.doc import get_rst_from_extension, underline
|
||||
from wlauto.utils.misc import capitalize
|
||||
|
||||
|
||||
GENERATE_FOR = ['workload', 'instrument', 'result_processor', 'device']
|
||||
|
||||
|
||||
def generate_extension_documentation(source_dir, outdir, ignore_paths):
|
||||
loader = ExtensionLoader(keep_going=True)
|
||||
loader.clear()
|
||||
loader.update(paths=[source_dir], ignore_paths=ignore_paths)
|
||||
for ext_type in loader.extension_kinds:
|
||||
if not ext_type in GENERATE_FOR:
|
||||
continue
|
||||
outfile = os.path.join(outdir, '{}s.rst'.format(ext_type))
|
||||
with open(outfile, 'w') as wfh:
|
||||
wfh.write('.. _{}s:\n\n'.format(ext_type))
|
||||
wfh.write(underline(capitalize('{}s'.format(ext_type))))
|
||||
exts = loader.list_extensions(ext_type)
|
||||
for ext in sorted(exts, key=lambda x: x.name):
|
||||
wfh.write(get_rst_from_extension(ext))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
generate_extension_documentation(sys.argv[2], sys.argv[1], sys.argv[3:])
|
31
doc/build_instrument_method_map.py → doc/build_instrumentation_method_map.py
Normal file → Executable file
31
doc/build_instrument_method_map.py → doc/build_instrumentation_method_map.py
Normal file → Executable file
@ -1,5 +1,5 @@
|
||||
#!/usr/bin/env python
|
||||
# Copyright 2015-2019 ARM Limited
|
||||
# Copyright 2015-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@ -18,19 +18,26 @@ import sys
|
||||
import string
|
||||
from copy import copy
|
||||
|
||||
from wa.framework.instrument import SIGNAL_MAP
|
||||
from wa.framework.signal import CallbackPriority
|
||||
from wa.utils.doc import format_simple_table
|
||||
|
||||
OUTPUT_TEMPLATE_FILE = os.path.join(os.path.dirname(__file__), 'source', 'instrument_method_map.template')
|
||||
from wlauto.core.instrumentation import SIGNAL_MAP, PRIORITY_MAP
|
||||
from wlauto.utils.doc import format_simple_table
|
||||
|
||||
|
||||
def generate_instrument_method_map(outfile):
|
||||
signal_table = format_simple_table([(k, v) for k, v in SIGNAL_MAP.items()],
|
||||
CONVINIENCE_ALIASES = ['initialize', 'setup', 'start', 'stop', 'process_workload_result',
|
||||
'update_result', 'teardown', 'finalize']
|
||||
|
||||
OUTPUT_TEMPLATE_FILE = os.path.join(os.path.dirname(__file__), 'source', 'instrumentation_method_map.template')
|
||||
|
||||
|
||||
def escape_trailing_underscore(value):
|
||||
if value.endswith('_'):
|
||||
return value[:-1] + '\_'
|
||||
|
||||
|
||||
def generate_instrumentation_method_map(outfile):
|
||||
signal_table = format_simple_table([(k, v) for k, v in SIGNAL_MAP.iteritems()],
|
||||
headers=['method name', 'signal'], align='<<')
|
||||
decorator_names = map(lambda x: x.replace('high', 'fast').replace('low', 'slow'), CallbackPriority.names)
|
||||
priority_table = format_simple_table(zip(decorator_names, CallbackPriority.names, CallbackPriority.values),
|
||||
headers=['decorator', 'CallbackPriority name', 'CallbackPriority value'], align='<>')
|
||||
priority_table = format_simple_table([(escape_trailing_underscore(k), v) for k, v in PRIORITY_MAP.iteritems()],
|
||||
headers=['prefix', 'priority'], align='<>')
|
||||
with open(OUTPUT_TEMPLATE_FILE) as fh:
|
||||
template = string.Template(fh.read())
|
||||
with open(outfile, 'w') as wfh:
|
||||
@ -38,4 +45,4 @@ def generate_instrument_method_map(outfile):
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
generate_instrument_method_map(sys.argv[1])
|
||||
generate_instrumentation_method_map(sys.argv[1])
|
@ -1,130 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# Copyright 2014-2019 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
from wa import pluginloader
|
||||
from wa.framework.configuration.core import RunConfiguration, MetaConfiguration
|
||||
from wa.framework.target.descriptor import list_target_descriptions
|
||||
from wa.utils.doc import (strip_inlined_text, get_rst_from_plugin,
|
||||
get_params_rst, underline, line_break)
|
||||
from wa.utils.misc import capitalize
|
||||
|
||||
GENERATE_FOR_PACKAGES = [
|
||||
'wa.workloads',
|
||||
'wa.instruments',
|
||||
'wa.output_processors',
|
||||
]
|
||||
|
||||
|
||||
def insert_contents_table(title='', depth=1):
|
||||
"""
|
||||
Insert a sphinx directive to insert a contents page with
|
||||
a configurable title and depth.
|
||||
"""
|
||||
text = '''\n
|
||||
.. contents:: {}
|
||||
:depth: {}
|
||||
:local:\n
|
||||
'''.format(title, depth)
|
||||
return text
|
||||
|
||||
|
||||
def generate_plugin_documentation(source_dir, outdir, ignore_paths):
|
||||
# pylint: disable=unused-argument
|
||||
pluginloader.clear()
|
||||
pluginloader.update(packages=GENERATE_FOR_PACKAGES)
|
||||
if not os.path.exists(outdir):
|
||||
os.mkdir(outdir)
|
||||
|
||||
for ext_type in pluginloader.kinds:
|
||||
outfile = os.path.join(outdir, '{}s.rst'.format(ext_type))
|
||||
with open(outfile, 'w') as wfh:
|
||||
wfh.write('.. _{}s:\n\n'.format(ext_type.replace('_', '-')))
|
||||
title = ' '.join([capitalize(w) for w in ext_type.split('_')])
|
||||
wfh.write(underline('{}s'.format(title)))
|
||||
wfh.write(insert_contents_table())
|
||||
wfh.write(line_break())
|
||||
exts = pluginloader.list_plugins(ext_type)
|
||||
sorted_exts = iter(sorted(exts, key=lambda x: x.name))
|
||||
try:
|
||||
wfh.write(get_rst_from_plugin(next(sorted_exts)))
|
||||
except StopIteration:
|
||||
return
|
||||
for ext in sorted_exts:
|
||||
wfh.write(line_break())
|
||||
wfh.write(get_rst_from_plugin(ext))
|
||||
|
||||
|
||||
def generate_target_documentation(outdir):
|
||||
targets_to_generate = ['generic_android',
|
||||
'generic_linux',
|
||||
'generic_chromeos',
|
||||
'generic_local',
|
||||
'juno_linux',
|
||||
'juno_android']
|
||||
|
||||
intro = (
|
||||
'\nThis is a list of commonly used targets and their device '
|
||||
'parameters, to see a complete for a complete reference please use the'
|
||||
' WA :ref:`list command <list-command>`.\n\n\n'
|
||||
)
|
||||
|
||||
pluginloader.clear()
|
||||
pluginloader.update(packages=['wa.framework.target.descriptor'])
|
||||
|
||||
target_descriptors = list_target_descriptions(pluginloader)
|
||||
outfile = os.path.join(outdir, 'targets.rst')
|
||||
with open(outfile, 'w') as wfh:
|
||||
wfh.write(underline('Common Targets'))
|
||||
wfh.write(intro)
|
||||
for td in sorted(target_descriptors, key=lambda t: t.name):
|
||||
if td.name not in targets_to_generate:
|
||||
continue
|
||||
text = underline(td.name, '~')
|
||||
if hasattr(td, 'description'):
|
||||
desc = strip_inlined_text(td.description or '')
|
||||
text += desc
|
||||
text += underline('Device Parameters:', '-')
|
||||
text += get_params_rst(td.conn_params)
|
||||
text += get_params_rst(td.platform_params)
|
||||
text += get_params_rst(td.target_params)
|
||||
text += get_params_rst(td.assistant_params)
|
||||
wfh.write(text)
|
||||
|
||||
|
||||
def generate_run_config_documentation(outdir):
|
||||
generate_config_documentation(RunConfiguration, outdir)
|
||||
|
||||
|
||||
def generate_meta_config_documentation(outdir):
|
||||
generate_config_documentation(MetaConfiguration, outdir)
|
||||
|
||||
|
||||
def generate_config_documentation(config, outdir):
|
||||
if not os.path.exists(outdir):
|
||||
os.mkdir(outdir)
|
||||
|
||||
config_name = '_'.join(config.name.split())
|
||||
outfile = os.path.join(outdir, '{}.rst'.format(config_name))
|
||||
with open(outfile, 'w') as wfh:
|
||||
wfh.write(get_params_rst(config.config_points))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
generate_plugin_documentation(sys.argv[2], sys.argv[1], sys.argv[3:])
|
263
doc/make.bat
263
doc/make.bat
@ -1,263 +0,0 @@
|
||||
@ECHO OFF
|
||||
|
||||
REM Command file for Sphinx documentation
|
||||
|
||||
if "%SPHINXBUILD%" == "" (
|
||||
set SPHINXBUILD=sphinx-build
|
||||
)
|
||||
set BUILDDIR=_build
|
||||
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
|
||||
set I18NSPHINXOPTS=%SPHINXOPTS% .
|
||||
if NOT "%PAPER%" == "" (
|
||||
set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
|
||||
set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
|
||||
)
|
||||
|
||||
if "%1" == "" goto help
|
||||
|
||||
if "%1" == "help" (
|
||||
:help
|
||||
echo.Please use `make ^<target^>` where ^<target^> is one of
|
||||
echo. html to make standalone HTML files
|
||||
echo. dirhtml to make HTML files named index.html in directories
|
||||
echo. singlehtml to make a single large HTML file
|
||||
echo. pickle to make pickle files
|
||||
echo. json to make JSON files
|
||||
echo. htmlhelp to make HTML files and a HTML help project
|
||||
echo. qthelp to make HTML files and a qthelp project
|
||||
echo. devhelp to make HTML files and a Devhelp project
|
||||
echo. epub to make an epub
|
||||
echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
|
||||
echo. text to make text files
|
||||
echo. man to make manual pages
|
||||
echo. texinfo to make Texinfo files
|
||||
echo. gettext to make PO message catalogs
|
||||
echo. changes to make an overview over all changed/added/deprecated items
|
||||
echo. xml to make Docutils-native XML files
|
||||
echo. pseudoxml to make pseudoxml-XML files for display purposes
|
||||
echo. linkcheck to check all external links for integrity
|
||||
echo. doctest to run all doctests embedded in the documentation if enabled
|
||||
echo. coverage to run coverage check of the documentation if enabled
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "clean" (
|
||||
for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
|
||||
del /q /s %BUILDDIR%\*
|
||||
goto end
|
||||
)
|
||||
|
||||
|
||||
REM Check if sphinx-build is available and fallback to Python version if any
|
||||
%SPHINXBUILD% 2> nul
|
||||
if errorlevel 9009 goto sphinx_python
|
||||
goto sphinx_ok
|
||||
|
||||
:sphinx_python
|
||||
|
||||
set SPHINXBUILD=python -m sphinx.__init__
|
||||
%SPHINXBUILD% 2> nul
|
||||
if errorlevel 9009 (
|
||||
echo.
|
||||
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
|
||||
echo.installed, then set the SPHINXBUILD environment variable to point
|
||||
echo.to the full path of the 'sphinx-build' executable. Alternatively you
|
||||
echo.may add the Sphinx directory to PATH.
|
||||
echo.
|
||||
echo.If you don't have Sphinx installed, grab it from
|
||||
echo.http://sphinx-doc.org/
|
||||
exit /b 1
|
||||
)
|
||||
|
||||
:sphinx_ok
|
||||
|
||||
|
||||
if "%1" == "html" (
|
||||
%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The HTML pages are in %BUILDDIR%/html.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "dirhtml" (
|
||||
%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "singlehtml" (
|
||||
%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "pickle" (
|
||||
%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can process the pickle files.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "json" (
|
||||
%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can process the JSON files.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "htmlhelp" (
|
||||
%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can run HTML Help Workshop with the ^
|
||||
.hhp project file in %BUILDDIR%/htmlhelp.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "qthelp" (
|
||||
%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can run "qcollectiongenerator" with the ^
|
||||
.qhcp project file in %BUILDDIR%/qthelp, like this:
|
||||
echo.^> qcollectiongenerator %BUILDDIR%\qthelp\devlib.qhcp
|
||||
echo.To view the help file:
|
||||
echo.^> assistant -collectionFile %BUILDDIR%\qthelp\devlib.ghc
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "devhelp" (
|
||||
%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "epub" (
|
||||
%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The epub file is in %BUILDDIR%/epub.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "latex" (
|
||||
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "latexpdf" (
|
||||
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
|
||||
cd %BUILDDIR%/latex
|
||||
make all-pdf
|
||||
cd %~dp0
|
||||
echo.
|
||||
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "latexpdfja" (
|
||||
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
|
||||
cd %BUILDDIR%/latex
|
||||
make all-pdf-ja
|
||||
cd %~dp0
|
||||
echo.
|
||||
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "text" (
|
||||
%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The text files are in %BUILDDIR%/text.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "man" (
|
||||
%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The manual pages are in %BUILDDIR%/man.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "texinfo" (
|
||||
%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "gettext" (
|
||||
%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "changes" (
|
||||
%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.The overview file is in %BUILDDIR%/changes.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "linkcheck" (
|
||||
%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Link check complete; look for any errors in the above output ^
|
||||
or in %BUILDDIR%/linkcheck/output.txt.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "doctest" (
|
||||
%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Testing of doctests in the sources finished, look at the ^
|
||||
results in %BUILDDIR%/doctest/output.txt.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "coverage" (
|
||||
%SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Testing of coverage in the sources finished, look at the ^
|
||||
results in %BUILDDIR%/coverage/python.txt.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "xml" (
|
||||
%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The XML files are in %BUILDDIR%/xml.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "pseudoxml" (
|
||||
%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
|
||||
goto end
|
||||
)
|
||||
|
||||
:end
|
@ -1,7 +0,0 @@
|
||||
nose
|
||||
numpy
|
||||
pandas
|
||||
sphinx_rtd_theme==1.0.0
|
||||
sphinx==4.2
|
||||
docutils<0.18
|
||||
devlib @ git+https://github.com/ARM-software/devlib@master
|
@ -1,78 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<!-- Created with Inkscape (http://www.inkscape.org/) -->
|
||||
|
||||
<svg
|
||||
xmlns:dc="http://purl.org/dc/elements/1.1/"
|
||||
xmlns:cc="http://creativecommons.org/ns#"
|
||||
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
|
||||
xmlns:svg="http://www.w3.org/2000/svg"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
|
||||
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
|
||||
width="231.99989"
|
||||
height="128.625"
|
||||
id="svg4921"
|
||||
version="1.1"
|
||||
inkscape:version="0.48.4 r9939"
|
||||
sodipodi:docname="WA-logo-black.svg">
|
||||
<defs
|
||||
id="defs4923" />
|
||||
<sodipodi:namedview
|
||||
id="base"
|
||||
pagecolor="#ffffff"
|
||||
bordercolor="#666666"
|
||||
borderopacity="1.0"
|
||||
inkscape:pageopacity="0.0"
|
||||
inkscape:pageshadow="2"
|
||||
inkscape:zoom="0.70000001"
|
||||
inkscape:cx="80.419359"
|
||||
inkscape:cy="149.66406"
|
||||
inkscape:document-units="px"
|
||||
inkscape:current-layer="layer1"
|
||||
showgrid="false"
|
||||
inkscape:window-width="1676"
|
||||
inkscape:window-height="1027"
|
||||
inkscape:window-x="0"
|
||||
inkscape:window-y="19"
|
||||
inkscape:window-maximized="0"
|
||||
fit-margin-top="0"
|
||||
fit-margin-left="0"
|
||||
fit-margin-right="0"
|
||||
fit-margin-bottom="0" />
|
||||
<metadata
|
||||
id="metadata4926">
|
||||
<rdf:RDF>
|
||||
<cc:Work
|
||||
rdf:about="">
|
||||
<dc:format>image/svg+xml</dc:format>
|
||||
<dc:type
|
||||
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
|
||||
<dc:title></dc:title>
|
||||
</cc:Work>
|
||||
</rdf:RDF>
|
||||
</metadata>
|
||||
<g
|
||||
inkscape:label="Layer 1"
|
||||
inkscape:groupmode="layer"
|
||||
id="layer1"
|
||||
transform="translate(-135.03125,-342.375)">
|
||||
<path
|
||||
style="fill:#ffffff;fill-opacity:1;stroke:none"
|
||||
d="m 239,342.375 0,11.21875 c -5.57308,1.24469 -10.80508,3.40589 -15.5,6.34375 l -8.34375,-8.34375 -15.5625,15.5625 8.28125,8.28125 c -3.25948,5.08895 -5.62899,10.81069 -6.875,16.9375 l -11,0 0,22 11.46875,0 c 1.38373,5.61408 3.71348,10.8741 6.8125,15.5625 l -8.15625,8.1875 15.5625,15.53125 8.46875,-8.46875 c 4.526,2.73972 9.527,4.77468 14.84375,5.96875 l 0,11.21875 14.59375,0 c -4.57581,-6.7196 -7.25,-14.81979 -7.25,-23.5625 0,-5.85191 1.21031,-11.43988 3.375,-16.5 -10.88114,-0.15024 -19.65625,-9.02067 -19.65625,-19.9375 0,-10.66647 8.37245,-19.40354 18.90625,-19.9375 0.3398,-0.0172 0.68717,0 1.03125,0 10.5808,0 19.2466,8.24179 19.90625,18.65625 5.54962,-2.70912 11.78365,-4.25 18.375,-4.25 7.94803,0 15.06896,2.72769 21.71875,6.0625 l 0,-10.53125 -11.03125,0 c -1.13608,-5.58713 -3.20107,-10.85298 -6.03125,-15.59375 l 8.1875,-8.21875 -15.5625,-15.53125 -7.78125,7.78125 C 272.7607,357.45113 267.0827,354.99261 261,353.625 l 0,-11.25 z m 11,46 c -7.73198,0 -14,6.26802 -14,14 0,7.732 6.26802,14 14,14 1.05628,0 2.07311,-0.12204 3.0625,-0.34375 2.84163,-4.38574 6.48859,-8.19762 10.71875,-11.25 C 263.91776,403.99646 264,403.19884 264,402.375 c 0,-7.73198 -6.26801,-14 -14,-14 z m -87.46875,13.25 -11.78125,4.78125 2.4375,6 c -2.7134,1.87299 -5.02951,4.16091 -6.90625,6.75 L 140,416.5 l -4.96875,11.6875 6.21875,2.65625 c -0.64264,3.42961 -0.65982,6.98214 0,10.53125 l -5.875,2.40625 4.75,11.78125 6.15625,-2.5 c 1.95629,2.70525 4.32606,5.00539 7,6.84375 l -2.59375,6.15625 11.6875,4.9375 2.71875,-6.34375 c 3.01575,0.48636 6.11446,0.48088 9.21875,-0.0312 l 2.4375,6 11.78125,-4.75 -2.4375,-6.03125 c 2.70845,-1.87526 5.03044,-4.16169 6.90625,-6.75 l 6.21875,2.625 4.96875,-11.6875 -6.15625,-2.625 c 0.56936,-3.04746 0.64105,-6.22008 0.1875,-9.375 l 6.125,-2.46875 -4.75,-11.78125 -5.90625,2.40625 c -1.8179,-2.74443 -4.05238,-5.13791 -6.59375,-7.0625 L 189.6875,406.9688 178,402.0313 l -2.5,5.84375 c -3.41506,-0.712 -6.97941,-0.8039 -10.53125,-0.21875 z m 165.28125,7.125 -7.09375,19.125 
-9.59375,23 -1.875,-42.0625 -14.1875,0 -18.1875,42.0625 -1.78125,-42.0625 -13.8125,0 2.5,57.875 17.28125,0 18.71875,-43.96875 1.9375,43.96875 16.90625,0 0.0312,-0.0625 2.71875,0 1.78125,-5.0625 7.90625,-22.90625 0.0312,0 1.59375,-4.65625 4.46875,-10.40625 7.46875,21.75 -11.125,0 -3.71875,10.75 18.625,0 3.625,10.53125 15,0 -21.4375,-57.875 z m -158,15.875 c 4.48547,0.0706 8.71186,2.76756 10.5,7.1875 2.38422,5.89328 -0.45047,12.61577 -6.34375,15 -5.89327,2.38421 -12.61578,-0.48172 -15,-6.375 -2.3097,-5.70909 0.29002,-12.18323 5.8125,-14.75 0.17811,-0.0828 0.34709,-0.14426 0.53125,-0.21875 1.47332,-0.59605 3.00484,-0.86727 4.5,-0.84375 z m -0.1875,3.40625 c -0.2136,5.4e-4 -0.44162,0.0134 -0.65625,0.0312 -0.79249,0.0658 -1.56779,0.24857 -2.34375,0.5625 -4.13846,1.67427 -6.14302,6.3928 -4.46875,10.53125 1.67428,4.13847 6.3928,6.14301 10.53125,4.46875 4.13847,-1.67428 6.11177,-6.3928 4.4375,-10.53125 -1.27532,-3.15234 -4.29605,-5.07059 -7.5,-5.0625 z"
|
||||
id="rect4081-3-8"
|
||||
inkscape:connector-curvature="0"
|
||||
sodipodi:nodetypes="cccccccccccccccccscscscsccccccccccsssccsscccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccssssccssssssss" />
|
||||
<g
|
||||
id="g3117"
|
||||
transform="translate(-244.99999,-214.64287)">
|
||||
<g
|
||||
transform="translate(83.928571,134.28571)"
|
||||
id="text4037-4-7"
|
||||
style="font-size:79.3801651px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:DejaVu Sans;-inkscape-font-specification:DejaVu Sans Bold" />
|
||||
<g
|
||||
transform="translate(83.928571,134.28571)"
|
||||
id="text4041-5-8"
|
||||
style="font-size:79.3801651px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:DejaVu Sans;-inkscape-font-specification:DejaVu Sans" />
|
||||
</g>
|
||||
</g>
|
||||
</svg>
|
Before (image error) Size: 5.7 KiB |
101
doc/source/additional_topics.rst
Normal file
101
doc/source/additional_topics.rst
Normal file
@ -0,0 +1,101 @@
|
||||
Additional Topics
|
||||
+++++++++++++++++
|
||||
|
||||
Modules
|
||||
=======
|
||||
|
||||
Modules are essentially plug-ins for Extensions. They provide a way of defining
|
||||
common and reusable functionality. An Extension can load zero or more modules
|
||||
during its creation. Loaded modules will then add their capabilities (see
|
||||
Capabilities_) to those of the Extension. When calling code tries to access an
|
||||
attribute of an Extension the Extension doesn't have, it will try to find the
|
||||
attribute among its loaded modules and will return that instead.
|
||||
|
||||
.. note:: Modules are themselves extensions, and can therefore load their own
|
||||
modules. *Do not* abuse this.
|
||||
|
||||
For example, calling code may wish to reboot an unresponsive device by calling
|
||||
``device.hard_reset()``, but the ``Device`` in question does not have a
|
||||
``hard_reset`` method; however the ``Device`` has loaded ``netio_switch``
|
||||
module which allows the power supply to be disabled over a network (say this device
|
||||
is in a rack and is powered through such a switch). The module has
|
||||
``reset_power`` capability (see Capabilities_ below) and so implements
|
||||
``hard_reset``. This will get invoked when ``device.hard_reset()`` is called.
|
||||
|
||||
.. note:: Modules can only extend Extensions with new attributes; they cannot
|
||||
override existing functionality. In the example above, if the
|
||||
``Device`` has implemented ``hard_reset()`` itself, then *that* will
|
||||
get invoked irrespective of which modules it has loaded.
|
||||
|
||||
If two loaded modules have the same capability or implement the same method,
|
||||
then the last module to be loaded "wins" and its method will be invoked,
|
||||
effectively overriding the module that was loaded previously.
|
||||
|
||||
Specifying Modules
|
||||
------------------
|
||||
|
||||
Modules get loaded when an Extension is instantiated by the extension loader.
|
||||
There are two ways to specify which modules should be loaded for a device.
|
||||
|
||||
|
||||
Capabilities
|
||||
============
|
||||
|
||||
Capabilities define the functionality that is implemented by an Extension,
|
||||
either within the Extension itself or through loadable modules. A capability is
|
||||
just a label, but there is an implied contract. When an Extension claims to have
|
||||
a particular capability, it promises to expose a particular set of
|
||||
functionality through a predefined interface.
|
||||
|
||||
Currently used capabilities are described below.
|
||||
|
||||
.. note:: Since capabilities are basically random strings, the user can always
|
||||
define their own; and it is then up to the user to define, enforce and
|
||||
document the contract associated with their capability. Below, are the
|
||||
"standard" capabilities used in WA.
|
||||
|
||||
|
||||
.. note:: The method signatures in the descriptions below show the calling
|
||||
signature (i.e. they're omitting the initial self parameter).
|
||||
|
||||
active_cooling
|
||||
--------------
|
||||
|
||||
Intended to be used by devices and device modules, this capability implies
|
||||
that the device implements a controllable active cooling solution (e.g.
|
||||
a programmable fan). The device/module must implement the following methods:
|
||||
|
||||
start_active_cooling()
|
||||
Active cooling is started (e.g. the fan is turned on)
|
||||
|
||||
stop_active_cooling()
|
||||
Active cooling is stopped (e.g. the fan is turned off)
|
||||
|
||||
|
||||
reset_power
|
||||
-----------
|
||||
|
||||
Intended to be used by devices and device modules, this capability implies
|
||||
that the device is capable of performing a hard reset by toggling power. The
|
||||
device/module must implement the following method:
|
||||
|
||||
hard_reset()
|
||||
The device is restarted. This method cannot rely on the device being
|
||||
responsive and must work even if the software on the device has crashed.
|
||||
|
||||
|
||||
flash
|
||||
-----
|
||||
|
||||
Intended to be used by devices and device modules, this capability implies
|
||||
that the device can be flashed with new images. The device/module must
|
||||
implement the following method:
|
||||
|
||||
flash(image_bundle=None, images=None)
|
||||
``image_bundle`` is a path to a "bundle" (e.g. a tarball) that contains
|
||||
all the images to be flashed. Which images go where must also be defined
|
||||
within the bundle. ``images`` is a dict mapping image destination (e.g.
|
||||
partition name) to the path to that specific image. Both
|
||||
``image_bundle`` and ``images`` may be specified at the same time. If
|
||||
there is overlap between the two, ``images`` wins and its contents will
|
||||
be flashed in preference to the ``image_bundle``.
|
608
doc/source/agenda.rst
Normal file
608
doc/source/agenda.rst
Normal file
@ -0,0 +1,608 @@
|
||||
.. _agenda:
|
||||
|
||||
======
|
||||
Agenda
|
||||
======
|
||||
|
||||
An agenda specifies what is to be done during a Workload Automation run,
|
||||
including which workloads will be run, with what configuration, which
|
||||
instruments and result processors will be enabled, etc. Agenda syntax is
|
||||
designed to be both succinct and expressive.
|
||||
|
||||
Agendas are specified using YAML_ notation. It is recommended that you
|
||||
familiarize yourself with the linked page.
|
||||
|
||||
.. _YAML: http://en.wikipedia.org/wiki/YAML
|
||||
|
||||
.. note:: Earlier versions of WA have supported CSV-style agendas. These were
|
||||
there to facilitate transition from WA1 scripts. The format was more
|
||||
awkward and supported only a limited subset of the features. Support
|
||||
for it has now been removed.
|
||||
|
||||
|
||||
Specifying which workloads to run
|
||||
=================================
|
||||
|
||||
The central purpose of an agenda is to specify what workloads to run. A
|
||||
minimalist agenda contains a single entry at the top level called "workloads"
|
||||
that maps onto a list of workload names to run:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
workloads:
|
||||
- dhrystone
|
||||
- memcpy
|
||||
- cyclictest
|
||||
|
||||
This specifies a WA run consisting of ``dhrystone`` followed by ``memcpy``, followed by
|
||||
``cyclictest`` workloads, and using instruments and result processors specified in
|
||||
config.py (see :ref:`configuration-specification` section).
|
||||
|
||||
.. note:: If you're familiar with YAML, you will recognize the above as a single-key
|
||||
associative array mapping onto a list. YAML has two notations for both
|
||||
associative arrays and lists: block notation (seen above) and also
|
||||
in-line notation. This means that the above agenda can also be
|
||||
written in a single line as ::
|
||||
|
||||
workloads: [dhrystone, memcpy, cyclictest]
|
||||
|
||||
(with the list in-lined), or ::
|
||||
|
||||
{workloads: [dhrystone, memcpy, cyclictest]}
|
||||
|
||||
(with both the list and the associative array in-line). WA doesn't
|
||||
care which of the notations is used as they all get parsed into the
|
||||
same structure by the YAML parser. You can use whatever format you
|
||||
find easier/clearer.
|
||||
|
||||
Multiple iterations
|
||||
-------------------
|
||||
|
||||
There will normally be some variability in workload execution when running on a
|
||||
real device. In order to quantify it, multiple iterations of the same workload
|
||||
are usually performed. You can specify the number of iterations for each
|
||||
workload by adding ``iterations`` field to the workload specifications (or
|
||||
"specs"):
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
workloads:
|
||||
- name: dhrystone
|
||||
iterations: 5
|
||||
- name: memcpy
|
||||
iterations: 5
|
||||
- name: cyclictest
|
||||
iterations: 5
|
||||
|
||||
Now that we're specifying both the workload name and the number of iterations in
|
||||
each spec, we have to explicitly name each field of the spec.
|
||||
|
||||
It is often the case that, as in the example above, you will want to run all
|
||||
workloads for the same number of iterations. Rather than having to specify it
|
||||
for each and every spec, you can do this with a single entry by adding a ``global``
|
||||
section to your agenda:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
global:
|
||||
iterations: 5
|
||||
workloads:
|
||||
- dhrystone
|
||||
- memcpy
|
||||
- cyclictest
|
||||
|
||||
The global section can contain the same fields as a workload spec. The
|
||||
fields in the global section will get added to each spec. If the same field is
|
||||
defined both in global section and in a spec, then the value in the spec will
|
||||
overwrite the global value. For example, suppose we wanted to run all our workloads
|
||||
for five iterations, except cyclictest which we want to run for ten (e.g.
|
||||
because we know it to be particularly unstable). This can be specified like
|
||||
this:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
global:
|
||||
iterations: 5
|
||||
workloads:
|
||||
- dhrystone
|
||||
- memcpy
|
||||
- name: cyclictest
|
||||
iterations: 10
|
||||
|
||||
Again, because we are now specifying two fields for cyclictest spec, we have to
|
||||
explicitly name them.
|
||||
|
||||
Configuring workloads
|
||||
---------------------
|
||||
|
||||
Some workloads accept configuration parameters that modify their behavior. These
|
||||
parameters are specific to a particular workload and can alter the workload in
|
||||
any number of ways, e.g. set the duration for which to run, or specify a media
|
||||
file to be used, etc. The vast majority of workload parameters will have some
|
||||
default value, so it is only necessary to specify the name of the workload in
|
||||
order for WA to run it. However, sometimes you want more control over how a
|
||||
workload runs.
|
||||
|
||||
For example, by default, dhrystone will execute 10 million loops across four
|
||||
threads. Suppose your device has six cores available and you want the workload to
|
||||
load them all. You also want to increase the total number of loops accordingly
|
||||
to 15 million. You can specify this using dhrystone's parameters:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
global:
|
||||
iterations: 5
|
||||
workloads:
|
||||
- name: dhrystone
|
||||
params:
|
||||
threads: 6
|
||||
mloops: 15
|
||||
- memcpy
|
||||
- name: cyclictest
|
||||
iterations: 10
|
||||
|
||||
.. note:: You can find out what parameters a workload accepts by looking it up
|
||||
in the :ref:`Workloads` section. You can also look it up using WA itself
|
||||
with "show" command::
|
||||
|
||||
wa show dhrystone
|
||||
|
||||
see the :ref:`Invocation` section for details.
|
||||
|
||||
In addition to configuring the workload itself, we can also specify
|
||||
configuration for the underlying device. This can be done by setting runtime
|
||||
parameters in the workload spec. For example, suppose we want to ensure the
|
||||
maximum score for our benchmarks, at the expense of power consumption, by
|
||||
setting the cpufreq governor to "performance" on cpu0 (assuming all our cores
|
||||
are in the same DVFS domain and so setting the governor for cpu0 will affect all
|
||||
cores). This can be done like this:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
global:
|
||||
iterations: 5
|
||||
workloads:
|
||||
- name: dhrystone
|
||||
runtime_params:
|
||||
sysfile_values:
|
||||
/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
|
||||
workload_params:
|
||||
threads: 6
|
||||
mloops: 15
|
||||
- memcpy
|
||||
- name: cyclictest
|
||||
iterations: 10
|
||||
|
||||
|
||||
Here, we're specifying ``sysfile_values`` runtime parameter for the device. The
|
||||
value for this parameter is a mapping (an associative array, in YAML) of file
|
||||
paths onto values that should be written into those files. ``sysfile_values`` is
|
||||
the only runtime parameter that is available for any (Linux) device. Other
|
||||
runtime parameters will depend on the specifics of the device used (e.g. its
|
||||
CPU cores configuration). I've renamed ``params`` to ``workload_params`` for
|
||||
clarity, but that wasn't strictly necessary as ``params`` is interpreted as
|
||||
``workload_params`` inside a workload spec.
|
||||
|
||||
.. note:: ``params`` field is interpreted differently depending on whether it's in a
|
||||
workload spec or the global section. In a workload spec, it translates to
|
||||
``workload_params``, in the global section it translates to ``runtime_params``.
|
||||
|
||||
Runtime parameters do not automatically reset at the end of workload spec
|
||||
execution, so all subsequent iterations will also be affected unless they
|
||||
explicitly change the parameter (in the example above, performance governor will
|
||||
also be used for ``memcpy`` and ``cyclictest``). There are two ways around this:
|
||||
either set ``reboot_policy`` WA setting (see :ref:`configuration-specification` section) such that
|
||||
the device gets rebooted between spec executions, thus being returned to its
|
||||
initial state, or set the default runtime parameter values in the ``global``
|
||||
section of the agenda so that they get set for every spec that doesn't
|
||||
explicitly override them.
|
||||
|
||||
.. note:: In addition to ``runtime_params`` there are also ``boot_params`` that
|
||||
work in a similar way, but they get passed to the device when it
|
||||
reboots. At the moment ``TC2`` is the only device that defines a boot
|
||||
parameter, which is explained in ``TC2`` documentation, so boot
|
||||
parameters will not be mentioned further.
|
||||
|
||||
IDs and Labels
|
||||
--------------
|
||||
|
||||
It is possible to list multiple specs with the same workload in an agenda. You
|
||||
may wish to do this if you want to run a workload with different parameter values
|
||||
or under different runtime configurations of the device. The workload name
|
||||
therefore does not uniquely identify a spec. To be able to distinguish between
|
||||
different specs (e.g. in reported results), each spec has an ID which is unique
|
||||
to all specs within an agenda (and therefore with a single WA run). If an ID
|
||||
isn't explicitly specified using ``id`` field (note that the field name is in
|
||||
lower case), one will be automatically assigned to the spec at the beginning of
|
||||
the WA run based on the position of the spec within the list. The first spec
|
||||
*without an explicit ID* will be assigned ID ``1``, the second spec *without an
|
||||
explicit ID* will be assigned ID ``2``, and so forth.
|
||||
|
||||
Numerical IDs aren't particularly easy to deal with, which is why it is
|
||||
recommended that, for non-trivial agendas, you manually set the ids to something
|
||||
more meaningful (or use labels -- see below). An ID can be pretty much anything
|
||||
that will pass through the YAML parser. The only requirement is that it is
|
||||
unique to the agenda. However, it is usually better to keep them reasonably short
|
||||
(they don't need to be *globally* unique), and to stick with alpha-numeric
|
||||
characters and underscores/dashes. While WA can handle other characters as well,
|
||||
getting too adventurous with your IDs may cause issues further down the line
|
||||
when processing WA results (e.g. when uploading them to a database that may have
|
||||
its own restrictions).
|
||||
|
||||
In addition to IDs, you can also specify labels for your workload specs. These
|
||||
are similar to IDs but do not have the uniqueness restriction. If specified,
|
||||
labels will be used by some result processors instead of (or in addition to) the
|
||||
workload name. For example, the ``csv`` result processor will put the label in the
|
||||
"workload" column of the CSV file.
|
||||
|
||||
It is up to you how you choose to use IDs and labels. WA itself doesn't expect
|
||||
any particular format (apart from uniqueness for IDs). Below is the earlier
|
||||
example updated to specify explicit IDs and label dhrystone spec to reflect
|
||||
parameters used.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
global:
|
||||
iterations: 5
|
||||
workloads:
|
||||
- id: 01_dhry
|
||||
name: dhrystone
|
||||
label: dhrystone_15over6
|
||||
runtime_params:
|
||||
sysfile_values:
|
||||
/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
|
||||
workload_params:
|
||||
threads: 6
|
||||
mloops: 15
|
||||
- id: 02_memc
|
||||
name: memcpy
|
||||
- id: 03_cycl
|
||||
name: cyclictest
|
||||
iterations: 10
|
||||
|
||||
|
||||
Result Processors and Instrumentation
|
||||
=====================================
|
||||
|
||||
Result Processors
|
||||
-----------------
|
||||
|
||||
Result processors, as the name suggests, handle the processing of results
|
||||
generated from running workload specs. By default, WA enables a couple of basic
|
||||
result processors (e.g. one generates a csv file with all scores reported by
|
||||
workloads), which you can see in ``~/.workload_automation/config.py``. However,
|
||||
WA has a number of other, more specialized, result processors (e.g. for
|
||||
uploading to databases). You can list available result processors with
|
||||
``wa list result_processors`` command. If you want to permanently enable a
|
||||
result processor, you can add it to your ``config.py``. You can also enable a
|
||||
result processor for a particular run by specifying it in the ``config`` section
|
||||
in the agenda. As the name suggests, ``config`` section mirrors the structure of
|
||||
``config.py``\ (although using YAML rather than Python), and anything that can
|
||||
be specified in the latter, can also be specified in the former.
|
||||
|
||||
As with workloads, result processors may have parameters that define their
|
||||
behavior. Parameters of result processors are specified a little differently,
|
||||
however. Result processor parameter values are listed in the config section,
|
||||
namespaced under the name of the result processor.
|
||||
|
||||
For example, suppose we want to be able to easily query the results generated by
|
||||
the workload specs we've defined so far. We can use ``sqlite`` result processor
|
||||
to have WA create an sqlite_ database file with the results. By default, this
|
||||
file will be generated in WA's output directory (at the same level as
|
||||
results.csv); but suppose we want to store the results in the same file for
|
||||
every run of the agenda we do. This can be done by specifying an alternative
|
||||
database file with ``database`` parameter of the result processor:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
result_processors: [sqlite]
|
||||
sqlite:
|
||||
database: ~/my_wa_results.sqlite
|
||||
global:
|
||||
iterations: 5
|
||||
workloads:
|
||||
- id: 01_dhry
|
||||
name: dhrystone
|
||||
label: dhrystone_15over6
|
||||
runtime_params:
|
||||
sysfile_values:
|
||||
/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
|
||||
workload_params:
|
||||
threads: 6
|
||||
mloops: 15
|
||||
- id: 02_memc
|
||||
name: memcpy
|
||||
- id: 03_cycl
|
||||
name: cyclictest
|
||||
iterations: 10
|
||||
|
||||
A couple of things to observe here:
|
||||
|
||||
- There is no need to repeat the result processors listed in ``config.py``. The
|
||||
processors listed in ``result_processors`` entry in the agenda will be used
|
||||
*in addition to* those defined in the ``config.py``.
|
||||
- The database file is specified under "sqlite" entry in the config section.
|
||||
Note, however, that this entry alone is not enough to enable the result
|
||||
processor, it must be listed in ``result_processors``, otherwise the "sqlite"
|
||||
config entry will be ignored.
|
||||
- The database file must be specified as an absolute path, however it may use
|
||||
the user home specifier '~' and/or environment variables.
|
||||
|
||||
.. _sqlite: http://www.sqlite.org/
|
||||
|
||||
|
||||
Instrumentation
|
||||
---------------
|
||||
|
||||
WA can enable various "instruments" to be used during workload execution.
|
||||
Instruments can be quite diverse in their functionality, but the majority of
|
||||
instruments available in WA today are there to collect additional data (such as
|
||||
trace) from the device during workload execution. You can view the list of
|
||||
available instruments by using ``wa list instruments`` command. As with result
|
||||
processors, a few are enabled by default in the ``config.py`` and additional
|
||||
ones may be added in the same place, or specified in the agenda using
|
||||
``instrumentation`` entry.
|
||||
|
||||
For example, we can collect core utilisation statistics (for what proportion of
|
||||
workload execution N cores were utilized above a specified threshold) using
|
||||
``coreutil`` instrument.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
instrumentation: [coreutil]
|
||||
coreutil:
|
||||
threshold: 80
|
||||
result_processors: [sqlite]
|
||||
sqlite:
|
||||
database: ~/my_wa_results.sqlite
|
||||
global:
|
||||
iterations: 5
|
||||
workloads:
|
||||
- id: 01_dhry
|
||||
name: dhrystone
|
||||
label: dhrystone_15over6
|
||||
runtime_params:
|
||||
sysfile_values:
|
||||
/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
|
||||
workload_params:
|
||||
threads: 6
|
||||
mloops: 15
|
||||
- id: 02_memc
|
||||
name: memcpy
|
||||
- id: 03_cycl
|
||||
name: cyclictest
|
||||
iterations: 10
|
||||
|
||||
Instrumentation isn't "free" and it is advisable not to have too many
|
||||
instruments enabled at once as that might skew results. For example, you don't
|
||||
want to have power measurement enabled at the same time as event tracing, as the
|
||||
latter may prevent cores from going into idle states and thus affect the
|
||||
readings collected by the former.
|
||||
|
||||
Unlike result processors, instrumentation may be enabled (and disabled -- see below)
|
||||
on per-spec basis. For example, suppose we want to collect /proc/meminfo from the
|
||||
device when we run ``memcpy`` workload, but not for the other two. We can do that using
|
||||
``sysfs_extractor`` instrument, and we will only enable it for ``memcpy``:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
instrumentation: [coreutil]
|
||||
coreutil:
|
||||
threshold: 80
|
||||
sysfs_extractor:
|
||||
paths: [/proc/meminfo]
|
||||
result_processors: [sqlite]
|
||||
sqlite:
|
||||
database: ~/my_wa_results.sqlite
|
||||
global:
|
||||
iterations: 5
|
||||
workloads:
|
||||
- id: 01_dhry
|
||||
name: dhrystone
|
||||
label: dhrystone_15over6
|
||||
runtime_params:
|
||||
sysfile_values:
|
||||
/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
|
||||
workload_params:
|
||||
threads: 6
|
||||
mloops: 15
|
||||
- id: 02_memc
|
||||
name: memcpy
|
||||
instrumentation: [sysfs_extractor]
|
||||
- id: 03_cycl
|
||||
name: cyclictest
|
||||
iterations: 10
|
||||
|
||||
As with ``config`` sections, ``instrumentation`` entry in the spec needs only to
|
||||
list additional instruments and does not need to repeat instruments specified
|
||||
elsewhere.
|
||||
|
||||
.. note:: At present, it is only possible to enable/disable instrumentation on
|
||||
per-spec basis. It is *not* possible to provide configuration on
|
||||
per-spec basis in the current version of WA (e.g. in our example, it
|
||||
is not possible to specify different ``sysfs_extractor`` paths for
|
||||
different workloads). This restriction may be lifted in future
|
||||
versions of WA.
|
||||
|
||||
Disabling result processors and instrumentation
|
||||
-----------------------------------------------
|
||||
|
||||
As seen above, extensions specified with ``instrumentation`` and
|
||||
``result_processor`` clauses get added to those already specified previously.
|
||||
Just because an instrument specified in ``config.py`` is not listed in the
|
||||
``config`` section of the agenda, does not mean it will be disabled. If you do
|
||||
want to disable an instrument, you can always remove/comment it out from
|
||||
``config.py``. However that will be introducing a permanent configuration change
|
||||
to your environment (one that can be easily reverted, but may be just as
|
||||
easily forgotten). If you want to temporarily disable a result processor or an
|
||||
instrument for a particular run, you can do that in your agenda by prepending a
|
||||
tilde (``~``) to its name.
|
||||
|
||||
For example, let's say we want to disable ``cpufreq`` instrument enabled in our
|
||||
``config.py`` (suppose we're going to send results via email and so want to
|
||||
reduce the total size of the output directory):
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
instrumentation: [coreutil, ~cpufreq]
|
||||
coreutil:
|
||||
threshold: 80
|
||||
sysfs_extractor:
|
||||
paths: [/proc/meminfo]
|
||||
result_processors: [sqlite]
|
||||
sqlite:
|
||||
database: ~/my_wa_results.sqlite
|
||||
global:
|
||||
iterations: 5
|
||||
workloads:
|
||||
- id: 01_dhry
|
||||
name: dhrystone
|
||||
label: dhrystone_15over6
|
||||
runtime_params:
|
||||
sysfile_values:
|
||||
/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
|
||||
workload_params:
|
||||
threads: 6
|
||||
mloops: 15
|
||||
- id: 02_memc
|
||||
name: memcpy
|
||||
instrumentation: [sysfs_extractor]
|
||||
- id: 03_cycl
|
||||
name: cyclictest
|
||||
iterations: 10
|
||||
|
||||
|
||||
Sections
|
||||
========
|
||||
|
||||
It is a common requirement to be able to run the same set of workloads under
|
||||
different device configurations. E.g. you may want to investigate impact of
|
||||
changing a particular setting to different values on the benchmark scores, or to
|
||||
quantify the impact of enabling a particular feature in the kernel. WA allows
|
||||
this by defining "sections" of configuration within an agenda.
|
||||
|
||||
For example, suppose what we really want, is to measure the impact of using
|
||||
interactive cpufreq governor vs the performance governor on the three
|
||||
benchmarks. We could create another three workload spec entries similar to the
|
||||
ones we already have and change the sysfile value being set to "interactive".
|
||||
However, this introduces a lot of duplication; and what if we want to change
|
||||
spec configuration? We would have to change it in multiple places, running the
|
||||
risk of forgetting one.
|
||||
|
||||
A better way is to keep the three workload specs and define a section for each
|
||||
governor:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
instrumentation: [coreutil, ~cpufreq]
|
||||
coreutil:
|
||||
threshold: 80
|
||||
sysfs_extractor:
|
||||
paths: [/proc/meminfo]
|
||||
result_processors: [sqlite]
|
||||
sqlite:
|
||||
database: ~/my_wa_results.sqlite
|
||||
global:
|
||||
iterations: 5
|
||||
sections:
|
||||
- id: perf
|
||||
runtime_params:
|
||||
sysfile_values:
|
||||
/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
|
||||
- id: inter
|
||||
runtime_params:
|
||||
sysfile_values:
|
||||
/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: interactive
|
||||
workloads:
|
||||
- id: 01_dhry
|
||||
name: dhrystone
|
||||
label: dhrystone_15over6
|
||||
workload_params:
|
||||
threads: 6
|
||||
mloops: 15
|
||||
- id: 02_memc
|
||||
name: memcpy
|
||||
instrumentation: [sysfs_extractor]
|
||||
- id: 03_cycl
|
||||
name: cyclictest
|
||||
iterations: 10
|
||||
|
||||
A section, just like a workload spec, needs to have a unique ID. Apart from
|
||||
that, a "section" is similar to the ``global`` section we've already seen --
|
||||
everything that goes into a section will be applied to each workload spec.
|
||||
Workload specs defined under top-level ``workloads`` entry will be executed for
|
||||
each of the sections listed under ``sections``.
|
||||
|
||||
.. note:: It is also possible to have a ``workloads`` entry within a section,
|
||||
in which case, those workloads will only be executed for that specific
|
||||
section.
|
||||
|
||||
In order to maintain the uniqueness requirement of workload spec IDs, they will
|
||||
be namespaced under each section by prepending the section ID to the spec ID
|
||||
with an underscore. So in the agenda above, we no longer have a workload spec
|
||||
with ID ``01_dhry``, instead there are two specs with IDs ``perf_01_dhry`` and
|
||||
``inter_01_dhry``.
|
||||
|
||||
Note that the ``global`` section still applies to every spec in the agenda. So
|
||||
the precedence order is -- spec settings override section settings, which in
|
||||
turn override global settings.
|
||||
|
||||
|
||||
Other Configuration
|
||||
===================
|
||||
|
||||
.. _configuration_in_agenda:
|
||||
|
||||
As mentioned previously, ``config`` section in an agenda can contain anything
|
||||
that can be defined in ``config.py`` (with Python syntax translated to the
|
||||
equivalent YAML). Certain configuration (e.g. ``run_name``) makes more sense
|
||||
to define in an agenda than a config file. Refer to the
|
||||
:ref:`configuration-specification` section for details.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
project: governor_comparison
|
||||
run_name: performance_vs_interactive
|
||||
|
||||
device: generic_android
|
||||
reboot_policy: never
|
||||
|
||||
instrumentation: [coreutil, ~cpufreq]
|
||||
coreutil:
|
||||
threshold: 80
|
||||
sysfs_extractor:
|
||||
paths: [/proc/meminfo]
|
||||
result_processors: [sqlite]
|
||||
sqlite:
|
||||
database: ~/my_wa_results.sqlite
|
||||
global:
|
||||
iterations: 5
|
||||
sections:
|
||||
- id: perf
|
||||
runtime_params:
|
||||
sysfile_values:
|
||||
/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
|
||||
- id: inter
|
||||
runtime_params:
|
||||
sysfile_values:
|
||||
/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: interactive
|
||||
workloads:
|
||||
- id: 01_dhry
|
||||
name: dhrystone
|
||||
label: dhrystone_15over6
|
||||
workload_params:
|
||||
threads: 6
|
||||
mloops: 15
|
||||
- id: 02_memc
|
||||
name: memcpy
|
||||
instrumentation: [sysfs_extractor]
|
||||
- id: 03_cycl
|
||||
name: cyclictest
|
||||
iterations: 10
|
||||
|
@ -1,9 +0,0 @@
|
||||
Workload Automation API
|
||||
=======================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
api/output
|
||||
|
||||
api/workload
|
@ -1,696 +0,0 @@
|
||||
.. _output_processing_api:
|
||||
|
||||
Output
|
||||
======
|
||||
|
||||
A WA output directory can be accessed via a :class:`RunOutput` object. There are
|
||||
two ways of getting one -- either instantiate it with a path to a WA output
|
||||
directory, or use :func:`discover_wa_outputs` to traverse a directory tree
|
||||
iterating over all WA output directories found.
|
||||
|
||||
.. function:: discover_wa_outputs(path)
|
||||
|
||||
Recursively traverse ``path`` looking for WA output directories. Return
|
||||
an iterator over :class:`RunOutput` objects for each discovered output.
|
||||
|
||||
:param path: The directory to scan for WA output
|
||||
|
||||
|
||||
.. class:: RunOutput(path)
|
||||
|
||||
The main interface into a WA output directory.
|
||||
|
||||
:param path: must be the path to the top-level output directory (the one
|
||||
containing ``__meta`` subdirectory and ``run.log``).
|
||||
|
||||
WA output stored in a Postgres database by the ``Postgres`` output processor
|
||||
can be accessed via a :class:`RunDatabaseOutput` which can be initialized as follows:
|
||||
|
||||
.. class:: RunDatabaseOutput(password, host='localhost', user='postgres', port='5432', dbname='wa', run_uuid=None, list_runs=False)
|
||||
|
||||
The main interface into Postgres database containing WA results.
|
||||
|
||||
:param password: The password used to authenticate with
|
||||
:param host: The database host address. Defaults to ``'localhost'``
|
||||
:param user: The user name used to authenticate with. Defaults to ``'postgres'``
|
||||
:param port: The database connection port number. Defaults to ``'5432'``
|
||||
:param dbname: The database name. Defaults to ``'wa'``
|
||||
:param run_uuid: The ``run_uuid`` to identify the selected run
|
||||
:param list_runs: Will connect to the database and will print out the available runs
|
||||
with their corresponding run_uuids. Defaults to ``False``
|
||||
|
||||
|
||||
Example
|
||||
-------
|
||||
|
||||
.. seealso:: :ref:`processing_output`
|
||||
|
||||
To demonstrate how we can use the output API if we have an existing WA output
|
||||
called ``wa_output`` in the current working directory we can initialize a
|
||||
``RunOutput`` as follows:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
In [1]: from wa import RunOutput
|
||||
...:
|
||||
...: output_directory = 'wa_output'
|
||||
...: run_output = RunOutput(output_directory)
|
||||
|
||||
Alternatively if the results have been stored in a Postgres database we can
|
||||
initialize a ``RunDatabaseOutput`` as follows:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
In [1]: from wa import RunDatabaseOutput
|
||||
...:
|
||||
...: db_settings = {
|
||||
...: host: 'localhost',
|
||||
...: port: '5432',
|
||||
...: dbname: 'wa'
|
||||
...: user: 'postgres',
|
||||
...: password: 'wa'
|
||||
...: }
|
||||
...:
|
||||
...: RunDatabaseOutput(list_runs=True, **db_settings)
|
||||
Available runs are:
|
||||
========= ============ ============= =================== =================== ====================================
|
||||
Run Name Project Project Stage Start Time End Time run_uuid
|
||||
========= ============ ============= =================== =================== ====================================
|
||||
Test Run my_project None 2018-11-29 14:53:08 2018-11-29 14:53:24 aa3077eb-241a-41d3-9610-245fd4e552a9
|
||||
run_1 my_project None 2018-11-29 14:53:34 2018-11-29 14:53:37 4c2885c9-2f4a-49a1-bbc5-b010f8d6b12a
|
||||
========= ============ ============= =================== =================== ====================================
|
||||
|
||||
In [2]: run_uuid = '4c2885c9-2f4a-49a1-bbc5-b010f8d6b12a'
|
||||
...: run_output = RunDatabaseOutput(run_uuid=run_uuid, **db_settings)
|
||||
|
||||
|
||||
From here we can retrieve various information about the run. For example if we
|
||||
want to see what the overall status of the run was, along with the runtime
|
||||
parameters and the metrics recorded from the first job was we can do the following:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
In [2]: run_output.status
|
||||
Out[2]: OK(7)
|
||||
|
||||
# List all of the jobs for the run
|
||||
In [3]: run_output.jobs
|
||||
Out[3]:
|
||||
[<wa.framework.output.JobOutput at 0x7f70358a1f10>,
|
||||
<wa.framework.output.JobOutput at 0x7f70358a1150>,
|
||||
<wa.framework.output.JobOutput at 0x7f7035862810>,
|
||||
<wa.framework.output.JobOutput at 0x7f7035875090>]
|
||||
|
||||
# Examine the first job that was ran
|
||||
In [4]: job_1 = run_output.jobs[0]
|
||||
|
||||
In [5]: job_1.label
|
||||
Out[5]: u'dhrystone'
|
||||
|
||||
# Print out all the runtime parameters and their values for this job
|
||||
In [6]: for k, v in job_1.spec.runtime_parameters.items():
|
||||
...: print (k, v)
|
||||
(u'airplane_mode': False)
|
||||
(u'brightness': 100)
|
||||
(u'governor': 'userspace')
|
||||
(u'big_frequency': 1700000)
|
||||
(u'little_frequency': 1400000)
|
||||
|
||||
# Print out all the metrics available for this job
|
||||
In [7]: job_1.metrics
|
||||
Out[7]:
|
||||
[<thread 0 score: 14423105 (+)>,
|
||||
<thread 0 DMIPS: 8209 (+)>,
|
||||
<thread 1 score: 14423105 (+)>,
|
||||
<thread 1 DMIPS: 8209 (+)>,
|
||||
<thread 2 score: 14423105 (+)>,
|
||||
<thread 2 DMIPS: 8209 (+)>,
|
||||
<thread 3 score: 18292638 (+)>,
|
||||
<thread 3 DMIPS: 10411 (+)>,
|
||||
<thread 4 score: 17045532 (+)>,
|
||||
<thread 4 DMIPS: 9701 (+)>,
|
||||
<thread 5 score: 14150917 (+)>,
|
||||
<thread 5 DMIPS: 8054 (+)>,
|
||||
<time: 0.184497 seconds (-)>,
|
||||
<total DMIPS: 52793 (+)>,
|
||||
<total score: 92758402 (+)>]
|
||||
|
||||
# Load the run results csv file into pandas
|
||||
In [7]: pd.read_csv(run_output.get_artifact_path('run_result_csv'))
|
||||
Out[7]:
|
||||
id workload iteration metric value units
|
||||
0 450000-wk1 dhrystone 1 thread 0 score 1.442310e+07 NaN
|
||||
1 450000-wk1 dhrystone 1 thread 0 DMIPS 8.209700e+04 NaN
|
||||
2 450000-wk1 dhrystone 1 thread 1 score 1.442310e+07 NaN
|
||||
3 450000-wk1 dhrystone 1 thread 1 DMIPS 8.720900e+04 NaN
|
||||
...
|
||||
|
||||
|
||||
We can also retrieve information about the target that the run was performed on
|
||||
for example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# Print out the target's abi:
|
||||
In [9]: run_output.target_info.abi
|
||||
Out[9]: u'arm64'
|
||||
|
||||
# The os the target was running
|
||||
In [9]: run_output.target_info.os
|
||||
Out[9]: u'android'
|
||||
|
||||
# And other information about the os version
|
||||
In [10]: run_output.target_info.os_version
|
||||
Out[10]:
|
||||
OrderedDict([(u'all_codenames', u'REL'),
|
||||
(u'incremental', u'3687331'),
|
||||
(u'preview_sdk', u'0'),
|
||||
(u'base_os', u''),
|
||||
(u'release', u'7.1.1'),
|
||||
(u'codename', u'REL'),
|
||||
(u'security_patch', u'2017-03-05'),
|
||||
(u'sdk', u'25')])
|
||||
|
||||
|
||||
|
||||
:class:`RunOutput`
|
||||
------------------
|
||||
|
||||
:class:`RunOutput` provides access to the output of a WA :term:`run`, including metrics,
|
||||
artifacts, metadata, and configuration. It has the following attributes:
|
||||
|
||||
|
||||
``jobs``
|
||||
A list of :class:`JobOutput` objects for each job that was executed during
|
||||
the run.
|
||||
|
||||
``status``
|
||||
Run status. This indicates whether the run has completed without problems
|
||||
(``Status.OK``) or if there were issues.
|
||||
|
||||
``metrics``
|
||||
A list of :class:`Metric`\ s for the run.
|
||||
|
||||
.. note:: these are *overall run* metrics only. Metrics for individual
|
||||
jobs are contained within the corresponding :class:`JobOutput`\ s.
|
||||
|
||||
``artifacts``
|
||||
A list of :class:`Artifact`\ s for the run. These are usually backed by a
|
||||
file and can contain traces, raw data, logs, etc.
|
||||
|
||||
.. note:: these are *overall run* artifacts only. Artifacts for individual
|
||||
jobs are contained within the corresponding :class:`JobOutput`\ s.
|
||||
|
||||
``info``
|
||||
A :ref:`RunInfo <run-info-api>` object that contains information about the run
|
||||
itself, for example its duration, name, uuid etc.
|
||||
|
||||
``target_info``
|
||||
A :ref:`TargetInfo <target-info-api>` object which can be used to access
|
||||
various information about the target that was used during the run for example
|
||||
its ``abi``, ``hostname``, ``os`` etc.
|
||||
|
||||
``run_config``
|
||||
A :ref:`RunConfiguration <run-configuration>` object that can be used to
|
||||
access all the configuration of the run itself, for example the
|
||||
``reboot_policy``, ``execution_order``, ``device_config`` etc.
|
||||
|
||||
``classifiers``
|
||||
:ref:`classifiers <classifiers>` defined for the entire run.
|
||||
|
||||
``metadata``
|
||||
:ref:`metadata <metadata>` associated with the run.
|
||||
|
||||
``events``
|
||||
A list of any events logged during the run, that are not associated with a
|
||||
particular job.
|
||||
|
||||
``event_summary``
|
||||
A condensed summary of any events that occurred during the run.
|
||||
|
||||
``augmentations``
|
||||
A list of the :term:`augmentation`\ s that were enabled during the run (these
|
||||
augmentations may or may not have been active for a particular job).
|
||||
|
||||
``basepath``
|
||||
A (relative) path to the WA output directory backing this object.
|
||||
|
||||
|
||||
methods
|
||||
~~~~~~~
|
||||
|
||||
.. method:: RunOutput.get_artifact(name)
|
||||
|
||||
Return the :class:`Artifact` specified by ``name``. This will only look
|
||||
at the run artifacts; this will not search the artifacts of the individual
|
||||
jobs.
|
||||
|
||||
:param name: The name of the artifact whose path to retrieve.
|
||||
:return: The :class:`Artifact` with that name
|
||||
:raises HostError: If the artifact with the specified name does not exist.
|
||||
|
||||
|
||||
.. method:: RunOutput.get_artifact_path(name)
|
||||
|
||||
Return the path to the file backing the artifact specified by ``name``. This
|
||||
will only look at the run artifacts; this will not search the artifacts of
|
||||
the individual jobs.
|
||||
|
||||
:param name: The name of the artifact whose path to retrieve.
|
||||
:return: The path to the artifact
|
||||
:raises HostError: If the artifact with the specified name does not exist.
|
||||
|
||||
|
||||
.. method:: RunOutput.get_metric(name)
|
||||
|
||||
Return the :class:`Metric` associated with the run (not the individual jobs)
|
||||
with the specified `name`.
|
||||
|
||||
:return: The :class:`Metric` object for the metric with the specified name.
|
||||
|
||||
|
||||
.. method:: RunOutput.get_job_spec(spec_id)
|
||||
|
||||
Return the :class:`JobSpec` with the specified `spec_id`. A :term:`spec`
|
||||
describes the job to be executed. Each :class:`Job` has an associated
|
||||
:class:`JobSpec`, though a single :term:`spec` can be associated with
|
||||
multiple :term:`job`\ s (If the :term:`spec` specifies multiple iterations).
|
||||
|
||||
.. method:: RunOutput.list_workloads()
|
||||
|
||||
List unique workload labels that featured in this run. The labels will be
|
||||
in the order in which they first ran.
|
||||
|
||||
:return: A list of `str` labels of workloads that were part of this run.
|
||||
|
||||
|
||||
.. method:: RunOutput.add_classifier(name, value, overwrite=False)
|
||||
|
||||
Add a classifier to the run as a whole. If a classifier with the specified
|
||||
``name`` already exists, a ``ValueError`` will be raised, unless
|
||||
`overwrite=True` is specified.
|
||||
|
||||
|
||||
:class:`RunDatabaseOutput`
|
||||
---------------------------
|
||||
|
||||
:class:`RunDatabaseOutput` provides access to the output of a WA :term:`run`,
|
||||
including metrics, artifacts, metadata, and configuration stored in a postgres database.
|
||||
The majority of attributes and methods are the same as :class:`RunOutput`, however the
|
||||
noticeable differences are:
|
||||
|
||||
``jobs``
|
||||
A list of :class:`JobDatabaseOutput` objects for each job that was executed
|
||||
during the run.
|
||||
|
||||
``basepath``
|
||||
A representation of the current database and host information backing this object.
|
||||
|
||||
methods
|
||||
~~~~~~~
|
||||
|
||||
.. method:: RunDatabaseOutput.get_artifact(name)
|
||||
|
||||
Return the :class:`Artifact` specified by ``name``. This will only look
|
||||
at the run artifacts; this will not search the artifacts of the individual
|
||||
jobs. The `path` attribute of the :class:`Artifact` will be set to the Database OID of the object.
|
||||
|
||||
:param name: The name of the artifact whose path to retrieve.
|
||||
:return: The :class:`Artifact` with that name
|
||||
:raises HostError: If the artifact with the specified name does not exist.
|
||||
|
||||
|
||||
.. method:: RunDatabaseOutput.get_artifact_path(name)
|
||||
|
||||
If the artifact is a file this method returns a `StringIO` object containing
|
||||
the contents of the artifact specified by ``name``. If the artifact is a
|
||||
directory, the method returns a path to a locally extracted version of the
|
||||
directory which is left to the user to remove after use. This will only look
|
||||
at the run artifacts; this will not search the artifacts of the individual
|
||||
jobs.
|
||||
|
||||
:param name: The name of the artifact whose path to retrieve.
|
||||
:return: A `StringIO` object with the contents of the artifact
|
||||
:raises HostError: If the artifact with the specified name does not exist.
|
||||
|
||||
|
||||
:class:`JobOutput`
|
||||
------------------
|
||||
|
||||
:class:`JobOutput` provides access to the output of a single :term:`job`
|
||||
executed during a WA :term:`run`, including metrics,
|
||||
artifacts, metadata, and configuration. It has the following attributes:
|
||||
|
||||
``status``
|
||||
Job status. This indicates whether the job has completed without problems
|
||||
(``Status.OK``) or if there were issues.
|
||||
|
||||
.. note:: Under typical configuration, WA will make a number of attempts to
|
||||
re-run a job in case of issue. This status (and the rest of the
|
||||
output) will represent the latest attempt. I.e. a
|
||||
``Status.OK`` indicates that the latest attempt was successful,
|
||||
but it does not mean that there weren't prior failures. You can check
|
||||
the ``retry`` attribute (see below) to see whether this was the first
|
||||
attempt or not.
|
||||
|
||||
``retry``
|
||||
Retry number for this job. If a problem is detected during job execution, the
|
||||
job will be re-run up to :confval:`max_retries` times. This indicates the
|
||||
final retry number for the output. A value of ``0`` indicates that the job
|
||||
succeeded on the first attempt, and no retries were necessary.
|
||||
|
||||
.. note:: Outputs for previous attempts are moved into ``__failed``
|
||||
subdirectory of WA output. These are currently not exposed via the
|
||||
API.
|
||||
|
||||
``id``
|
||||
The ID of the :term:`spec` associated with this job. This ID is unique to
|
||||
the spec, but not necessarily to the job -- jobs representing multiple
|
||||
iterations of the same spec will share the ID.
|
||||
|
||||
``iteration``
|
||||
The iteration number of this job. Together with the ``id`` (above), this
|
||||
uniquely identifies a job within a run.
|
||||
|
||||
``label``
|
||||
The workload label associated with this job. Usually, this will be the name
|
||||
or :term:`alias` of the workload, however it may be overwritten by the user in
|
||||
the :term:`agenda`.
|
||||
|
||||
``metrics``
|
||||
A list of :class:`Metric`\ s for the job.
|
||||
|
||||
``artifacts``
|
||||
A list of :class:`Artifact`\ s for the job. These are usually backed by a
|
||||
file and can contain traces, raw data, logs, etc.
|
||||
|
||||
``classifiers``
|
||||
:ref:`classifiers <classifiers>` defined for the job.
|
||||
|
||||
``metadata``
|
||||
:ref:`metadata <metadata>` associated with the job.
|
||||
|
||||
``events``
|
||||
A list of any events logged during the execution of the job.
|
||||
|
||||
``event_summary``
|
||||
A condensed summary of any events that occurred during the execution of the
|
||||
job.
|
||||
|
||||
``augmentations``
|
||||
A list of the :term:`augmentation`\ s that were enabled for this job. This may
|
||||
be different from overall augmentations specified for the run, as they may be
|
||||
enabled/disabled on per-job basis.
|
||||
|
||||
``basepath``
|
||||
A (relative) path to the WA output directory backing this object.
|
||||
|
||||
|
||||
methods
|
||||
~~~~~~~
|
||||
|
||||
.. method:: JobOutput.get_artifact(name)
|
||||
|
||||
Return the :class:`Artifact` specified by ``name`` associated with this job.
|
||||
|
||||
:param name: The name of the artifact to retrieve.
|
||||
:return: The :class:`Artifact` with that name
|
||||
:raises HostError: If the artifact with the specified name does not exist.
|
||||
|
||||
.. method:: JobOutput.get_artifact_path(name)
|
||||
|
||||
Return the path to the file backing the artifact specified by ``name``,
|
||||
associated with this job.
|
||||
|
||||
:param name: The name of the artifact whose path to retrieve.
|
||||
:return: The path to the artifact
|
||||
:raises HostError: If the artifact with the specified name does not exist.
|
||||
|
||||
.. method:: JobOutput.get_metric(name)
|
||||
|
||||
Return the :class:`Metric` associated with this job with the specified
|
||||
`name`.
|
||||
|
||||
:return: The :class:`Metric` object for the metric with the specified name.
|
||||
|
||||
.. method:: JobOutput.add_classifier(name, value, overwrite=False)
|
||||
|
||||
Add a classifier to the job. The classifier will be propagated to all
|
||||
existing artifacts and metrics, as well as those added afterwards. If a
|
||||
classifier with the specified ``name`` already exists, a ``ValueError`` will
|
||||
be raised, unless `overwrite=True` is specified.
|
||||
|
||||
|
||||
:class:`JobDatabaseOutput`
|
||||
---------------------------
|
||||
|
||||
:class:`JobDatabaseOutput` provides access to the output of a single :term:`job`
|
||||
executed during a WA :term:`run`, including metrics, artifacts, metadata, and
|
||||
configuration stored in a postgres database.
|
||||
The majority of attributes and methods are the same as :class:`JobOutput`, however the
|
||||
noticeable differences are:
|
||||
|
||||
``basepath``
|
||||
A representation of the current database and host information backing this object.
|
||||
|
||||
|
||||
methods
|
||||
~~~~~~~
|
||||
|
||||
.. method:: JobDatabaseOutput.get_artifact(name)
|
||||
|
||||
Return the :class:`Artifact` specified by ``name`` associated with this job.
|
||||
The `path` attribute of the :class:`Artifact` will be set to the Database
|
||||
OID of the object.
|
||||
|
||||
:param name: The name of the artifact to retrieve.
|
||||
:return: The :class:`Artifact` with that name
|
||||
:raises HostError: If the artifact with the specified name does not exist.
|
||||
|
||||
.. method:: JobDatabaseOutput.get_artifact_path(name)
|
||||
|
||||
If the artifact is a file this method returns a `StringIO` object containing
|
||||
the contents of the artifact specified by ``name`` associated with this job.
|
||||
If the artifact is a directory, the method returns a path to a locally
|
||||
extracted version of the directory which is left to the user to remove after
|
||||
use.
|
||||
|
||||
:param name: The name of the artifact whose path to retrieve.
|
||||
:return: A `StringIO` object with the contents of the artifact
|
||||
:raises HostError: If the artifact with the specified name does not exist.
|
||||
|
||||
|
||||
:class:`Metric`
|
||||
---------------
|
||||
|
||||
A metric represents a single numerical measurement/score collected as a result of
|
||||
running the workload. It would be generated either by the workload or by one of
|
||||
the augmentations active during the execution of the workload.
|
||||
|
||||
A :class:`Metric` has the following attributes:
|
||||
|
||||
``name``
|
||||
The name of the metric.
|
||||
|
||||
.. note:: A name of the metric is not necessarily unique, even for the same
|
||||
job. Some workloads internally run multiple sub-tests, each
|
||||
generating a metric with the same name. In such cases,
|
||||
:term:`classifier`\ s are used to distinguish between them.
|
||||
|
||||
``value``
|
||||
The value of the metrics collected.
|
||||
|
||||
|
||||
``units``
|
||||
The units of the metrics. This may be ``None`` if the metric has no units.
|
||||
|
||||
|
||||
``lower_is_better``
|
||||
The default assumption is that higher metric values are better. This may be
|
||||
overridden by setting this to ``True``, e.g. for metrics such as "run time"
|
||||
or "latency". WA does not use this internally (at the moment) but this may
|
||||
be used by external parties to sensibly process WA results in a generic way.
|
||||
|
||||
|
||||
``classifiers``
|
||||
These can be user-defined :term:`classifier`\ s propagated from the job/run,
|
||||
or they may have been added by the workload to help distinguish between
|
||||
otherwise identical metrics.
|
||||
|
||||
``label``
|
||||
This is a string constructed from the name and classifiers, to provide a
|
||||
more unique identifier, e.g. for grouping values across iterations. The
|
||||
format is in the form ``name/classifier1=value1/classifier2=value2/...``.
|
||||
|
||||
|
||||
:class:`Artifact`
|
||||
-----------------
|
||||
|
||||
An artifact is a file that is created on the host as part of executing a
|
||||
workload. This could be trace, logging, raw output, or pretty much anything
|
||||
else. Pretty much every file under WA output directory that is not already
|
||||
represented by some other framework object will have an :class:`Artifact`
|
||||
associated with it.
|
||||
|
||||
An :class:`Artifact` has the following attributes:
|
||||
|
||||
|
||||
``name``
|
||||
The name of this artifact. This will be unique for the job/run (unlike
|
||||
metric names). This is intended as a consistent "handle" for this artifact.
|
||||
The actual file name for the artifact may vary from job to job (e.g. some
|
||||
benchmarks that create files with results include timestamps in the file
|
||||
names), however the name will always be the same.
|
||||
|
||||
``path``
|
||||
Partial path to the file associated with this artifact. Often, this is just
|
||||
the file name. To get the complete path that may be used to access the file,
|
||||
use :func:`get_artifact_path` of the corresponding output object.
|
||||
|
||||
|
||||
``kind``
|
||||
Describes the nature of this artifact to facilitate generic processing.
|
||||
Possible kinds are:
|
||||
|
||||
:log: A log file. Not part of the "output" as such but contains
|
||||
information about the run/workload execution that may be useful for
|
||||
diagnostics/meta analysis.
|
||||
:meta: A file containing metadata. This is not part of the "output", but
|
||||
contains information that may be necessary to reproduce the
|
||||
results (contrast with ``log`` artifacts which are *not*
|
||||
necessary).
|
||||
:data: This file contains new data, not available otherwise and should
|
||||
be considered part of the "output" generated by WA. Most traces
|
||||
would fall into this category.
|
||||
:export: Exported version of results or some other artifact. This
|
||||
signifies that this artifact does not contain any new data
|
||||
that is not available elsewhere and that it may be safely
|
||||
discarded without losing information.
|
||||
:raw: Signifies that this is a raw dump/log that is normally processed
|
||||
to extract useful information and is then discarded. In a sense,
|
||||
it is the opposite of ``export``, but in general may also be
|
||||
discarded.
|
||||
|
||||
.. note:: Whether a file is marked as ``log``/``data`` or ``raw``
|
||||
depends on how important it is to preserve this file,
|
||||
e.g. when archiving, vs how much space it takes up.
|
||||
Unlike ``export`` artifacts which are (almost) always
|
||||
ignored by other exporters as that would never result
|
||||
in data loss, ``raw`` files *may* be processed by
|
||||
exporters if they decided that the risk of losing
|
||||
potentially (though unlikely) useful data is greater
|
||||
than the time/space cost of handling the artifact (e.g.
|
||||
a database uploader may choose to ignore ``raw``
|
||||
artifacts, where as a network filer archiver may choose
|
||||
to archive them).
|
||||
|
||||
.. note:: The kind parameter is intended to represent the logical
|
||||
function of a particular artifact, not its intended means of
|
||||
processing -- this is left entirely up to the output
|
||||
processors.
|
||||
|
||||
``description``
|
||||
This may be used by the artifact's creator to provide additional free-form
|
||||
information about the artifact. In practice, this is often ``None``
|
||||
|
||||
|
||||
``classifiers``
|
||||
Job- and run-level :term:`classifier`\ s will be propagated to the artifact.
|
||||
|
||||
|
||||
Additional run info
|
||||
-------------------
|
||||
|
||||
:class:`RunOutput` object has ``target_info`` and ``run_info`` attributes that
|
||||
contain structures that provide additional information about the run and device.
|
||||
|
||||
.. _target-info-api:
|
||||
|
||||
:class:`TargetInfo`
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The :class:`TargetInfo` class presents various pieces of information about the
|
||||
target device. An instance of this class will be instantiated and populated
|
||||
automatically from the devlib `target
|
||||
<http://devlib.readthedocs.io/en/latest/target.html>`_ created during a WA run
|
||||
and serialized to a json file as part of the metadata exported
|
||||
by WA at the end of a run.
|
||||
|
||||
The available attributes of the class are as follows:
|
||||
|
||||
``target``
|
||||
The name of the target class that was used to interact with the device
|
||||
during the run E.g. ``"AndroidTarget"``, ``"LinuxTarget"`` etc.
|
||||
|
||||
``modules``
|
||||
A list of names of modules that have been loaded by the target. Modules
|
||||
provide additional functionality, such as access to ``cpufreq`` and which
|
||||
modules are installed may impact how much of the ``TargetInfo`` has been
|
||||
populated.
|
||||
|
||||
``cpus``
|
||||
A list of :class:`CpuInfo` objects describing the capabilities of each CPU.
|
||||
|
||||
``os``
|
||||
A generic name of the OS the target was running (e.g. ``"android"``).
|
||||
|
||||
``os_version``
|
||||
A dict that contains a mapping of OS version elements to their values. This
|
||||
mapping is OS-specific.
|
||||
|
||||
``abi``
|
||||
The ABI of the target device.
|
||||
|
||||
``hostname``
|
||||
The hostname of the device the run was executed on.
|
||||
|
||||
``is_rooted``
|
||||
A boolean value specifying whether root was detected on the device.
|
||||
|
||||
``kernel_version``
|
||||
The version of the kernel on the target device. This returns a
|
||||
:class:`KernelVersion` instance that has separate version and release
|
||||
fields.
|
||||
|
||||
``kernel_config``
|
||||
A :class:`KernelConfig` instance that contains parsed kernel config from the
|
||||
target device. This may be ``None`` if the kernel config could not be
|
||||
extracted.
|
||||
|
||||
``sched_features``
|
||||
A list of the available tweaks to the scheduler, if available from the
|
||||
device.
|
||||
|
||||
``hostid``
|
||||
The unique identifier of the particular device the WA run was executed on.
|
||||
|
||||
|
||||
.. _run-info-api:
|
||||
|
||||
:class:`RunInfo`
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
The :class:`RunInfo` provides general run information. It has the following
|
||||
attributes:
|
||||
|
||||
|
||||
``uuid``
|
||||
A unique identifier for that particular run.
|
||||
|
||||
``run_name``
|
||||
The name of the run (if provided)
|
||||
|
||||
``project``
|
||||
The name of the project the run belongs to (if provided)
|
||||
|
||||
``project_stage``
|
||||
The project stage the run is associated with (if provided)
|
||||
|
||||
``duration``
|
||||
The length of time the run took to complete.
|
||||
|
||||
``start_time``
|
||||
The time the run was started.
|
||||
|
||||
``end_time``
|
||||
The time at which the run finished.
|
@ -1,302 +0,0 @@
|
||||
.. _workloads-api:
|
||||
|
||||
Workloads
|
||||
~~~~~~~~~
|
||||
.. _workload-api:
|
||||
|
||||
Workload
|
||||
^^^^^^^^
|
||||
|
||||
The base :class:`Workload` interface is as follows, and is the base class for
|
||||
all :ref:`workload types <workload-types>`. For more information about how to
|
||||
implement your own workload please see the
|
||||
:ref:`Developer How Tos <adding-a-workload-example>`.
|
||||
|
||||
All instances of a workload will have the following attributes:
|
||||
|
||||
``name``
|
||||
This identifies the workload (e.g. it is used to specify the
|
||||
workload in the :ref:`agenda <agenda>`).
|
||||
|
||||
``phones_home``
|
||||
This can be set to True to mark that this workload poses a risk of
|
||||
exposing information to the outside world about the device it runs on.
|
||||
For example a benchmark application that sends scores and device data
|
||||
to a database owned by the maintainer.
|
||||
|
||||
``requires_network``
|
||||
Set this to ``True`` to mark that the workload will fail without a network
|
||||
connection, this enables it to fail early with a clear message.
|
||||
|
||||
``asset_directory``
|
||||
Set this to specify a custom directory for assets to be pushed to, if
|
||||
unset the working directory will be used.
|
||||
|
||||
``asset_files``
|
||||
This can be used to automatically deploy additional assets to
|
||||
the device. If required the attribute should contain a list of file
|
||||
names that are required by the workload which will be attempted to be
|
||||
found by the resource getters
|
||||
|
||||
methods
|
||||
"""""""
|
||||
|
||||
.. method:: Workload.init_resources(context)
|
||||
|
||||
This method may be optionally overridden to implement dynamic
|
||||
resource discovery for the workload. This method executes
|
||||
early on, before the device has been initialized, so it
|
||||
should only be used to initialize resources that do not
|
||||
depend on the device to resolve. This method is executed
|
||||
once per run for each workload instance.
|
||||
|
||||
:param context: The :ref:`Context <context>` for the current run.
|
||||
|
||||
|
||||
.. method:: Workload.validate(context)
|
||||
|
||||
This method can be used to validate any assumptions your workload
|
||||
makes about the environment (e.g. that required files are
|
||||
present, environment variables are set, etc) and should raise a
|
||||
:class:`wa.WorkloadError <wa.framework.exception.WorkloadError>`
|
||||
if that is not the case. The base class implementation only makes
|
||||
sure that the name attribute has been set.
|
||||
|
||||
:param context: The :ref:`Context <context>` for the current run.
|
||||
|
||||
|
||||
.. method:: Workload.initialize(context)
|
||||
|
||||
This method is decorated with the ``@once_per_instance`` decorator,
|
||||
(for more information please see
|
||||
:ref:`Execution Decorators <execution-decorators>`)
|
||||
therefore it will be executed exactly once per run (no matter
|
||||
how many instances of the workload there are). It will run
|
||||
after the device has been initialized, so it may be used to
|
||||
perform device-dependent initialization that does not need to
|
||||
be repeated on each iteration (e.g. as installing executables
|
||||
required by the workload on the device).
|
||||
|
||||
:param context: The :ref:`Context <context>` for the current run.
|
||||
|
||||
|
||||
.. method:: Workload.setup(context)
|
||||
|
||||
Everything that needs to be in place for workload execution should
|
||||
be done in this method. This includes copying files to the device,
|
||||
starting up an application, configuring communications channels,
|
||||
etc.
|
||||
|
||||
:param context: The :ref:`Context <context>` for the current run.
|
||||
|
||||
|
||||
.. method:: Workload.setup_rerun(context)
|
||||
|
||||
Everything that needs to be in place for workload execution should
|
||||
be done in this method. This includes copying files to the device,
|
||||
starting up an application, configuring communications channels,
|
||||
etc.
|
||||
|
||||
:param context: The :ref:`Context <context>` for the current run.
|
||||
|
||||
|
||||
.. method:: Workload.run(context)
|
||||
|
||||
This method should perform the actual task that is being measured.
|
||||
When this method exits, the task is assumed to be complete.
|
||||
|
||||
:param context: The :ref:`Context <context>` for the current run.
|
||||
|
||||
.. note:: Instruments are kicked off just before calling this
|
||||
method and disabled right after, so everything in this
|
||||
method is being measured. Therefore this method should
|
||||
contain the least code possible to perform the operations
|
||||
you are interested in measuring. Specifically, things like
|
||||
installing or starting applications, processing results, or
|
||||
copying files to/from the device should be done elsewhere if
|
||||
possible.
|
||||
|
||||
|
||||
|
||||
.. method:: Workload.extract_results(context)
|
||||
|
||||
This method gets invoked after the task execution has finished and
|
||||
should be used to extract metrics from the target.
|
||||
|
||||
:param context: The :ref:`Context <context>` for the current run.
|
||||
|
||||
|
||||
.. method:: Workload.update_output(context)
|
||||
|
||||
This method should be used to update the output within the specified
|
||||
execution context with the metrics and artifacts from this
|
||||
workload iteration.
|
||||
|
||||
:param context: The :ref:`Context <context>` for the current run.
|
||||
|
||||
|
||||
.. method:: Workload.teardown(context)
|
||||
|
||||
This could be used to perform any cleanup you may wish to do, e.g.
|
||||
Uninstalling applications, deleting file on the device, etc.
|
||||
|
||||
:param context: The :ref:`Context <context>` for the current run.
|
||||
|
||||
|
||||
.. method:: Workload.finalize(context)
|
||||
|
||||
This is the complement to ``initialize``. This will be executed
|
||||
exactly once at the end of the run. This should be used to
|
||||
perform any final clean up (e.g. uninstalling binaries installed
|
||||
in the ``initialize``)
|
||||
|
||||
:param context: The :ref:`Context <context>` for the current run.
|
||||
|
||||
.. _apkworkload-api:
|
||||
|
||||
ApkWorkload
|
||||
^^^^^^^^^^^^
|
||||
|
||||
The :class:`ApkWorkload` derives from the base :class:`Workload` class however
|
||||
this associates the workload with a package allowing for an apk to be found for
|
||||
the workload, set up and run on the device before running the workload.
|
||||
|
||||
In addition to the attributes mentioned above, this class also
|
||||
features the following attributes however this class does not present any new
|
||||
methods.
|
||||
|
||||
|
||||
``loading_time``
|
||||
This is the time in seconds that WA will wait for the application to load
|
||||
before continuing with the run. By default this will wait 10 seconds, however
|
||||
if your application under test requires additional time this value should
|
||||
be increased.
|
||||
|
||||
``package_names``
|
||||
This attribute should be a list of Apk packages names that are
|
||||
suitable for this workload. Both the host (in the relevant resource
|
||||
locations) and device will be searched for an application with a matching
|
||||
package name.
|
||||
|
||||
``supported_versions``
|
||||
This attribute should be a list of apk versions that are suitable for this
|
||||
workload, if a specific apk version is not specified then any available
|
||||
supported version may be chosen.
|
||||
|
||||
``activity``
|
||||
This attribute can be optionally set to override the default activity that
|
||||
will be extracted from the selected APK file which will be used when
|
||||
launching the APK.
|
||||
|
||||
``view``
|
||||
This is the "view" associated with the application. This is used by
|
||||
instruments like ``fps`` to monitor the current framerate being generated by
|
||||
the application.
|
||||
|
||||
``apk``
|
||||
This is a :class:`PackageHandler` which is what is used to store
|
||||
information about the apk and manage the application itself, the handler is
|
||||
used to call the associated methods to manipulate the application itself for
|
||||
example to launch/close it etc.
|
||||
|
||||
``package``
|
||||
This is a more convenient way to access the package name of the Apk
|
||||
that was found and being used for the run.
|
||||
|
||||
|
||||
.. _apkuiautoworkload-api:
|
||||
|
||||
ApkUiautoWorkload
|
||||
^^^^^^^^^^^^^^^^^
|
||||
|
||||
The :class:`ApkUiautoWorkload` derives from :class:`ApkUIWorkload` which is an
|
||||
intermediate class which in turn inherits from
|
||||
:class:`ApkWorkload`, however in addition to associating an apk with the
|
||||
workload this class allows for automating the application with UiAutomator.
|
||||
|
||||
This class defines these additional attributes:
|
||||
|
||||
``gui``
|
||||
This attribute will be an instance of a :class:`UiAutomatorGUI` which is
|
||||
used to control the automation, and is what is used to pass parameters to the
|
||||
java class for example ``gui.uiauto_params``.
|
||||
|
||||
|
||||
.. _apkreventworkload-api:
|
||||
|
||||
ApkReventWorkload
|
||||
^^^^^^^^^^^^^^^^^
|
||||
|
||||
The :class:`ApkReventWorkload` derives from :class:`ApkUIWorkload` which is an
|
||||
intermediate class which in turn inherits from
|
||||
:class:`ApkWorkload`, however in addition to associating an apk with the
|
||||
workload this class allows for automating the application with
|
||||
:ref:`Revent <revent_files_creation>`.
|
||||
|
||||
This class defines these additional attributes:
|
||||
|
||||
``gui``
|
||||
This attribute will be an instance of a :class:`ReventGUI` which is
|
||||
used to control the automation
|
||||
|
||||
``setup_timeout``
|
||||
This is the time allowed for replaying a recording for the setup stage.
|
||||
|
||||
``run_timeout``
|
||||
This is the time allowed for replaying a recording for the run stage.
|
||||
|
||||
``extract_results_timeout``
|
||||
This is the time allowed for replaying a recording for the extract results stage.
|
||||
|
||||
``teardown_timeout``
|
||||
This is the time allowed for replaying a recording for the teardown stage.
|
||||
|
||||
|
||||
.. _uiautoworkload-api:
|
||||
|
||||
UiautoWorkload
|
||||
^^^^^^^^^^^^^^
|
||||
|
||||
The :class:`UiautoWorkload` derives from :class:`UIWorkload` which is an
|
||||
intermediate class which in turn inherits from
|
||||
:class:`Workload`, however this allows for providing generic automation using
|
||||
UiAutomator without associating a particular application with the workload.
|
||||
|
||||
This class defines these additional attributes:
|
||||
|
||||
``gui``
|
||||
This attribute will be an instance of a :class:`UiAutomatorGUI` which is
|
||||
used to control the automation, and is what is used to pass parameters to the
|
||||
java class for example ``gui.uiauto_params``.
|
||||
|
||||
|
||||
.. _reventworkload-api:
|
||||
|
||||
ReventWorkload
|
||||
^^^^^^^^^^^^^^
|
||||
|
||||
The :class:`ReventWorkload` derives from :class:`UIWorkload` which is an
|
||||
intermediate class which in turn inherits from
|
||||
:class:`Workload`, however this allows for providing generic automation
|
||||
using :ref:`Revent <revent_files_creation>` without associating with the
|
||||
workload.
|
||||
|
||||
This class defines these additional attributes:
|
||||
|
||||
``gui``
|
||||
This attribute will be an instance of a :class:`ReventGUI` which is
|
||||
used to control the automation
|
||||
|
||||
``setup_timeout``
|
||||
This is the time allowed for replaying a recording for the setup stage.
|
||||
|
||||
``run_timeout``
|
||||
This is the time allowed for replaying a recording for the run stage.
|
||||
|
||||
``extract_results_timeout``
|
||||
This is the time allowed for replaying a recording for the extract results stage.
|
||||
|
||||
``teardown_timeout``
|
||||
This is the time allowed for replaying a recording for the teardown stage.
|
||||
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -1,9 +1,11 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2023 ARM Limited
|
||||
# Copyright 2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# WA3 documentation build configuration file.
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
@ -11,8 +13,12 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# This file is execfile()d with the current directory set to its
|
||||
# containing dir.
|
||||
|
||||
#
|
||||
# Workload Automation 2 documentation build configuration file, created by
|
||||
# sphinx-quickstart on Mon Jul 15 09:00:46 2013.
|
||||
#
|
||||
# This file is execfile()d with the current directory set to its containing dir.
|
||||
#
|
||||
# Note that not all possible configuration values are present in this
|
||||
# autogenerated file.
|
||||
@ -20,44 +26,33 @@
|
||||
# All configuration values have a default; values that are commented out
|
||||
# serve to show the default.
|
||||
|
||||
import sys
|
||||
import os
|
||||
import shlex
|
||||
import sys, os
|
||||
import warnings
|
||||
|
||||
warnings.filterwarnings('ignore', "Module louie was already imported")
|
||||
|
||||
this_dir = os.path.dirname(__file__)
|
||||
sys.path.insert(0, os.path.join(this_dir, '..'))
|
||||
sys.path.insert(0, os.path.join(this_dir, '../..'))
|
||||
import wa
|
||||
from build_plugin_docs import (generate_plugin_documentation,
|
||||
generate_run_config_documentation,
|
||||
generate_meta_config_documentation,
|
||||
generate_target_documentation)
|
||||
from build_instrument_method_map import generate_instrument_method_map
|
||||
import wlauto
|
||||
|
||||
# If extensions (or modules to document with autodoc) are in another directory,
|
||||
# add these directories to sys.path here. If the directory is relative to the
|
||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
||||
#sys.path.insert(0, os.path.abspath('.'))
|
||||
|
||||
# -- General configuration ------------------------------------------------
|
||||
# -- General configuration -----------------------------------------------------
|
||||
|
||||
# If your documentation needs a minimal Sphinx version, state it here.
|
||||
#needs_sphinx = '1.0'
|
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be
|
||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
|
||||
# ones.
|
||||
extensions = [
|
||||
'sphinx.ext.autodoc',
|
||||
'sphinx.ext.viewcode',
|
||||
]
|
||||
# Add any Sphinx extension module names here, as strings. They can be extensions
|
||||
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
|
||||
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.mathjax', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
templates_path = ['static/templates']
|
||||
templates_path = ['_templates']
|
||||
|
||||
# The suffix(es) of source filenames.
|
||||
# You can specify multiple suffix as a list of string:
|
||||
# source_suffix = ['.rst', '.md']
|
||||
# The suffix of source filenames.
|
||||
source_suffix = '.rst'
|
||||
|
||||
# The encoding of source files.
|
||||
@ -67,25 +62,21 @@ source_suffix = '.rst'
|
||||
master_doc = 'index'
|
||||
|
||||
# General information about the project.
|
||||
project = u'wa'
|
||||
copyright = u'2023, ARM Limited'
|
||||
author = u'ARM Limited'
|
||||
project = u'Workload Automation'
|
||||
copyright = u'2013, ARM Ltd'
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
# |version| and |release|, also used in various other places throughout the
|
||||
# built documents.
|
||||
#
|
||||
# The short X.Y version.
|
||||
version = wa.framework.version.get_wa_version()
|
||||
version = wlauto.__version__
|
||||
# The full version, including alpha/beta/rc tags.
|
||||
release = wa.framework.version.get_wa_version()
|
||||
release = wlauto.__version__
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
#
|
||||
# This is also used if you do content translation via gettext catalogs.
|
||||
# Usually you set "language" from the command line for these cases.
|
||||
language = None
|
||||
#language = None
|
||||
|
||||
# There are two options for replacing |today|: either, you set today to some
|
||||
# non-false value, then it is used:
|
||||
@ -95,11 +86,9 @@ language = None
|
||||
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
exclude_patterns = ['../build', 'developer_information',
|
||||
'user_information', 'run_config']
|
||||
exclude_patterns = ['**/*-example']
|
||||
|
||||
# The reST default role (used for this markup: `text`) to use for all
|
||||
# documents.
|
||||
# The reST default role (used for this markup: `text`) to use for all documents.
|
||||
#default_role = None
|
||||
|
||||
# If true, '()' will be appended to :func: etc. cross-reference text.
|
||||
@ -119,25 +108,17 @@ pygments_style = 'sphinx'
|
||||
# A list of ignored prefixes for module index sorting.
|
||||
#modindex_common_prefix = []
|
||||
|
||||
# If true, keep warnings as "system message" paragraphs in the built documents.
|
||||
#keep_warnings = False
|
||||
|
||||
# If true, `todo` and `todoList` produce output, else they produce nothing.
|
||||
todo_include_todos = False
|
||||
|
||||
|
||||
# -- Options for HTML output ----------------------------------------------
|
||||
# -- Options for HTML output ---------------------------------------------------
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
||||
# a list of builtin themes.
|
||||
html_theme = 'sphinx_rtd_theme'
|
||||
html_theme = 'classic'
|
||||
|
||||
# Theme options are theme-specific and customize the look and feel of a theme
|
||||
# further. For a list of options available for each theme, see the
|
||||
# documentation.
|
||||
html_theme_options = {
|
||||
'logo_only': True
|
||||
}
|
||||
#html_theme_options = {}
|
||||
|
||||
# Add any paths that contain custom themes here, relative to this directory.
|
||||
#html_theme_path = []
|
||||
@ -151,7 +132,7 @@ html_theme_options = {
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top
|
||||
# of the sidebar.
|
||||
html_logo = 'WA-logo-white.svg'
|
||||
#html_logo = None
|
||||
|
||||
# The name of an image file (within the static path) to use as favicon of the
|
||||
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
|
||||
@ -161,12 +142,7 @@ html_logo = 'WA-logo-white.svg'
|
||||
# Add any paths that contain custom static files (such as style sheets) here,
|
||||
# relative to this directory. They are copied after the builtin static files,
|
||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||
# html_static_path = ['static']
|
||||
|
||||
# Add any extra paths that contain custom files (such as robots.txt or
|
||||
# .htaccess) here, relative to this directory. These files are copied
|
||||
# directly to the root of the documentation.
|
||||
#html_extra_path = []
|
||||
html_static_path = ['_static']
|
||||
|
||||
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
|
||||
# using the given strftime format.
|
||||
@ -209,24 +185,11 @@ html_logo = 'WA-logo-white.svg'
|
||||
# This is the file name suffix for HTML files (e.g. ".xhtml").
|
||||
#html_file_suffix = None
|
||||
|
||||
# Language to be used for generating the HTML full-text search index.
|
||||
# Sphinx supports the following languages:
|
||||
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
|
||||
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
|
||||
#html_search_language = 'en'
|
||||
|
||||
# A dictionary with options for the search language support, empty by default.
|
||||
# Now only 'ja' uses this config value
|
||||
#html_search_options = {'type': 'default'}
|
||||
|
||||
# The name of a javascript file (relative to the configuration directory) that
|
||||
# implements a search results scorer. If empty, the default will be used.
|
||||
#html_search_scorer = 'scorer.js'
|
||||
|
||||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = 'wadoc'
|
||||
htmlhelp_basename = 'WorkloadAutomationdoc'
|
||||
|
||||
# -- Options for LaTeX output ---------------------------------------------
|
||||
|
||||
# -- Options for LaTeX output --------------------------------------------------
|
||||
|
||||
latex_elements = {
|
||||
# The paper size ('letterpaper' or 'a4paper').
|
||||
@ -237,17 +200,13 @@ latex_elements = {
|
||||
|
||||
# Additional stuff for the LaTeX preamble.
|
||||
#'preamble': '',
|
||||
|
||||
# Latex figure (float) alignment
|
||||
#'figure_align': 'htbp',
|
||||
}
|
||||
|
||||
# Grouping the document tree into LaTeX files. List of tuples
|
||||
# (source start file, target name, title,
|
||||
# author, documentclass [howto, manual, or own class]).
|
||||
# (source start file, target name, title, author, documentclass [howto/manual]).
|
||||
latex_documents = [
|
||||
(master_doc, 'wa.tex', u'wa Documentation',
|
||||
u'Arm Limited', 'manual'),
|
||||
('index', 'WorkloadAutomation.tex', u'Workload Automation Documentation',
|
||||
u'WA Mailing List \\textless{}workload-automation@arm.com\\textgreater{},Sergei Trofimov \\textless{}sergei.trofimov@arm.com\\textgreater{}, Vasilis Flouris \\textless{}vasilis.flouris@arm.com\\textgreater{}, Mohammed Binsabbar \\textless{}mohammed.binsabbar@arm.com\\textgreater{}', 'manual'),
|
||||
]
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top of
|
||||
@ -271,27 +230,27 @@ latex_documents = [
|
||||
#latex_domain_indices = True
|
||||
|
||||
|
||||
# -- Options for manual page output ---------------------------------------
|
||||
# -- Options for manual page output --------------------------------------------
|
||||
|
||||
# One entry per manual page. List of tuples
|
||||
# (source start file, name, description, authors, manual section).
|
||||
man_pages = [
|
||||
(master_doc, 'wa', u'wa Documentation',
|
||||
[author], 1)
|
||||
('index', 'workloadautomation', u'Workload Automation Documentation',
|
||||
[u'WA Mailing List <workload-automation@arm.com>, Sergei Trofimov <sergei.trofimov@arm.com>, Vasilis Flouris <vasilis.flouris@arm.com>'], 1)
|
||||
]
|
||||
|
||||
# If true, show URL addresses after external links.
|
||||
#man_show_urls = False
|
||||
|
||||
|
||||
# -- Options for Texinfo output -------------------------------------------
|
||||
# -- Options for Texinfo output ------------------------------------------------
|
||||
|
||||
# Grouping the document tree into Texinfo files. List of tuples
|
||||
# (source start file, target name, title, author,
|
||||
# dir menu entry, description, category)
|
||||
texinfo_documents = [
|
||||
(master_doc, 'wa', u'wa Documentation',
|
||||
author, 'wa', 'A framework for automating workload execution on mobile devices.',
|
||||
('index', 'WorkloadAutomation', u'Workload Automation Documentation',
|
||||
u'WA Mailing List <workload-automation@arm.com>, Sergei Trofimov <sergei.trofimov@arm.com>, Vasilis Flouris <vasilis.flouris@arm.com>', 'WorkloadAutomation', 'A framwork for automationg workload execution on mobile devices.',
|
||||
'Miscellaneous'),
|
||||
]
|
||||
|
||||
@ -304,20 +263,8 @@ texinfo_documents = [
|
||||
# How to display URL addresses: 'footnote', 'no', or 'inline'.
|
||||
#texinfo_show_urls = 'footnote'
|
||||
|
||||
# If true, do not generate a @detailmenu in the "Top" node's menu.
|
||||
#texinfo_no_detailmenu = False
|
||||
|
||||
def setup(app):
|
||||
module_dir = os.path.join('..', '..', 'wa')
|
||||
excluded_extensions = [os.path.join(module_dir, 'framework'),
|
||||
os.path.join(module_dir, 'tests')]
|
||||
os.chdir(os.path.dirname(__file__))
|
||||
generate_plugin_documentation(module_dir, 'plugins', excluded_extensions)
|
||||
generate_target_documentation('plugins')
|
||||
generate_run_config_documentation('run_config')
|
||||
generate_meta_config_documentation('run_config')
|
||||
generate_instrument_method_map(os.path.join('developer_information', 'developer_guide',
|
||||
'instrument_method_map.rst'))
|
||||
app.add_object_type('confval', 'confval',
|
||||
objname='configuration value',
|
||||
indextemplate='pair: %s; configuration value')
|
||||
|
220
doc/source/configuration.rst
Normal file
220
doc/source/configuration.rst
Normal file
@ -0,0 +1,220 @@
|
||||
.. _configuration-specification:
|
||||
|
||||
=============
|
||||
Configuration
|
||||
=============
|
||||
|
||||
In addition to specifying run execution parameters through an agenda, the
|
||||
behavior of WA can be modified through configuration file(s). The default
|
||||
configuration file is ``~/.workload_automation/config.py`` (the location can be
|
||||
changed by setting ``WA_USER_DIRECTORY`` environment variable, see :ref:`envvars`
|
||||
section below). This file will be
|
||||
created when you first run WA if it does not already exist. This file must
|
||||
always exist and will always be loaded. You can add to or override the contents
|
||||
of that file on invocation of Workload Automation by specifying an additional
|
||||
configuration file using ``--config`` option.
|
||||
|
||||
The config file is just a Python source file, so it can contain any valid Python
|
||||
code (though execution of arbitrary code through the config file is
|
||||
discouraged). Variables with specific names will be picked up by the framework
|
||||
and used to modify the behavior of Workload automation.
|
||||
|
||||
.. note:: As of version 2.1.3 it is also possible to specify the following
|
||||
configuration in the agenda. See :ref:`configuration in an agenda <configuration_in_agenda>`\ .
|
||||
|
||||
|
||||
.. _available_settings:
|
||||
|
||||
Available Settings
|
||||
==================
|
||||
|
||||
.. note:: Extensions such as workloads, instrumentation or result processors
|
||||
may also pick up certain settings from this file, so the list below is
|
||||
not exhaustive. Please refer to the documentation for the specific
|
||||
extensions to see what settings they accept.
|
||||
|
||||
.. confval:: device
|
||||
|
||||
This setting defines what specific Device subclass will be used to interact
|
||||
the connected device. Obviously, this must match your setup.
|
||||
|
||||
.. confval:: device_config
|
||||
|
||||
This must be a Python dict containing setting-value mapping for the
|
||||
configured :rst:dir:`device`. What settings and values are valid is specific
|
||||
to each device. Please refer to the documentation for your device.
|
||||
|
||||
.. confval:: reboot_policy
|
||||
|
||||
This defines when during execution of a run the Device will be rebooted. The
|
||||
possible values are:
|
||||
|
||||
``"never"``
|
||||
The device will never be rebooted.
|
||||
``"initial"``
|
||||
The device will be rebooted when the execution first starts, just before
|
||||
executing the first workload spec.
|
||||
``"each_spec"``
|
||||
The device will be rebooted before running a new workload spec.
|
||||
Note: this acts the same as each_iteration when execution order is set to by_iteration
|
||||
``"each_iteration"``
|
||||
The device will be rebooted before each new iteration.
|
||||
|
||||
.. seealso::
|
||||
|
||||
:doc:`execution_model`
|
||||
|
||||
.. confval:: execution_order
|
||||
|
||||
Defines the order in which the agenda spec will be executed. At the moment,
|
||||
the following execution orders are supported:
|
||||
|
||||
``"by_iteration"``
|
||||
The first iteration of each workload spec is executed one after the other,
|
||||
so all workloads are executed before proceeding on to the second iteration.
|
||||
E.g. A1 B1 C1 A2 C2 A3. This is the default if no order is explicitly specified.
|
||||
|
||||
In case of multiple sections, this will spread them out, such that specs
|
||||
from the same section are further apart. E.g. given sections X and Y, global
|
||||
specs A and B, and two iterations, this will run ::
|
||||
|
||||
X.A1, Y.A1, X.B1, Y.B1, X.A2, Y.A2, X.B2, Y.B2
|
||||
|
||||
``"by_section"``
|
||||
Same as ``"by_iteration"``, however this will group specs from the same
|
||||
section together, so given sections X and Y, global specs A and B, and two iterations,
|
||||
this will run ::
|
||||
|
||||
X.A1, X.B1, Y.A1, Y.B1, X.A2, X.B2, Y.A2, Y.B2
|
||||
|
||||
``"by_spec"``
|
||||
All iterations of the first spec are executed before moving on to the next
|
||||
spec. E.g. A1 A2 A3 B1 C1 C2 This may also be specified as ``"classic"``,
|
||||
as this was the way workloads were executed in earlier versions of WA.
|
||||
|
||||
``"random"``
|
||||
Execution order is entirely random.
|
||||
|
||||
Added in version 2.1.5.
|
||||
|
||||
|
||||
.. confval:: retry_on_status
|
||||
|
||||
This is a list of statuses on which a job will be considered to have failed and
|
||||
will be automatically retried up to ``max_retries`` times. This defaults to
|
||||
``["FAILED", "PARTIAL"]`` if not set. Possible values are:
|
||||
|
||||
``"OK"``
|
||||
This iteration has completed and no errors have been detected
|
||||
|
||||
``"PARTIAL"``
|
||||
One or more instruments have failed (the iteration may still be running).
|
||||
|
||||
``"FAILED"``
|
||||
The workload itself has failed.
|
||||
|
||||
``"ABORTED"``
|
||||
The user interrupted the workload
|
||||
|
||||
.. confval:: max_retries
|
||||
|
||||
The maximum number of times failed jobs will be retried before giving up. If
|
||||
not set, this will default to ``3``.
|
||||
|
||||
.. note:: this number does not include the original attempt
|
||||
|
||||
.. confval:: instrumentation
|
||||
|
||||
This should be a list of instruments to be enabled during run execution.
|
||||
Values must be names of available instruments. Instruments are used to
|
||||
collect additional data, such as energy measurements or execution time,
|
||||
during runs.
|
||||
|
||||
.. seealso::
|
||||
|
||||
:doc:`api/wlauto.instrumentation`
|
||||
|
||||
.. confval:: result_processors
|
||||
|
||||
This should be a list of result processors to be enabled during run execution.
|
||||
Values must be names of available result processors. Result processor define
|
||||
how data is output from WA.
|
||||
|
||||
.. seealso::
|
||||
|
||||
:doc:`api/wlauto.result_processors`
|
||||
|
||||
.. confval:: logging
|
||||
|
||||
A dict that contains logging setting. At the moment only three settings are
|
||||
supported:
|
||||
|
||||
``"file format"``
|
||||
Controls how logging output appears in the run.log file in the output
|
||||
directory.
|
||||
``"verbose format"``
|
||||
Controls how logging output appear on the console when ``--verbose`` flag
|
||||
was used.
|
||||
``"regular format"``
|
||||
Controls how logging output appear on the console when ``--verbose`` flag
|
||||
was not used.
|
||||
|
||||
All three values should be Python `old-style format strings`_ specifying which
|
||||
`log record attributes`_ should be displayed.
|
||||
|
||||
.. confval:: remote_assets_path
|
||||
|
||||
Path to the local mount of a network assets repository. See
|
||||
:ref:`assets_repository`.
|
||||
|
||||
|
||||
There are also a couple of settings are used to provide additional metadata
|
||||
for a run. These may get picked up by instruments or result processors to
|
||||
attach context to results.
|
||||
|
||||
.. confval:: project
|
||||
|
||||
A string naming the project for which data is being collected. This may be
|
||||
useful, e.g. when uploading data to a shared database that is populated from
|
||||
multiple projects.
|
||||
|
||||
.. confval:: project_stage
|
||||
|
||||
A dict or a string that allows adding an additional identifier. This may be
|
||||
useful for long-running projects.
|
||||
|
||||
.. confval:: run_name
|
||||
|
||||
A string that labels the WA run that is being performed. This would typically
|
||||
be set in the ``config`` section of an agenda (see
|
||||
:ref:`configuration in an agenda <configuration_in_agenda>`) rather than in the config file.
|
||||
|
||||
.. _old-style format strings: http://docs.python.org/2/library/stdtypes.html#string-formatting-operations
|
||||
.. _log record attributes: http://docs.python.org/2/library/logging.html#logrecord-attributes
|
||||
|
||||
|
||||
.. _envvars:
|
||||
|
||||
Environment Variables
|
||||
=====================
|
||||
|
||||
In addition to standard configuration described above, WA behaviour can be
|
||||
altered through environment variables. These can determine where WA looks for
|
||||
various assets when it starts.
|
||||
|
||||
.. confval:: WA_USER_DIRECTORY
|
||||
|
||||
This is the location WA will look for config.py, instrumentation, and it
|
||||
will also be used for local caches, etc. If this variable is not set, the
|
||||
default location is ``~/.workload_automation`` (this is created when WA
|
||||
is installed).
|
||||
|
||||
.. note:: This location **must** be writable by the user who runs WA.
|
||||
|
||||
|
||||
.. confval:: WA_EXTENSION_PATHS
|
||||
|
||||
By default, WA will look for extensions in its own package and in
|
||||
subdirectories under ``WA_USER_DIRECTORY``. This environment variable can
|
||||
be used to specify a colon-separated list of additional locations WA should
|
||||
use to look for extensions.
|
56
doc/source/contributing.rst
Normal file
56
doc/source/contributing.rst
Normal file
@ -0,0 +1,56 @@
|
||||
|
||||
Contributing Code
|
||||
=================
|
||||
|
||||
We welcome code contributions via GitHub pull requests. To help with
|
||||
maintainability of the code line we ask that the code uses a coding style
|
||||
consistent with the rest of WA code. Briefly, it is
|
||||
|
||||
- `PEP8 <https://www.python.org/dev/peps/pep-0008/>`_ with line length and block
|
||||
comment rules relaxed (the wrapper for PEP8 checker inside ``dev_scripts``
|
||||
will run it with appropriate configuration).
|
||||
- Four-space indentation (*no tabs!*).
|
||||
- Title-case for class names, underscore-delimited lower case for functions,
|
||||
methods, and variables.
|
||||
- Use descriptive variable names. Delimit words with ``'_'`` for readability.
|
||||
Avoid shortening words, skipping vowels, etc (common abbreviations such as
|
||||
"stats" for "statistics", "config" for "configuration", etc are OK). Do
|
||||
*not* use Hungarian notation (so prefer ``birth_date`` over ``dtBirth``).
|
||||
|
||||
New extensions should also follow implementation guidelines specified in
|
||||
:ref:`writing_extensions` section of the documentation.
|
||||
|
||||
We ask that the following checks are performed on the modified code prior to
|
||||
submitting a pull request:
|
||||
|
||||
.. note:: You will need pylint and pep8 static checkers installed::
|
||||
|
||||
pip install pep8
|
||||
pip install pylint
|
||||
|
||||
It is recommended that you install via pip rather than through your
|
||||
distribution's package manager because the latter is likely to
|
||||
contain out-of-date versions of these tools.
|
||||
|
||||
- ``./dev_scripts/pylint`` should be run without arguments and should produce no
|
||||
output (any output should be addressed by making appropriate changes in the
|
||||
code or adding a pylint ignore directive, if there is a good reason for
|
||||
keeping the code as is).
|
||||
- ``./dev_scripts/pep8`` should be run without arguments and should produce no
|
||||
output (any output should be addressed by making appropriate changes in the
|
||||
code).
|
||||
- If the modifications touch core framework (anything under ``wlauto/core``), unit
|
||||
tests should be run using ``nosetests``, and they should all pass.
|
||||
|
||||
- If significant additions have been made to the framework, unit
|
||||
tests should be added to cover the new functionality.
|
||||
|
||||
- If modifications have been made to documentation (this includes description
|
||||
attributes for Parameters and Extensions), documentation should be built to
|
||||
make sure no errors or warning during build process, and a visual inspection
|
||||
of new/updated sections in resulting HTML should be performed to ensure
|
||||
everything renders as expected.
|
||||
|
||||
Once your contribution is ready, please follow the instructions in `GitHub
|
||||
documentation <https://help.github.com/articles/creating-a-pull-request/>`_ to
|
||||
create a pull request.
|
74
doc/source/conventions.rst
Normal file
74
doc/source/conventions.rst
Normal file
@ -0,0 +1,74 @@
|
||||
===========
|
||||
Conventions
|
||||
===========
|
||||
|
||||
Interface Definitions
|
||||
=====================
|
||||
|
||||
Throughout this documentation a number of stubbed-out class definitions will be
|
||||
presented showing an interface defined by a base class that needs to be
|
||||
implemented by the deriving classes. The following conventions will be used when
|
||||
presenting such an interface:
|
||||
|
||||
- Methods shown raising :class:`NotImplementedError` are abstract and *must*
|
||||
be overridden by subclasses.
|
||||
- Methods with ``pass`` in their body *may* be (but do not need to be) overridden
|
||||
by subclasses. If not overridden, these methods will default to the base
|
||||
class implementation, which may or may not be a no-op (the ``pass`` in the
|
||||
interface specification does not necessarily mean that the method does not have an
|
||||
actual implementation in the base class).
|
||||
|
||||
.. note:: If you *do* override these methods you must remember to call the
|
||||
base class' version inside your implementation as well.
|
||||
|
||||
- Attributes whose value is shown as ``None`` *must* be redefined by the
|
||||
subclasses with an appropriate value.
|
||||
- Attributes whose value is shown as something other than ``None`` (including
|
||||
empty strings/lists/dicts) *may* be (but do not need to be) overridden by
|
||||
subclasses. If not overridden, they will default to the value shown.
|
||||
|
||||
Keep in mind that the above convention applies only when showing interface
|
||||
definitions and may not apply elsewhere in the documentation. Also, in the
|
||||
interest of clarity, only the relevant parts of the base class definitions will
|
||||
be shown; some members (such as internal methods) may be omitted.
|
||||
|
||||
|
||||
Code Snippets
|
||||
=============
|
||||
|
||||
Code snippets provided are intended to be valid Python code, and to be complete.
|
||||
However, for the sake of clarity, in some cases only the relevant parts will be
|
||||
shown with some details omitted (details that may be necessary to the validity of the code
|
||||
but not to understanding of the concept being illustrated). In such cases, a
|
||||
commented ellipsis will be used to indicate that parts of the code have been
|
||||
dropped. E.g. ::
|
||||
|
||||
# ...
|
||||
|
||||
def update_result(self, context):
|
||||
# ...
|
||||
context.result.add_metric('energy', 23.6, 'Joules', lower_is_better=True)
|
||||
|
||||
# ...
|
||||
|
||||
|
||||
Core Class Names
|
||||
================
|
||||
|
||||
When core classes are referenced throughout the documentation, usually their
|
||||
fully-qualified names are given e.g. :class:`wlauto.core.workload.Workload`.
|
||||
This is done so that Sphinx_ can resolve them and provide a link. While
|
||||
implementing extensions, however, you should *not* be importing anything
|
||||
directly from under :mod:`wlauto.core`. Instead, classes you are meant to
|
||||
instantiate or subclass have been aliased in the root :mod:`wlauto` package,
|
||||
and should be imported from there, e.g. ::
|
||||
|
||||
from wlauto import Workload
|
||||
|
||||
All examples given in the documentation follow this convention. Please note that
|
||||
this only applies to the :mod:`wlauto.core` subpackage; all other classes
|
||||
should be imported from their corresponding subpackages.
|
||||
|
||||
.. _Sphinx: http://sphinx-doc.org/
|
||||
|
||||
|
BIN
doc/source/daq-wiring.png
Normal file
BIN
doc/source/daq-wiring.png
Normal file
Binary file not shown.
After ![]() (image error) Size: 151 KiB |
252
doc/source/daq_device_setup.rst
Normal file
252
doc/source/daq_device_setup.rst
Normal file
@ -0,0 +1,252 @@
|
||||
.. _daq_setup:
|
||||
|
||||
DAQ Server Guide
|
||||
================
|
||||
|
||||
NI-DAQ, or just "DAQ", is the Data Acquisition device developed by National
|
||||
Instruments:
|
||||
|
||||
http://www.ni.com/data-acquisition/
|
||||
|
||||
WA uses the DAQ to collect power measurements during workload execution. A
|
||||
client/server solution for this is distributed as part of WA, though it is
|
||||
distinct from WA and may be used separately (by invoking the client APIs from a
|
||||
Python script, or used directly from the command line).
|
||||
|
||||
This solution is dependent on the NI-DAQmx driver for the DAQ device. At the
|
||||
time of writing, only Windows versions of the driver are supported (there is an
|
||||
old Linux version that works on some versions of RHEL and Centos, but it is
|
||||
unsupported and won't work with recent Linux kernels). Because of this, the
|
||||
server part of the solution will need to be run on a Windows machine (though it
|
||||
should also work on Linux, if the driver becomes available).
|
||||
|
||||
|
||||
.. _daq_wiring:
|
||||
|
||||
DAQ Device Wiring
|
||||
-----------------
|
||||
|
||||
The server expects the device to be wired in a specific way in order to be able
|
||||
to collect power measurements. Two consecutive Analogue Input (AI) channels on
|
||||
the DAQ are used to form a logical "port" (starting with AI/0 and AI/1 for port
|
||||
0). Of these, the lower/even channel (e.g. AI/0) is used to measure the voltage
|
||||
on the rail we're interested in; the higher/odd channel (e.g. AI/1) is used to
|
||||
measure the voltage drop across a known very small resistor on the same rail,
|
||||
which is then used to calculate current. The logical wiring diagram looks like
|
||||
this::
|
||||
|
||||
Port N
|
||||
======
|
||||
|
|
||||
| AI/(N*2)+ <--- Vr -------------------------|
|
||||
| |
|
||||
| AI/(N*2)- <--- GND -------------------// |
|
||||
| |
|
||||
| AI/(N*2+1)+ <--- V ------------|-------V |
|
||||
| r | |
|
||||
| AI/(N*2+1)- <--- Vr --/\/\/\----| |
|
||||
| | |
|
||||
| | |
|
||||
| |------------------------------|
|
||||
======
|
||||
|
||||
Where:
|
||||
V: Voltage going into the resistor
|
||||
Vr: Voltage between resistor and the SOC
|
||||
GND: Ground
|
||||
r: The resistor across the rail with a known
|
||||
small value.
|
||||
|
||||
|
||||
The physical wiring will depend on the specific DAQ device, as channel layout
|
||||
varies between models.
|
||||
|
||||
.. note:: Current solution supports variable number of ports, however it
|
||||
assumes that the ports are sequential and start at zero. E.g. if you
|
||||
want to measure power on three rails, you will need to wire ports 0-2
|
||||
(AI/0 to AI/5 channels on the DAQ) to do it. It is not currently
|
||||
possible to use any other configuration (e.g. ports 1, 2 and 5).
|
||||
|
||||
|
||||
As an example, the following illustration shows the wiring of PORT0 (using AI/0
|
||||
and AI/1 channels) on a DAQ USB-6210
|
||||
|
||||
.. image:: daq-wiring.png
|
||||
:scale: 70 %
|
||||
|
||||
Setting up NI-DAQmx driver on a Windows Machine
|
||||
-----------------------------------------------
|
||||
|
||||
- The NI-DAQmx driver is pretty big in size, 1.5 GB. The driver name is
|
||||
'NI-DAQmx' and its version '9.7.0f0' which you can obtain it from National
|
||||
Instruments website by downloading NI Measurement & Automation Explorer (Ni
|
||||
MAX) from: http://joule.ni.com/nidu/cds/view/p/id/3811/lang/en
|
||||
|
||||
.. note:: During the installation process, you might be prompted to install
|
||||
.NET framework 4.
|
||||
|
||||
- The installation process is quite long, 7-15 minutes.
|
||||
- Once installed, open NI MAX, which should be in your desktop, if not type its
|
||||
name in the start->search.
|
||||
- Connect the NI-DAQ device to your machine. You should see it appear under
|
||||
'Devices and Interfaces'. If not, press 'F5' to refresh the list.
|
||||
- Complete the device wiring as described in the :ref:`daq_wiring` section.
|
||||
- Quit NI MAX.
|
||||
|
||||
|
||||
Setting up DAQ server
|
||||
---------------------
|
||||
|
||||
The DAQ power measurement solution is implemented in daqpower Python library,
|
||||
the package for which can be found in WA's install location under
|
||||
``wlauto/external/daq_server/daqpower-1.0.0.tar.gz`` (the version number in your
|
||||
installation may be different).
|
||||
|
||||
- Install NI-DAQmx driver, as described in the previous section.
|
||||
- Install Python 2.7.
|
||||
- Download and install ``pip``, ``numpy`` and ``twisted`` Python packages.
|
||||
These packages have C extensions, and so you will need a native compiler set
|
||||
up if you want to install them from PyPI. As an easier alternative, you can
|
||||
find pre-built Windows installers for these packages here_ (the versions are
|
||||
likely to be older than what's on PyPI though).
|
||||
- Install the daqpower package using pip::
|
||||
|
||||
pip install C:\Python27\Lib\site-packages\wlauto\external\daq_server\daqpower-1.0.0.tar.gz
|
||||
|
||||
This should automatically download and install ``PyDAQmx`` package as well
|
||||
(the Python bindings for the NI-DAQmx driver).
|
||||
|
||||
.. _here: http://www.lfd.uci.edu/~gohlke/pythonlibs/
|
||||
|
||||
|
||||
Running DAQ server
|
||||
------------------
|
||||
|
||||
Once you have installed the ``daqpower`` package and the required dependencies as
|
||||
described above, you can start the server by executing ``run-daq-server`` from the
|
||||
command line. The server will start listening on the default port, 45677.
|
||||
|
||||
.. note:: There is a chance that pip will not add ``run-daq-server`` into your
|
||||
path. In that case, you can run daq server as such:
|
||||
``python C:\path to python\Scripts\run-daq-server``
|
||||
|
||||
You can optionally specify flags to control the behaviour of the server::
|
||||
|
||||
usage: run-daq-server [-h] [-d DIR] [-p PORT] [--debug] [--verbose]
|
||||
|
||||
optional arguments:
|
||||
-h, --help show this help message and exit
|
||||
-d DIR, --directory DIR
|
||||
Working directory
|
||||
-p PORT, --port PORT port the server will listen on.
|
||||
--debug Run in debug mode (no DAQ connected).
|
||||
--verbose Produce verbose output.
|
||||
|
||||
.. note:: The server will use a working directory (by default, the directory
|
||||
the run-daq-server command was executed in, or the location specified
|
||||
with -d flag) to store power traces before they are collected by the
|
||||
client. This directory must be read/write-able by the user running
|
||||
the server.
|
||||
|
||||
|
||||
Collecting Power with WA
|
||||
------------------------
|
||||
|
||||
.. note:: You do *not* need to install the ``daqpower`` package on the machine
|
||||
running WA, as it is already included in the WA install structure.
|
||||
However, you do need to make sure that ``twisted`` package is
|
||||
installed.
|
||||
|
||||
You can enable the ``daq`` instrument in your agenda/config.py in order to get WA to
|
||||
collect power measurements. At minimum, you will also need to specify the
|
||||
resistor values for each port in your configuration, e.g.::
|
||||
|
||||
resistor_values = [0.005, 0.005] # in Ohms
|
||||
|
||||
This also specifies the number of logical ports (measurement sites) you want to
|
||||
use, and, implicitly, the port numbers (ports 0 to N-1 will be used).
|
||||
|
||||
.. note:: "ports" here refers to the logical ports wired on the DAQ (see :ref:`daq_wiring`),
|
||||
not to be confused with the TCP port the server is listening on.
|
||||
|
||||
Unless you're running the DAQ server and WA on the same machine (unlikely
|
||||
considering that WA is officially supported only on Linux and recent NI-DAQmx
|
||||
drivers are only available on Windows), you will also need to specify the IP
|
||||
address of the server::
|
||||
|
||||
daq_server = 127.0.0.1
|
||||
|
||||
There are a number of other settings that can optionally be specified in the
|
||||
configuration (e.g. the labels to be used for DAQ ports). Please refer to the
|
||||
:class:`wlauto.instrumentation.daq.Daq` documentation for details.
|
||||
|
||||
|
||||
Collecting Power from the Command Line
|
||||
--------------------------------------
|
||||
|
||||
``daqpower`` package also comes with a client that may be used from the command
|
||||
line. Unlike when collecting power with WA, you *will* need to install the
|
||||
``daqpower`` package. Once installed, you will be able to interact with a
|
||||
running DAQ server by invoking ``send-daq-command``. The invocation syntax is ::
|
||||
|
||||
send-daq-command --host HOST [--port PORT] COMMAND [OPTIONS]
|
||||
|
||||
Options are command-specific. COMMAND may be one of the following (and they
|
||||
should generally be invoked in that order):
|
||||
|
||||
:configure: Set up a new session, specifying the configuration values to
|
||||
be used. If there is already a configured session, it will
|
||||
be terminated. OPTIONS for this this command are the DAQ
|
||||
configuration parameters listed in the DAQ instrument
|
||||
documentation with all ``_`` replaced by ``-`` and prefixed
|
||||
with ``--``, e.g. ``--resistor-values``.
|
||||
:start: Start collecting power measurements.
|
||||
:stop: Stop collecting power measurements.
|
||||
:get_data: Pull files containing power measurements from the server.
|
||||
There is one option for this command:
|
||||
``--output-directory`` which specifies where the files will
|
||||
be pulled to; if this is not specified, they will be in the
|
||||
current directory.
|
||||
:close: Close the currently configured server session. This will get rid
|
||||
of the data files and configuration on the server, so it would
|
||||
no longer be possible to use "start" or "get_data" commands
|
||||
before a new session is configured.
|
||||
|
||||
A typical command line session would go like this:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
send-daq-command --host 127.0.0.1 configure --resistor-values 0.005 0.005
|
||||
# set up and kick off the use case you want to measure
|
||||
send-daq-command --host 127.0.0.1 start
|
||||
# wait for the use case to complete
|
||||
send-daq-command --host 127.0.0.1 stop
|
||||
send-daq-command --host 127.0.0.1 get_data
|
||||
# files called PORT_0.csv and PORT_1.csv will appear in the current directory
|
||||
# containing measurements collected during use case execution
|
||||
send-daq-command --host 127.0.0.1 close
|
||||
# the session is terminated and the csv files on the server have been
|
||||
# deleted. A new session may now be configured.
|
||||
|
||||
In addition to these "standard workflow" commands, the following commands are
|
||||
also available:
|
||||
|
||||
:list_devices: Returns a list of DAQ devices detected by the NI-DAQmx
|
||||
driver. In case multiple devices are connected to the
|
||||
server host, you can specify the device you want to use
|
||||
with ``--device-id`` option when configuring a session.
|
||||
:list_ports: Returns a list of ports that have been configured for the
|
||||
current session, e.g. ``['PORT_0', 'PORT_1']``.
|
||||
:list_port_files: Returns a list of data files that have been generated
|
||||
(unless something went wrong, there should be one for
|
||||
each port).
|
||||
|
||||
|
||||
Collecting Power from another Python Script
|
||||
-------------------------------------------
|
||||
|
||||
You can invoke the above commands from a Python script using
|
||||
:py:func:`daqpower.client.execute_command` function, passing in
|
||||
:class:`daqpower.config.ServerConfiguration` and, in case of the configure command,
|
||||
:class:`daqpower.config.DeviceConfiguration`. Please see the implementation of
|
||||
the ``daq`` WA instrument for examples of how these APIs can be used.
|
@ -1,19 +0,0 @@
|
||||
=====================
|
||||
Developer Information
|
||||
=====================
|
||||
|
||||
.. contents:: Contents
|
||||
:depth: 4
|
||||
:local:
|
||||
|
||||
------------------
|
||||
|
||||
.. include:: developer_information/developer_guide.rst
|
||||
|
||||
------------------
|
||||
|
||||
.. include:: developer_information/how_to.rst
|
||||
|
||||
------------------
|
||||
|
||||
.. include:: developer_information/developer_reference.rst
|
@ -1,12 +0,0 @@
|
||||
.. _developer_guide:
|
||||
|
||||
***************
|
||||
Developer Guide
|
||||
***************
|
||||
|
||||
.. contents::
|
||||
:depth: 3
|
||||
:local:
|
||||
|
||||
.. include:: developer_information/developer_guide/writing_plugins.rst
|
||||
|
@ -1,583 +0,0 @@
|
||||
.. _writing-plugins:
|
||||
|
||||
|
||||
Writing Plugins
|
||||
================
|
||||
|
||||
Workload Automation offers several plugin points (or plugin types). The most
|
||||
interesting of these are
|
||||
|
||||
:workloads: These are the tasks that get executed and measured on the device. These
|
||||
can be benchmarks, high-level use cases, or pretty much anything else.
|
||||
:targets: These are interfaces to the physical devices (development boards or end-user
|
||||
devices, such as smartphones) that use cases run on. Typically each model of a
|
||||
physical device would require its own interface class (though some functionality
|
||||
may be reused by subclassing from an existing base).
|
||||
:instruments: Instruments allow collecting additional data from workload execution (e.g.
|
||||
system traces). Instruments are not specific to a particular workload. Instruments
|
||||
can hook into any stage of workload execution.
|
||||
:output processors: These are used to format the results of workload execution once they have been
|
||||
collected. Depending on the callback used, these will run either after each
|
||||
iteration and/or at the end of the run, after all of the results have been
|
||||
collected.
|
||||
|
||||
You can create a plugin by subclassing the appropriate base class, defining
|
||||
appropriate methods and attributes, and putting the .py file containing the
|
||||
class into the "plugins" subdirectory under ``~/.workload_automation`` (or
|
||||
equivalent) where it will be automatically picked up by WA.
|
||||
|
||||
|
||||
Plugin Basics
|
||||
--------------
|
||||
|
||||
This sub-section covers things common to implementing plugins of all types. It
|
||||
is recommended you familiarize yourself with the information here before
|
||||
proceeding onto guidance for specific plugin types.
|
||||
|
||||
.. _resource-resolution:
|
||||
|
||||
Dynamic Resource Resolution
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The idea is to decouple resource identification from resource discovery.
|
||||
Workloads/instruments/devices/etc state *what* resources they need, and not
|
||||
*where* to look for them -- this instead is left to the resource resolver that
|
||||
is part of the execution context. The actual discovery of resources is
|
||||
performed by resource getters that are registered with the resolver.
|
||||
|
||||
A resource type is defined by a subclass of
|
||||
:class:`wa.framework.resource.Resource`. An instance of this class describes a
|
||||
resource that is to be obtained. At minimum, a ``Resource`` instance has an
|
||||
owner (which is typically the object that is looking for the resource), but
|
||||
specific resource types may define other parameters that describe an instance of
|
||||
that resource (such as file names, URLs, etc).
|
||||
|
||||
An object looking for a resource invokes a resource resolver with an instance of
|
||||
``Resource`` describing the resource it is after. The resolver goes through the
|
||||
getters registered for that resource type in priority order attempting to obtain
|
||||
the resource; once the resource is obtained, it is returned to the calling
|
||||
object. If none of the registered getters could find the resource,
|
||||
``NotFoundError`` is raised (or ``None`` is returned instead, if invoked with
|
||||
``strict=False``).
|
||||
|
||||
The most common kind of object looking for resources is a ``Workload``, and the
|
||||
``Workload`` class defines
|
||||
:py:meth:`wa.framework.workload.Workload.init_resources` method, which may be
|
||||
overridden by subclasses to perform resource resolution. For example, a workload
|
||||
looking for an executable file would do so like this::
|
||||
|
||||
from wa import Workload
|
||||
from wa import Executable
|
||||
|
||||
class MyBenchmark(Workload):
|
||||
|
||||
# ...
|
||||
|
||||
def init_resources(self, resolver):
|
||||
resource = Executable(self, self.target.abi, 'my_benchmark')
|
||||
host_exe = resolver.get(resource)
|
||||
|
||||
# ...
|
||||
|
||||
|
||||
Currently available resource types are defined in :py:mod:`wa.framework.resources`.
|
||||
|
||||
.. _deploying-executables:
|
||||
|
||||
Deploying executables to a target
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Some targets may have certain restrictions on where executable binaries may be
|
||||
placed and how they should be invoked. To ensure your plugin works with as
|
||||
wide a range of targets as possible, you should use WA APIs for deploying and
|
||||
invoking executables on a target, as outlined below.
|
||||
|
||||
As with other resources, host-side paths to the executable binary to be deployed
|
||||
should be obtained via the :ref:`resource resolver <resource-resolution>`. A
|
||||
special resource type, ``Executable`` is used to identify a binary to be
|
||||
deployed. This is similar to the regular ``File`` resource, however it takes an
|
||||
additional parameter that specifies the ABI for which the executable was
|
||||
compiled for.
|
||||
|
||||
In order for the binary to be obtained in this way, it must be stored in one of
|
||||
the locations scanned by the resource resolver in a directory structure
|
||||
``<root>/bin/<abi>/<binary>`` (where ``root`` is the base resource location to
|
||||
be searched, e.g. ``~/.workload_automation/dependencies/<plugin name>``, and
|
||||
``<abi>`` is the ABI for which the executable has been compiled, as returned by
|
||||
``self.target.abi``).
|
||||
|
||||
Once the path to the host-side binary has been obtained, it may be deployed
|
||||
using one of two methods from a
|
||||
`Target <http://devlib.readthedocs.io/en/latest/target.html>`_ instance --
|
||||
``install`` or ``install_if_needed``. The latter will check whether a version of that
|
||||
binary has been previously deployed by WA and will not try to re-install.
|
||||
|
||||
.. code:: python
|
||||
|
||||
from wa import Executable
|
||||
|
||||
host_binary = context.get(Executable(self, self.target.abi, 'some_binary'))
|
||||
target_binary = self.target.install_if_needed(host_binary)
|
||||
|
||||
|
||||
.. note:: Please also note that the check is done based solely on the binary name.
|
||||
For more information please see the devlib
|
||||
`documentation <http://devlib.readthedocs.io/en/latest/target.html#Target.install_if_needed>`_.
|
||||
|
||||
Both of the above methods will return the path to the installed binary on the
|
||||
target. The executable should be invoked *only* via that path; do **not** assume
|
||||
that it will be in ``PATH`` on the target (or that the executable with the same
|
||||
name in ``PATH`` is the version deployed by WA).
|
||||
|
||||
For more information on how to implement this, please see the
|
||||
:ref:`how to guide <deploying-executables-example>`.
|
||||
|
||||
|
||||
Deploying assets
|
||||
-----------------
|
||||
WA provides a generic mechanism for deploying assets during workload initialization.
|
||||
WA will automatically try to retrieve and deploy each asset to the target's working directory
|
||||
that is contained in a workloads ``deployable_assets`` attribute stored as a list.
|
||||
|
||||
If the parameter ``cleanup_assets`` is set then any asset deployed will be removed
|
||||
again at the end of the run.
|
||||
|
||||
If the workload requires a custom deployment mechanism the ``deploy_assets``
|
||||
method can be overridden for that particular workload, in which case, either
|
||||
additional assets should have their on target paths added to the workload's
|
||||
``deployed_assets`` attribute or the corresponding ``remove_assets`` method
|
||||
should also be implemented.
|
||||
|
||||
.. _instrument-reference:
|
||||
|
||||
Adding an Instrument
|
||||
---------------------
|
||||
Instruments can be used to collect additional measurements during workload
|
||||
execution (e.g. collect power readings). An instrument can hook into almost any
|
||||
stage of workload execution. Any new instrument should be a subclass of
|
||||
Instrument and it must have a name. When a new instrument is added to Workload
|
||||
Automation, the methods of the new instrument will be found automatically and
|
||||
hooked up to the supported signals. Once a signal is broadcasted, the
|
||||
corresponding registered method is invoked.
|
||||
|
||||
Each method in ``Instrument`` must take two arguments, which are ``self`` and
|
||||
``context``. Supported methods and their corresponding signals can be found in
|
||||
the :ref:`Signals Documentation <instruments_method_map>`. To make
|
||||
implementations easier and common, the basic steps to add new instrument is
|
||||
similar to the steps to add new workload and an example can be found in the
|
||||
:ref:`How To <adding-an-instrument-example>` section.
|
||||
|
||||
.. _instrument-api:
|
||||
|
||||
To implement your own instrument the relevant methods of the interface shown
|
||||
below should be implemented:
|
||||
|
||||
:name:
|
||||
|
||||
The name of the instrument, this must be unique to WA.
|
||||
|
||||
:description:
|
||||
|
||||
A description of what the instrument can be used for.
|
||||
|
||||
:parameters:
|
||||
|
||||
A list of additional :class:`Parameters` the instrument can take.
|
||||
|
||||
:initialize(context):
|
||||
|
||||
This method will only be called once during the workload run
|
||||
therefore operations that only need to be performed initially should
|
||||
be performed here for example pushing the files to the target device,
|
||||
installing them.
|
||||
|
||||
:setup(context):
|
||||
|
||||
This method is invoked after the workload is setup. All the
|
||||
necessary setup should go inside this method. Setup, includes
|
||||
operations like clearing logs, additional configuration etc.
|
||||
|
||||
:start(context):
|
||||
|
||||
It is invoked just before the workload start execution. Here is
|
||||
where instrument measurement start being registered/taken.
|
||||
|
||||
:stop(context):
|
||||
|
||||
It is invoked just after the workload execution stops and where
|
||||
the measurements should stop being taken/registered.
|
||||
|
||||
:update_output(context):
|
||||
|
||||
This method is invoked after the workload updated its result and
|
||||
where the taken measures should be added to the result so it can be
|
||||
processed by WA.
|
||||
|
||||
:teardown(context):
|
||||
|
||||
It is invoked after the workload is torn down. It is a good place
|
||||
to clean any logs generated by the instrument.
|
||||
|
||||
:finalize(context):
|
||||
|
||||
This method is the complement to the initialize method and will also
|
||||
only be called once so should be used to deleting/uninstalling files
|
||||
pushed to the device.
|
||||
|
||||
|
||||
This is similar to a ``Workload``, except all methods are optional. In addition to
|
||||
the workload-like methods, instruments can define a number of other methods that
|
||||
will get invoked at various points during run execution. The most useful of
|
||||
which is perhaps ``initialize`` that gets invoked after the device has been
|
||||
initialised for the first time, and can be used to perform one-time setup (e.g.
|
||||
copying files to the device -- there is no point in doing that for each
|
||||
iteration). The full list of available methods can be found in
|
||||
:ref:`Signals Documentation <instruments_method_map>`.
|
||||
|
||||
.. _prioritization:
|
||||
|
||||
Prioritization
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
Callbacks (e.g. ``setup()`` methods) for all instruments get executed at the
|
||||
same point during workload execution, one after another. The order in which the
|
||||
callbacks get invoked should be considered arbitrary and should not be relied
|
||||
on (e.g. you cannot expect that just because instrument A is listed before
|
||||
instrument B in the config, instrument A's callbacks will run first).
|
||||
|
||||
In some cases (e.g. in ``start()`` and ``stop()`` methods), it is important to
|
||||
ensure that a particular instrument's callbacks run as closely as possible to the
|
||||
workload's invocations in order to maintain accuracy of readings; or,
|
||||
conversely, that a callback is executed after the others, because it takes a
|
||||
long time and may throw off the accuracy of other instruments. You can do
|
||||
this by using decorators on the appropriate methods. The available decorators are:
|
||||
``very_slow``, ``slow``, ``normal``, ``fast``, ``very_fast``, with ``very_fast``
|
||||
running closest to the workload invocation and ``very_slow`` running furthest
|
||||
away. For example::
|
||||
|
||||
from wa import very_fast
|
||||
# ..
|
||||
|
||||
class PreciseInstrument(Instrument):
|
||||
|
||||
# ...
|
||||
@very_fast
|
||||
def start(self, context):
|
||||
pass
|
||||
|
||||
@very_fast
|
||||
def stop(self, context):
|
||||
pass
|
||||
|
||||
# ...
|
||||
|
||||
``PreciseInstrument`` will be started after all other instruments (i.e.
|
||||
*just* before the workload runs), and it will be stopped before all other
|
||||
instruments (i.e. *just* after the workload runs).
|
||||
|
||||
If more than one active instrument has specified fast (or slow) callbacks, then
|
||||
their execution order with respect to each other is not guaranteed. In general,
|
||||
having a lot of instruments enabled is going to negatively affect the
|
||||
readings. The best way to ensure accuracy of measurements is to minimize the
|
||||
number of active instruments (perhaps doing several identical runs with
|
||||
different instruments enabled).
|
||||
|
||||
Example
|
||||
^^^^^^^
|
||||
|
||||
Below is a simple instrument that measures the execution time of a workload::
|
||||
|
||||
class ExecutionTimeInstrument(Instrument):
|
||||
"""
|
||||
Measure how long it took to execute the run() methods of a Workload.
|
||||
|
||||
"""
|
||||
|
||||
name = 'execution_time'
|
||||
|
||||
def initialize(self, context):
|
||||
self.start_time = None
|
||||
self.end_time = None
|
||||
|
||||
@very_fast
|
||||
def start(self, context):
|
||||
self.start_time = time.time()
|
||||
|
||||
@very_fast
|
||||
def stop(self, context):
|
||||
self.end_time = time.time()
|
||||
|
||||
def update_output(self, context):
|
||||
execution_time = self.end_time - self.start_time
|
||||
context.add_metric('execution_time', execution_time, 'seconds')
|
||||
|
||||
|
||||
.. include:: developer_information/developer_guide/instrument_method_map.rst
|
||||
|
||||
.. _adding-an-output-processor:
|
||||
|
||||
Adding an Output processor
|
||||
----------------------------
|
||||
|
||||
An output processor is responsible for processing the results. This may
|
||||
involve formatting and writing them to a file, uploading them to a database,
|
||||
generating plots, etc. WA comes with a few output processors that output
|
||||
results in a few common formats (such as csv or JSON).
|
||||
|
||||
You can add your own output processors by creating a Python file in
|
||||
``~/.workload_automation/plugins`` with a class that derives from
|
||||
:class:`wa.OutputProcessor <wa.framework.processor.OutputProcessor>`, and should
|
||||
implement the relevant methods shown below. For more information, please
|
||||
see the
|
||||
:ref:`Adding an Output Processor <adding-an-output-processor-example>` section.
|
||||
|
||||
:name:
|
||||
|
||||
The name of the output processor, this must be unique to WA.
|
||||
|
||||
:description:
|
||||
|
||||
A description of what the output processor can be used for.
|
||||
|
||||
:parameters:
|
||||
|
||||
A list of additional :class:`Parameters` the output processor can take.
|
||||
|
||||
:initialize(context):
|
||||
|
||||
This method will only be called once during the workload run
|
||||
therefore operations that only need to be performed initially should
|
||||
be performed here.
|
||||
|
||||
:process_job_output(output, target_info, run_output):
|
||||
|
||||
This method should be used to perform the processing of the
|
||||
output from an individual job output. This is where any
|
||||
additional artifacts should be generated if applicable.
|
||||
|
||||
:export_job_output(output, target_info, run_output):
|
||||
|
||||
This method should be used to perform the exportation of the
|
||||
existing data collected/generated for an individual job. E.g.
|
||||
uploading them to a database etc.
|
||||
|
||||
:process_run_output(output, target_info):
|
||||
|
||||
This method should be used to perform the processing of the
|
||||
output from the run as a whole. This is where any
|
||||
additional artifacts should be generated if applicable.
|
||||
|
||||
:export_run_output(output, target_info):
|
||||
|
||||
This method should be used to perform the exportation of the
|
||||
existing data collected/generated for the run as a whole. E.g.
|
||||
uploading them to a database etc.
|
||||
|
||||
:finalize(context):
|
||||
|
||||
This method is the complement to the initialize method and will also
|
||||
only be called once.
|
||||
|
||||
|
||||
The method names should be fairly self-explanatory. The difference between
|
||||
"process" and "export" methods is that export methods will be invoked after
|
||||
process methods for all output processors have been generated. Process methods
|
||||
may generate additional artifacts (metrics, files, etc.), while export methods
|
||||
should not -- they should only handle existing results (upload them to a
|
||||
database, archive on a filer, etc).
|
||||
|
||||
The output object passed to job methods is an instance of
|
||||
:class:`wa.framework.output.JobOutput`, the output object passed to run methods
|
||||
is an instance of :class:`wa.RunOutput <wa.framework.output.RunOutput>`.
|
||||
|
||||
|
||||
Adding a Resource Getter
|
||||
------------------------
|
||||
|
||||
A resource getter is a plugin that is designed to retrieve a resource
|
||||
(binaries, APK files or additional workload assets). Resource getters are invoked in
|
||||
priority order until one returns the desired resource.
|
||||
|
||||
If you want WA to look for resources somewhere it doesn't by default (e.g. you
|
||||
have a repository of APK files), you can implement a getter for the resource and
|
||||
register it with a higher priority than the standard WA getters, so that it gets
|
||||
invoked first.
|
||||
|
||||
Instances of a resource getter should implement the following interface::
|
||||
|
||||
class ResourceGetter(Plugin):
|
||||
|
||||
name = None
|
||||
|
||||
def register(self, resolver):
|
||||
raise NotImplementedError()
|
||||
|
||||
The getter should define a name for itself (as with all plugins), in addition it
|
||||
should implement the ``register`` method. This involves registering a method
|
||||
with the resolver that should be called when trying to retrieve a resource
|
||||
(typically ``get``) along with its priority (see `Getter Prioritization`_
|
||||
below). That method should return an instance of the resource that
|
||||
has been discovered (what "instance" means depends on the resource, e.g. it
|
||||
could be a file path), or ``None`` if this getter was unable to discover
|
||||
that resource.
|
||||
|
||||
Getter Prioritization
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
A priority is an integer with higher numeric values indicating a higher
|
||||
priority. The following standard priority aliases are defined for getters:
|
||||
|
||||
|
||||
:preferred: Take this resource in favour of the environment resource.
|
||||
:local: Found somewhere under ~/.workload_automation/ or equivalent, or
|
||||
from environment variables, external configuration files, etc.
|
||||
These will override resource supplied with the package.
|
||||
:lan: Resource will be retrieved from a locally mounted remote location
|
||||
(such as samba share)
|
||||
:remote: Resource will be downloaded from a remote location (such as an HTTP
|
||||
server)
|
||||
:package: Resource provided with the package.
|
||||
|
||||
These priorities are defined as class members of
|
||||
:class:`wa.framework.resource.SourcePriority`, e.g. ``SourcePriority.preferred``.
|
||||
|
||||
Most getters in WA will be registered with either ``local`` or
|
||||
``package`` priorities. So if you want your getter to override the default, it
|
||||
should typically be registered as ``preferred``.
|
||||
|
||||
You don't have to stick to standard priority levels (though you should, unless
|
||||
there is a good reason). Any integer is a valid priority. The standard priorities
|
||||
range from 0 to 40 in increments of 10.
|
||||
|
||||
Example
|
||||
~~~~~~~
|
||||
|
||||
The following is an implementation of a getter that searches for files in the
|
||||
user's dependencies directory, typically
|
||||
``~/.workload_automation/dependencies/<workload_name>``. It uses the
|
||||
``get_from_location`` method to filter the available files in the provided
|
||||
directory appropriately::
|
||||
|
||||
import os
|
||||
|
||||
from wa import settings
|
||||
from wa.framework.resource import ResourceGetter, SourcePriority
|
||||
from wa.framework.getters import get_from_location
|
||||
from wa.utils.misc import ensure_directory_exists as _d
|
||||
|
||||
class UserDirectory(ResourceGetter):
|
||||
|
||||
name = 'user'
|
||||
|
||||
def register(self, resolver):
|
||||
resolver.register(self.get, SourcePriority.local)
|
||||
|
||||
def get(self, resource):
|
||||
basepath = settings.dependencies_directory
|
||||
directory = _d(os.path.join(basepath, resource.owner.name))
|
||||
return get_from_location(directory, resource)
|
||||
|
||||
.. _adding_a_target:
|
||||
|
||||
Adding a Target
|
||||
---------------
|
||||
|
||||
In WA3, a 'target' consists of a platform and a devlib target. The
|
||||
implementations of the targets are located in ``devlib``. WA3 will instantiate a
|
||||
devlib target passing relevant parameters parsed from the configuration. For
|
||||
more information about devlib targets please see `the documentation
|
||||
<http://devlib.readthedocs.io/en/latest/target.html>`_.
|
||||
|
||||
The currently available platforms are:
|
||||
:generic: The 'standard' platform implementation of the target, this should
|
||||
work for the majority of use cases.
|
||||
:juno: A platform implementation specifically for the juno.
|
||||
:tc2: A platform implementation specifically for the tc2.
|
||||
:gem5: A platform implementation to interact with a gem5 simulation.
|
||||
|
||||
The currently available targets from devlib are:
|
||||
:linux: A device running a Linux based OS.
|
||||
:android: A device running Android OS.
|
||||
:local: Used to run locally on a linux based host.
|
||||
:chromeos: A device running ChromeOS, supporting an android container if available.
|
||||
|
||||
For an example of adding you own customized version of an existing devlib target,
|
||||
please see the how to section :ref:`Adding a Custom Target <adding-custom-target-example>`.
|
||||
|
||||
|
||||
Other Plugin Types
|
||||
---------------------
|
||||
|
||||
In addition to plugin types covered above, there are few other, more
|
||||
specialized ones. They will not be covered in as much detail. Most of them
|
||||
expose relatively simple interfaces with only a couple of methods and it is
|
||||
expected that if the need arises to extend them, the API-level documentation
|
||||
that accompanies them, in addition to what has been outlined here, should
|
||||
provide enough guidance.
|
||||
|
||||
:commands: This allows extending WA with additional sub-commands (to supplement
|
||||
existing ones outlined in the :ref:`invocation` section).
|
||||
:modules: Modules are "plugins for plugins". They can be loaded by other
|
||||
plugins to expand their functionality (for example, a flashing
|
||||
module may be loaded by a device in order to support flashing).
|
||||
|
||||
|
||||
Packaging Your Plugins
|
||||
----------------------
|
||||
|
||||
If you have written a bunch of plugins, and you want to make it easy to
|
||||
deploy them to new systems and/or to update them on existing systems, you can
|
||||
wrap them in a Python package. You can use ``wa create package`` command to
|
||||
generate appropriate boiler plate. This will create a ``setup.py`` and a
|
||||
directory for your package that you can place your plugins into.
|
||||
|
||||
For example, if you have a workload inside ``my_workload.py`` and an output
|
||||
processor in ``my_output_processor.py``, and you want to package them as
|
||||
``my_wa_exts`` package, first run the create command ::
|
||||
|
||||
wa create package my_wa_exts
|
||||
|
||||
This will create a ``my_wa_exts`` directory which contains a
|
||||
``my_wa_exts/setup.py`` and a subdirectory ``my_wa_exts/my_wa_exts`` which is
|
||||
the package directory for your plugins (you can rename the top-level
|
||||
``my_wa_exts`` directory to anything you like -- it's just a "container" for the
|
||||
setup.py and the package directory). Once you have that, you can then copy your
|
||||
plugins into the package directory, creating
|
||||
``my_wa_exts/my_wa_exts/my_workload.py`` and
|
||||
``my_wa_exts/my_wa_exts/my_output_processor.py``. If you have a lot of
|
||||
plugins, you might want to organize them into subpackages, but only the
|
||||
top-level package directory is created by default, and it is OK to have
|
||||
everything in there.
|
||||
|
||||
.. note:: When discovering plugins through this mechanism, WA traverses the
|
||||
Python module/submodule tree, not the directory structure, therefore,
|
||||
if you are going to create subdirectories under the top level directory
|
||||
created for you, it is important that you make sure they are valid
|
||||
Python packages; i.e. each subdirectory must contain a __init__.py
|
||||
(even if blank) in order for the code in that directory and its
|
||||
subdirectories to be discoverable.
|
||||
|
||||
At this stage, you may want to edit ``params`` structure near the bottom of
|
||||
the ``setup.py`` to add correct author, license and contact information (see
|
||||
"Writing the Setup Script" section in standard Python documentation for
|
||||
details). You may also want to add a README and/or a COPYING file at the same
|
||||
level as the setup.py. Once you have the contents of your package sorted,
|
||||
you can generate the package by running ::
|
||||
|
||||
cd my_wa_exts
|
||||
python setup.py sdist
|
||||
|
||||
This will generate ``my_wa_exts/dist/my_wa_exts-0.0.1.tar.gz`` package which
|
||||
can then be deployed on the target system with standard Python package
|
||||
management tools, e.g. ::
|
||||
|
||||
sudo pip install my_wa_exts-0.0.1.tar.gz
|
||||
|
||||
As part of the installation process, the setup.py in the package, will write the
|
||||
package's name into ``~/.workload_automation/packages``. This will tell WA that
|
||||
the package contains plugins and it will load them the next time it runs.
|
||||
|
||||
.. note:: There are no uninstall hooks in ``setuptools``, so if you ever
|
||||
uninstall your WA plugins package, you will have to manually remove
|
||||
it from ``~/.workload_automation/packages`` otherwise WA will complain
|
||||
about a missing package next time you try to run it.
|
@ -1,29 +0,0 @@
|
||||
.. _developer_reference:
|
||||
|
||||
********************
|
||||
Developer Reference
|
||||
********************
|
||||
|
||||
.. contents::
|
||||
:depth: 3
|
||||
:local:
|
||||
|
||||
|
||||
.. include:: developer_information/developer_reference/framework_overview.rst
|
||||
|
||||
-----------------
|
||||
|
||||
.. include:: developer_information/developer_reference/plugins.rst
|
||||
|
||||
-----------------
|
||||
|
||||
.. include:: developer_information/developer_reference/revent.rst
|
||||
|
||||
-----------------
|
||||
|
||||
.. include:: developer_information/developer_reference/serialization.rst
|
||||
|
||||
-----------------
|
||||
|
||||
.. include:: developer_information/developer_reference/contributing.rst
|
||||
|
File diff suppressed because one or more lines are too long
Before (image error) Size: 42 KiB |
File diff suppressed because one or more lines are too long
Before (image error) Size: 74 KiB |
@ -1,189 +0,0 @@
|
||||
Contributing
|
||||
============
|
||||
|
||||
Code
|
||||
----
|
||||
|
||||
We welcome code contributions via GitHub pull requests. To help with
|
||||
maintainability of the code line we ask that the code uses a coding style
|
||||
consistent with the rest of WA code. Briefly, it is
|
||||
|
||||
- `PEP8 <https://www.python.org/dev/peps/pep-0008/>`_ with line length and block
|
||||
comment rules relaxed (the wrapper for PEP8 checker inside ``dev_scripts``
|
||||
will run it with appropriate configuration).
|
||||
- Four-space indentation (*no tabs!*).
|
||||
- Title-case for class names, underscore-delimited lower case for functions,
|
||||
methods, and variables.
|
||||
- Use descriptive variable names. Delimit words with ``'_'`` for readability.
|
||||
Avoid shortening words, skipping vowels, etc (common abbreviations such as
|
||||
"stats" for "statistics", "config" for "configuration", etc are OK). Do
|
||||
*not* use Hungarian notation (so prefer ``birth_date`` over ``dtBirth``).
|
||||
|
||||
New extensions should also follow implementation guidelines specified in the
|
||||
:ref:`writing-plugins` section of the documentation.
|
||||
|
||||
We ask that the following checks are performed on the modified code prior to
|
||||
submitting a pull request:
|
||||
|
||||
.. note:: You will need pylint and pep8 static checkers installed::
|
||||
|
||||
pip install pep8
|
||||
pip install pylint
|
||||
|
||||
It is recommended that you install via pip rather than through your
|
||||
distribution's package manager because the latter is likely to
|
||||
contain out-of-date version of these tools.
|
||||
|
||||
- ``./dev_scripts/pylint`` should be run without arguments and should produce no
|
||||
output (any output should be addressed by making appropriate changes in the
|
||||
code or adding a pylint ignore directive, if there is a good reason for
|
||||
keeping the code as is).
|
||||
- ``./dev_scripts/pep8`` should be run without arguments and should produce no
|
||||
output (any output should be addressed by making appropriate changes in the
|
||||
code).
|
||||
- If the modifications touch core framework (anything under ``wa/framework``), unit
|
||||
tests should be run using ``nosetests``, and they should all pass.
|
||||
|
||||
- If significant additions have been made to the framework, unit
|
||||
tests should be added to cover the new functionality.
|
||||
|
||||
- If modifications have been made to the UI Automation source of a workload, the
|
||||
corresponding APK should be rebuilt and submitted as part of the same pull
|
||||
request. This can be done via the ``build.sh`` script in the relevant
|
||||
``uiauto`` subdirectory.
|
||||
- If modifications have been made to documentation (this includes description
|
||||
attributes for Parameters and Extensions), documentation should be built to
|
||||
make sure there are no errors or warnings during the build process, and a visual inspection
|
||||
of new/updated sections in resulting HTML should be performed to ensure
|
||||
everything renders as expected.
|
||||
|
||||
Once your contribution is ready, please follow the instructions in `GitHub
|
||||
documentation <https://help.github.com/articles/creating-a-pull-request/>`_ to
|
||||
create a pull request.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
Documentation
|
||||
-------------
|
||||
|
||||
Headings
|
||||
~~~~~~~~
|
||||
|
||||
To allow for consistent headings to be used through out the document the
|
||||
following character sequences should be used when creating headings
|
||||
|
||||
::
|
||||
|
||||
=========
|
||||
Heading 1
|
||||
=========
|
||||
|
||||
Only used for top level headings which should also have an entry in the
|
||||
navigational side bar.
|
||||
|
||||
*********
|
||||
Heading 2
|
||||
*********
|
||||
|
||||
Main page heading used for page title, should not have a top level entry in the
|
||||
side bar.
|
||||
|
||||
Heading 3
|
||||
==========
|
||||
|
||||
Regular section heading.
|
||||
|
||||
Heading 4
|
||||
---------
|
||||
|
||||
Sub-heading.
|
||||
|
||||
Heading 5
|
||||
~~~~~~~~~
|
||||
|
||||
Heading 6
|
||||
^^^^^^^^^
|
||||
|
||||
Heading 7
|
||||
"""""""""
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
Configuration Listings
|
||||
~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
To keep a consistent style for presenting configuration options, the preferred
|
||||
style is to use a `Field List`.
|
||||
|
||||
(See: http://docutils.sourceforge.net/docs/user/rst/quickref.html#field-lists)
|
||||
|
||||
Example::
|
||||
|
||||
:parameter: My Description
|
||||
|
||||
Will render as:
|
||||
|
||||
:parameter: My Description
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
API Style
|
||||
~~~~~~~~~
|
||||
|
||||
When documenting an API the currently preferred style is to provide a short
|
||||
description of the class, followed by the attributes of the class in a
|
||||
`Definition List` followed by the methods using the `method` directive.
|
||||
|
||||
(See: http://docutils.sourceforge.net/docs/user/rst/quickref.html#definition-lists)
|
||||
|
||||
|
||||
Example::
|
||||
|
||||
API
|
||||
===
|
||||
|
||||
:class:`MyClass`
|
||||
----------------
|
||||
|
||||
:class:`MyClass` is an example class to demonstrate API documentation.
|
||||
|
||||
``attribute1``
|
||||
The first attribute of the example class.
|
||||
|
||||
``attribute2``
|
||||
Another attribute example.
|
||||
|
||||
methods
|
||||
"""""""
|
||||
|
||||
.. method:: MyClass.retrieve_output(name)
|
||||
|
||||
Retrieve the output for ``name``.
|
||||
|
||||
:param name: The output that should be returned.
|
||||
:return: An :class:`Output` object for ``name``.
|
||||
:raises NotFoundError: If no output can be found.
|
||||
|
||||
|
||||
Will render as:
|
||||
|
||||
:class:`MyClass` is an example class to demonstrate API documentation.
|
||||
|
||||
``attribute1``
|
||||
The first attribute of the example class.
|
||||
|
||||
``attribute2``
|
||||
Another attribute example.
|
||||
|
||||
methods
|
||||
^^^^^^^
|
||||
|
||||
.. method:: MyClass.retrieve_output(name)
|
||||
|
||||
Retrieve the output for ``name``.
|
||||
|
||||
:param name: The output that should be returned.
|
||||
:return: An :class:`Output` object for ``name``.
|
||||
:raises NotFoundError: If no output can be found.
|
@ -1,155 +0,0 @@
|
||||
Framework Overview
|
||||
==================
|
||||
|
||||
Execution Model
|
||||
---------------
|
||||
|
||||
At the high level, the execution model looks as follows:
|
||||
|
||||
.. image:: developer_information/developer_reference/WA_Execution.svg
|
||||
:scale: 100 %
|
||||
|
||||
After some initial setup, the framework initializes the device, loads and
|
||||
initializes instruments and output processors and begins executing jobs defined
|
||||
by the workload specs in the agenda. Each job executes in basic stages:
|
||||
|
||||
initialize
|
||||
Perform any once-per-run initialization of a workload instance, i.e.
|
||||
binary resource resolution.
|
||||
setup
|
||||
Initial setup for the workload is performed. E.g. required assets are
|
||||
deployed to the devices, required services or applications are launched,
|
||||
etc. Run time configuration of the device for the workload is also
|
||||
performed at this time.
|
||||
setup_rerun (apk based workloads only)
|
||||
For some apk based workloads the application is required to be started
|
||||
twice. If the ``requires_rerun`` attribute of the workload is set to
|
||||
``True`` then after the first setup method is called the application
|
||||
will be killed and then restarted. This method can then be used to
|
||||
perform any additional setup required.
|
||||
run
|
||||
This is when the workload actually runs. This is defined as the part of
|
||||
the workload that is to be measured. Exactly what happens at this stage
|
||||
depends entirely on the workload.
|
||||
extract results
|
||||
Extract any results that have been generated during the execution of the
|
||||
workload from the device back to the host. Any files pulled from
|
||||
the devices should be added as artifacts to the run context.
|
||||
update output
|
||||
Perform any required parsing and processing of any collected results and
|
||||
add any generated metrics to the run context.
|
||||
teardown
|
||||
Final clean up is performed, e.g. applications may be closed, files
|
||||
generated during execution deleted, etc.
|
||||
|
||||
Signals are dispatched (see :ref:`below <signal_dispatch>`) at each stage of
|
||||
workload execution, which installed instruments can hook into in order to
|
||||
collect measurements, alter workload execution, etc. Instruments implementation
|
||||
usually mirrors that of workloads, defining initialization, setup, teardown and
|
||||
output processing stages for a particular instrument. Instead of a ``run``
|
||||
method, instruments usually implement ``start`` and ``stop`` methods,
|
||||
which are triggered just before and just after a workload run. However, the signal
|
||||
dispatch mechanism gives a high degree of flexibility to instruments allowing
|
||||
them to hook into almost any stage of a WA run (apart from the very early
|
||||
initialization).
|
||||
|
||||
Metrics and artifacts generated by workloads and instruments are accumulated by
|
||||
the framework and are then passed to active output processors. This happens
|
||||
after each individual workload execution and at the end of the run. An output
|
||||
processor may choose to act at either or both of these points.
|
||||
|
||||
|
||||
Control Flow
|
||||
------------
|
||||
|
||||
This section goes into more detail explaining the relationship between the major
|
||||
components of the framework and how control passes between them during a run. It
|
||||
will only go through the major transitions and interactions and will not attempt
|
||||
to describe every single thing that happens.
|
||||
|
||||
.. note:: This is the control flow for the ``wa run`` command which is the main
|
||||
functionality of WA. Other commands are much simpler and most of what
|
||||
is described below does not apply to them.
|
||||
|
||||
#. :class:`wa.framework.entrypoint` parses the command from the arguments, creates a
|
||||
:class:`wa.framework.configuration.execution.ConfigManager` and executes the run
|
||||
command (:class:`wa.commands.run.RunCommand`) passing it the ConfigManager.
|
||||
#. Run command initializes the output directory and creates a
|
||||
:class:`wa.framework.configuration.parsers.AgendaParser` and will parse an
|
||||
agenda and populate the ConfigManager based on the command line arguments.
|
||||
Finally it instantiates a :class:`wa.framework.execution.Executor` and
|
||||
passes it the completed ConfigManager.
|
||||
#. The Executor uses the ConfigManager to create a
|
||||
:class:`wa.framework.configuration.core.RunConfiguration` and fully defines the
|
||||
configuration for the run (which will be serialised into ``__meta`` subdirectory
|
||||
under the output directory).
|
||||
#. The Executor proceeds to instantiate a TargetManager, used to handle the
|
||||
device connection and configuration, and a
|
||||
:class:`wa.framework.execution.ExecutionContext` which is used to track the
|
||||
current state of the run execution and also serves as a means of
|
||||
communication between the core framework and plugins. After this any required
|
||||
instruments and output processors are initialized and installed.
|
||||
#. Finally, the Executor instantiates a :class:`wa.framework.execution.Runner`,
|
||||
initializes its job queue with workload specs from the RunConfiguration, and
|
||||
kicks it off.
|
||||
#. The Runner performs the run time configuration of the device and goes
|
||||
through the workload specs (in the order defined by ``execution_order``
|
||||
setting), running each spec according to the execution model described in the
|
||||
previous section and sending signals (see below) at appropriate points during
|
||||
execution.
|
||||
#. At the end of the run, the control is briefly passed back to the Executor,
|
||||
which outputs a summary for the run.
|
||||
|
||||
|
||||
.. _signal_dispatch:
|
||||
|
||||
Signal Dispatch
|
||||
---------------
|
||||
|
||||
WA uses the `louie <https://github.com/11craft/louie/>`_ (formerly,
|
||||
pydispatcher) library for signal dispatch. Callbacks can be registered for
|
||||
signals emitted during the run. WA uses a version of louie that has been
|
||||
modified to introduce :ref:`priority <prioritization>` to registered callbacks
|
||||
(so that callbacks that are known to be slow can be registered with a lower
|
||||
priority and therefore do not interfere with other callbacks).
|
||||
|
||||
This mechanism is abstracted for instruments. Methods of an
|
||||
:class:`wa.framework.Instrument` subclass automatically get hooked to
|
||||
appropriate signals based on their names when the instrument is "installed"
|
||||
for the run. Priority can then be specified by adding ``extremely_fast``,
|
||||
``very_fast``, ``fast`` , ``slow``, ``very_slow`` or ``extremely_slow``
|
||||
:ref:`decorators <instruments_method_map>` to the method definitions.
|
||||
|
||||
The full list of method names and the signals they map to may be seen at the
|
||||
:ref:`instrument method map <instruments_method_map>`.
|
||||
|
||||
Signal dispatching mechanism may also be used directly, for example to
|
||||
dynamically register callbacks at runtime or allow plugins other than
|
||||
``Instruments`` to access stages of the run they are normally not aware of.
|
||||
|
||||
Signals can be either paired or non paired signals. Non paired signals are one
|
||||
off signals that are sent to indicate special events or transitions in execution
|
||||
stages have occurred for example ``TARGET_CONNECTED``. Paired signals are used to
|
||||
signify the start and end of a particular event. If the start signal has been
|
||||
sent the end signal is guaranteed to also be sent, whether the operation was a
|
||||
success or not; however, in the case of correct operation an additional success
|
||||
signal will also be sent. For example in the event of a successful reboot of the
|
||||
device, the following signals will be sent: ``BEFORE_REBOOT``,
|
||||
``SUCCESSFUL_REBOOT`` and ``AFTER_REBOOT``.
|
||||
|
||||
An overview of what signals are sent at which point during execution can be seen
|
||||
below. Most of the paired signals have been removed from the diagram for clarity
|
||||
and shown as being dispatched from a particular stage of execution, however in
|
||||
reality these signals will be sent just before and just after these stages are
|
||||
executed. As mentioned above for each of these signals there will be at least 2
|
||||
and up to 3 signals sent. If the "BEFORE_X" signal (sent just before the stage
|
||||
is run) is sent then the "AFTER_X" (sent just after the stage is run) signal is
|
||||
guaranteed to also be sent, and under normal operation a "SUCCESSFUL_X" signal
|
||||
is also sent just after stage has been completed. The diagram also lists the
|
||||
conditional signals that can be sent at any time during execution if something
|
||||
unexpected happens, for example an error occurs or the user aborts the run.
|
||||
|
||||
.. image:: developer_information/developer_reference/WA_Signal_Dispatch.svg
|
||||
:scale: 100 %
|
||||
|
||||
For more information see :ref:`Instrumentation Signal-Method Mapping <instruments_method_map>`.
|
@ -1,663 +0,0 @@
|
||||
.. plugins:
|
||||
|
||||
|
||||
Plugins
|
||||
=======
|
||||
|
||||
Workload Automation offers several plugin points (or plugin types). The most
|
||||
interesting of these are
|
||||
|
||||
:workloads: These are the tasks that get executed and measured on the device. These
|
||||
can be benchmarks, high-level use cases, or pretty much anything else.
|
||||
:targets: These are interfaces to the physical devices (development boards or end-user
|
||||
devices, such as smartphones) that use cases run on. Typically each model of a
|
||||
physical device would require its own interface class (though some functionality
|
||||
may be reused by subclassing from an existing base).
|
||||
:instruments: Instruments allow collecting additional data from workload execution (e.g.
|
||||
system traces). Instruments are not specific to a particular workload. Instruments
|
||||
can hook into any stage of workload execution.
|
||||
:output processors: These are used to format the results of workload execution once they have been
|
||||
collected. Depending on the callback used, these will run either after each
|
||||
iteration and/or at the end of the run, after all of the results have been
|
||||
collected.
|
||||
|
||||
You can create a plugin by subclassing the appropriate base class, defining
|
||||
appropriate methods and attributes, and putting the .py file containing the
|
||||
class into the "plugins" subdirectory under ``~/.workload_automation`` (or
|
||||
equivalent) where it will be automatically picked up by WA.
|
||||
|
||||
|
||||
Plugin Basics
|
||||
--------------
|
||||
|
||||
This section contains reference information common to plugins of all types.
|
||||
|
||||
.. _context:
|
||||
|
||||
The Context
|
||||
~~~~~~~~~~~
|
||||
|
||||
.. note:: For clarification on the meaning of "workload specification" "spec", "job"
|
||||
and "workload" and the distinction between them, please see the :ref:`glossary <glossary>`.
|
||||
|
||||
The majority of methods in plugins accept a context argument. This is an
|
||||
instance of :class:`wa.framework.execution.ExecutionContext`. It contains
|
||||
information about the current state of execution of WA and keeps track of things
|
||||
like which workload is currently running.
|
||||
|
||||
Notable methods of the context are:
|
||||
|
||||
:context.get_resource(resource, strict=True):
|
||||
This method should be used to retrieve a resource using the resource getters rather than using the ResourceResolver directly, as this method additionally records the hash of any found resource in the output metadata.
|
||||
|
||||
:context.add_artifact(name, host_file_path, kind, description=None, classifier=None):
|
||||
Plugins can add :ref:`artifacts <artifact>` of various kinds to the run
|
||||
output directory for WA and associate them with a description and/or
|
||||
:ref:`classifier <classifiers>`.
|
||||
|
||||
:context.add_metric(name, value, units=None, lower_is_better=False, classifiers=None):
|
||||
This method should be used to add :ref:`metrics <metrics>` that have been
|
||||
generated from a workload, this will allow WA to process the results
|
||||
accordingly depending on which output processors are enabled.
|
||||
|
||||
Notable attributes of the context are:
|
||||
|
||||
:context.workload:
|
||||
:class:`wa.framework.workload` object that is currently being executed.
|
||||
|
||||
:context.tm:
|
||||
This is the target manager that can be used to access various information
|
||||
about the target including initialization parameters.
|
||||
|
||||
:context.current_job:
|
||||
This is an instance of :class:`wa.framework.job.Job` and contains all
|
||||
the information relevant to the workload job currently being executed.
|
||||
|
||||
:context.current_job.spec:
|
||||
The current workload specification being executed. This is an
|
||||
instance of :class:`wa.framework.configuration.core.JobSpec`
|
||||
and defines the workload and the parameters under which it is
|
||||
being executed.
|
||||
|
||||
:context.current_job.current_iteration:
|
||||
The current iteration of the spec that is being executed. Note that this
|
||||
is the iteration for that spec, i.e. the number of times that spec has
|
||||
been run, *not* the total number of all iterations have been executed so
|
||||
far.
|
||||
|
||||
:context.job_output:
|
||||
This is the output object for the current iteration which
|
||||
is an instance of :class:`wa.framework.output.JobOutput`. It contains
|
||||
the status of the iteration as well as the metrics and artifacts
|
||||
generated by the job.
|
||||
|
||||
|
||||
In addition to these, context also defines a few useful paths (see below).
|
||||
|
||||
|
||||
Paths
|
||||
~~~~~
|
||||
|
||||
You should avoid using hard-coded absolute paths in your plugins whenever
|
||||
possible, as they make your code too dependent on a particular environment and
|
||||
may mean having to make adjustments when moving to new (host and/or device)
|
||||
platforms. To help avoid hard-coded absolute paths, WA defines a number of
|
||||
standard locations. You should strive to define your paths relative
|
||||
to one of these.
|
||||
|
||||
On the host
|
||||
^^^^^^^^^^^
|
||||
|
||||
Host paths are available through the context object, which is passed to most
|
||||
plugin methods.
|
||||
|
||||
context.run_output_directory
|
||||
This is the top-level output directory for all WA results (by default,
|
||||
this will be "wa_output" in the directory in which WA was invoked).
|
||||
|
||||
context.output_directory
|
||||
This is the output directory for the current iteration. This will be an
|
||||
iteration-specific subdirectory under the main results location. If
|
||||
there is no current iteration (e.g. when processing overall run results)
|
||||
this will point to the same location as ``run_output_directory``.
|
||||
|
||||
|
||||
Additionally, the global ``wa.settings`` object exposes one other location:
|
||||
|
||||
settings.dependency_directory
|
||||
this is the root directory for all plugin dependencies (e.g. media
|
||||
files, assets etc) that are not included within the plugin itself.
|
||||
|
||||
As per Python best practice, it is recommended that methods and values in
|
||||
``os.path`` standard library module are used for host path manipulation.
|
||||
|
||||
On the target
|
||||
^^^^^^^^^^^^^
|
||||
|
||||
Workloads and instruments have a ``target`` attribute, which is an interface to
|
||||
the target used by WA. It defines the following location:
|
||||
|
||||
target.working_directory
|
||||
This is the directory for all WA-related files on the target. All files
|
||||
deployed to the target should be pushed to somewhere under this location
|
||||
(the only exception being executables installed with ``target.install``
|
||||
method).
|
||||
|
||||
Since there could be a mismatch between path notation used by the host and the
|
||||
target, the ``os.path`` modules should *not* be used for on-target path
|
||||
manipulation. Instead, the target has an equivalent module exposed through
|
||||
``target.path`` attribute. This has all the same attributes and behaves the
|
||||
same way as ``os.path``, but is guaranteed to produce valid paths for the target,
|
||||
irrespective of the host's path notation. For example:
|
||||
|
||||
.. code:: python
|
||||
|
||||
result_file = self.target.path.join(self.target.working_directory, "result.txt")
|
||||
self.command = "{} -a -b -c {}".format(target_binary, result_file)
|
||||
|
||||
.. note:: Output processors, unlike workloads and instruments, do not have their
|
||||
own target attribute as they are designed to be able to be run offline.
|
||||
|
||||
.. _plugin-parameters:
|
||||
|
||||
Parameters
|
||||
~~~~~~~~~~~
|
||||
|
||||
All plugins can be parametrized. Parameters are specified using
|
||||
``parameters`` class attribute. This should be a list of
|
||||
:class:`wa.framework.plugin.Parameter` instances. The following attributes can be
|
||||
specified on parameter creation:
|
||||
|
||||
:name:
|
||||
This is the only mandatory argument. The name will be used to create a
|
||||
corresponding attribute in the plugin instance, so it must be a valid
|
||||
Python identifier.
|
||||
|
||||
:kind:
|
||||
This is the type of the value of the parameter. This must be a
|
||||
callable. Normally this should be a standard Python type, e.g. ``int``
|
||||
or ``float``, or one of the types defined in :mod:`wa.utils.types`.
|
||||
If not explicitly specified, this will default to ``str``.
|
||||
|
||||
.. note:: Irrespective of the ``kind`` specified, ``None`` is always a
|
||||
valid value for a parameter. If you don't want to allow
|
||||
``None``, then set ``mandatory`` (see below) to ``True``.
|
||||
|
||||
:allowed_values:
|
||||
A list of the only allowed values for this parameter.
|
||||
|
||||
.. note:: For composite types, such as ``list_of_strings`` or
|
||||
``list_of_ints`` in :mod:`wa.utils.types`, each element of
|
||||
the value will be checked against ``allowed_values`` rather
|
||||
than the composite value itself.
|
||||
|
||||
:default:
|
||||
The default value to be used for this parameter if one has not been
|
||||
specified by the user. Defaults to ``None``.
|
||||
|
||||
:mandatory:
|
||||
A ``bool`` indicating whether this parameter is mandatory. Setting this
|
||||
to ``True`` will make ``None`` an illegal value for the parameter.
|
||||
Defaults to ``False``.
|
||||
|
||||
.. note:: Specifying a ``default`` will mean that this parameter will,
|
||||
effectively, be ignored (unless the user sets the param to ``None``).
|
||||
|
||||
.. note:: Mandatory parameters are *bad*. If at all possible, you should
|
||||
strive to provide a sensible ``default`` or to make do without
|
||||
the parameter. Only when the param is absolutely necessary,
|
||||
and there really is no sensible default that could be given
|
||||
(e.g. something like login credentials), should you consider
|
||||
making it mandatory.
|
||||
|
||||
:constraint:
|
||||
This is an additional constraint to be enforced on the parameter beyond
|
||||
its type or fixed allowed values set. This should be a predicate (a function
|
||||
that takes a single argument -- the user-supplied value -- and returns
|
||||
a ``bool`` indicating whether the constraint has been satisfied).
|
||||
|
||||
:override:
|
||||
A parameter name must be unique not only within a plugin but also
|
||||
within that plugin's class hierarchy. If you try to declare a parameter
|
||||
with the same name as one that already exists, you will get an error. If you do
|
||||
want to override a parameter from further up in the inheritance
|
||||
hierarchy, you can indicate that by setting ``override`` attribute to
|
||||
``True``.
|
||||
|
||||
When overriding, you do not need to specify every other attribute of the
|
||||
parameter, just the ones you want to override. Values for the rest will
|
||||
be taken from the parameter in the base class.
|
||||
|
||||
|
||||
Validation and cross-parameter constraints
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
A plugin will get validated at some point after construction. When exactly
|
||||
this occurs depends on the plugin type, but it *will* be validated before it
|
||||
is used.
|
||||
|
||||
You can implement ``validate`` method in your plugin (that takes no arguments
|
||||
beyond the ``self``) to perform any additional *internal* validation in your
|
||||
plugin. By "internal", I mean that you cannot make assumptions about the
|
||||
surrounding environment (e.g. that the device has been initialized).
|
||||
|
||||
The contract for ``validate`` method is that it should raise an exception
|
||||
(either ``wa.framework.exception.ConfigError`` or plugin-specific exception type -- see
|
||||
further on this page) if some validation condition has not been, and cannot be, met.
|
||||
If the method returns without raising an exception, then the plugin is in a
|
||||
valid internal state.
|
||||
|
||||
Note that ``validate`` can be used not only to verify, but also to impose a
|
||||
valid internal state. In particular, this is where cross-parameter constraints can
|
||||
be resolved. If the ``default`` or ``allowed_values`` of one parameter depend on
|
||||
another parameter, there is no way to express that declaratively when specifying
|
||||
the parameters. In that case the dependent attribute should be left unspecified
|
||||
on creation and should instead be set inside ``validate``.
|
||||
|
||||
Logging
|
||||
~~~~~~~
|
||||
|
||||
Every plugin class has its own logger that you can access through
|
||||
``self.logger`` inside the plugin's methods. Generally, a :class:`Target` will
|
||||
log everything it is doing, so you shouldn't need to add much additional logging
|
||||
for device actions. However you might want to log additional information, e.g.
|
||||
what settings your plugin is using, what it is doing on the host, etc.
|
||||
(Operations on the host will not normally be logged, so your plugin should
|
||||
definitely log what it is doing on the host). One situation in particular where
|
||||
you should add logging is before doing something that might take a significant
|
||||
amount of time, such as downloading a file.
|
||||
|
||||
|
||||
Documenting
|
||||
~~~~~~~~~~~
|
||||
|
||||
All plugins and their parameter should be documented. For plugins
|
||||
themselves, this is done through ``description`` class attribute. The convention
|
||||
for a plugin description is that the first paragraph should be a short
|
||||
summary description of what the plugin does and why one would want to use it
|
||||
(among other things, this will get extracted and used by ``wa list`` command).
|
||||
Subsequent paragraphs (separated by blank lines) can then provide a more
|
||||
detailed description, including any limitations and setup instructions.
|
||||
|
||||
For parameters, the description is passed as an argument on creation. Please
|
||||
note that if ``default``, ``allowed_values``, or ``constraint``, are set in the
|
||||
parameter, they do not need to be explicitly mentioned in the description (wa
|
||||
documentation utilities will automatically pull those). If the ``default`` is set
|
||||
in ``validate`` or additional cross-parameter constraints exist, this *should*
|
||||
be documented in the parameter description.
|
||||
|
||||
Both plugins and their parameters should be documented using reStructureText
|
||||
markup (standard markup for Python documentation). See:
|
||||
|
||||
http://docutils.sourceforge.net/rst.html
|
||||
|
||||
Aside from that, it is up to you how you document your plugin. You should try
|
||||
to provide enough information so that someone unfamiliar with your plugin is
|
||||
able to use it, e.g. you should document all settings and parameters your
|
||||
plugin expects (including what the valid values are).
|
||||
|
||||
|
||||
Error Notification
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
When you detect an error condition, you should raise an appropriate exception to
|
||||
notify the user. The exception would typically be :class:`ConfigError` or
|
||||
(depending the type of the plugin)
|
||||
:class:`WorkloadError`/:class:`DeviceError`/:class:`InstrumentError`/:class:`OutputProcessorError`.
|
||||
All these errors are defined in :mod:`wa.framework.exception` module.
|
||||
|
||||
A :class:`ConfigError` should be raised where there is a problem in configuration
|
||||
specified by the user (either through the agenda or config files). These errors
|
||||
are meant to be resolvable by simple adjustments to the configuration (and the
|
||||
error message should suggest what adjustments need to be made). For all other
|
||||
errors, such as missing dependencies, mis-configured environment, problems
|
||||
performing operations, etc., the plugin type-specific exceptions should be
|
||||
used.
|
||||
|
||||
If the plugin itself is capable of recovering from the error and carrying
|
||||
on, it may make more sense to log an ERROR or WARNING level message using the
|
||||
plugin's logger and to continue operation.
|
||||
|
||||
.. _metrics:
|
||||
|
||||
Metrics
|
||||
~~~~~~~
|
||||
This is what WA uses to store a single metric collected from executing a workload.
|
||||
|
||||
:name: the name of the metric. Uniquely identifies the metric
|
||||
within the results.
|
||||
:value: The numerical value of the metric for this execution of a
|
||||
workload. This can be either an int or a float.
|
||||
:units: Units for the collected value. Can be None if the value
|
||||
has no units (e.g. it's a count or a standardised score).
|
||||
:lower_is_better: Boolean flag indicating whether lower values are
|
||||
better than higher ones. Defaults to False.
|
||||
:classifiers: A set of key-value pairs to further classify this
|
||||
metric beyond current iteration (e.g. this can be used
|
||||
to identify sub-tests).
|
||||
|
||||
Metrics can be added to WA output via the :ref:`context <context>`:
|
||||
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
context.add_metric("score", 9001)
|
||||
context.add_metric("time", 2.35, "seconds", lower_is_better=True)
|
||||
|
||||
You only need to specify the name and the value for the metric. Units and
|
||||
classifiers are optional, and, if not specified otherwise, it will be assumed
|
||||
that higher values are better (``lower_is_better=False``).
|
||||
|
||||
The metric will be added to the result for the current job, if there is one;
|
||||
otherwise, it will be added to the overall run result.
|
||||
|
||||
.. _artifact:
|
||||
|
||||
Artifacts
|
||||
~~~~~~~~~
|
||||
This is an artifact generated during execution/post-processing of a workload.
|
||||
Unlike :ref:`metrics <metrics>`, this represents an actual artifact, such as a
|
||||
file, generated. This may be "output", such as trace, or it could be "meta
|
||||
data" such as logs. These are distinguished using the ``kind`` attribute, which
|
||||
also helps WA decide how it should be handled. Currently supported kinds are:
|
||||
|
||||
:log: A log file. Not part of the "output" as such but contains
|
||||
information about the run/workload execution that may be useful for
|
||||
diagnostics/meta analysis.
|
||||
:meta: A file containing metadata. This is not part of the "output", but
|
||||
contains information that may be necessary to reproduce the
|
||||
results (contrast with ``log`` artifacts which are *not*
|
||||
necessary).
|
||||
:data: This file contains new data, not available otherwise and should
|
||||
be considered part of the "output" generated by WA. Most traces
|
||||
would fall into this category.
|
||||
:export: Exported version of results or some other artifact. This
|
||||
signifies that this artifact does not contain any new data
|
||||
that is not available elsewhere and that it may be safely
|
||||
discarded without losing information.
|
||||
:raw: Signifies that this is a raw dump/log that is normally processed
|
||||
to extract useful information and is then discarded. In a sense,
|
||||
it is the opposite of ``export``, but in general may also be
|
||||
discarded.
|
||||
|
||||
.. note:: whether a file is marked as ``log``/``data`` or ``raw``
|
||||
depends on how important it is to preserve this file,
|
||||
e.g. when archiving, vs how much space it takes up.
|
||||
Unlike ``export`` artifacts which are (almost) always
|
||||
ignored by other exporters as that would never result
|
||||
in data loss, ``raw`` files *may* be processed by
|
||||
exporters if they decided that the risk of losing
|
||||
potentially (though unlikely) useful data is greater
|
||||
than the time/space cost of handling the artifact (e.g.
|
||||
a database uploader may choose to ignore ``raw``
|
||||
artifacts, whereas a network filer archiver may choose
|
||||
to archive them).
|
||||
|
||||
.. note: The kind parameter is intended to represent the logical
|
||||
function of a particular artifact, not its intended means of
|
||||
processing -- this is left entirely up to the output
|
||||
processors.
|
||||
|
||||
As with :ref:`metrics`, artifacts are added via the :ref:`context <context>`:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
context.add_artifact("benchmark-output", "bech-out.txt", kind="raw",
|
||||
description="stdout from running the benchmark")
|
||||
|
||||
.. note:: The file *must* exist on the host by the point at which the artifact
|
||||
is added, otherwise an error will be raised.
|
||||
|
||||
The artifact will be added to the result of the current job, if there is one;
|
||||
otherwise, it will be added to the overall run result. In some situations, you
|
||||
may wish to add an artifact to the overall run while being inside a job context,
|
||||
this can be done with ``add_run_artifact``:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
context.add_run_artifact("score-summary", "scores.txt", kind="export",
|
||||
description="""
|
||||
Summary of the scores so far. Updated after
|
||||
every job.
|
||||
""")
|
||||
|
||||
In this case, you also need to make sure that the file represented by the
|
||||
artifact is written to the output directory for the run and not the current job.
|
||||
|
||||
.. _metadata:
|
||||
|
||||
Metadata
|
||||
~~~~~~~~
|
||||
|
||||
There may be additional data collected by your plugin that you want to record as
|
||||
part of the result, but that does not fall under the definition of a "metric".
|
||||
For example, you may want to record the version of the binary you're executing.
|
||||
You can do this by adding a metadata entry:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
context.add_metadata("exe-version", 1.3)
|
||||
|
||||
|
||||
Metadata will be added either to the current job result, or to the run result,
|
||||
depending on the current context. Metadata values can be scalars or nested
|
||||
structures of dicts/sequences; the only constraint is that all constituent
|
||||
objects of the value must be POD (Plain Old Data) types -- see :ref:`WA POD
|
||||
types <wa-pods>`.
|
||||
|
||||
There is special support for handling metadata entries that are dicts of values.
|
||||
The following call adds a metadata entry ``"versions"`` whose value is
|
||||
``{"my_exe": 1.3}``:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
context.add_metadata("versions", "my_exe", 1.3)
|
||||
|
||||
If you attempt to add a metadata entry that already exists, an error will be
|
||||
raised, unless ``force=True`` is specified, in which case, it will be
|
||||
overwritten.
|
||||
|
||||
Updating an existing entry whose value is a collection can be done with
|
||||
``update_metadata``:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
context.update_metadata("ran_apps", "my_exe")
|
||||
context.update_metadata("versions", "my_other_exe", "2.3.0")
|
||||
|
||||
The first call appends ``"my_exe"`` to the list at metadata entry
|
||||
``"ran_apps"``. The second call updates the ``"versions"`` dict in the metadata
|
||||
with an entry for ``"my_other_exe"``.
|
||||
|
||||
If an entry does not exist, ``update_metadata`` will create it, so it's
|
||||
recommended to always use that for non-scalar entries, unless the intention is
|
||||
specifically to ensure that the entry does not exist at the time of the call.
|
||||
|
||||
.. _classifiers:
|
||||
|
||||
Classifiers
|
||||
~~~~~~~~~~~
|
||||
|
||||
Classifiers are key-value pairs of tags that can be attached to metrics,
|
||||
artifacts, jobs, or the entire run. Run and job classifiers get propagated to
|
||||
metrics and artifacts. Classifier keys should be strings, and their values
|
||||
should be simple scalars (i.e. strings, numbers, or bools).
|
||||
|
||||
Classifiers can be thought of as "tags" that are used to annotate metrics and
|
||||
artifacts, in order to make it easier to sort through them later. WA itself does
|
||||
not do anything with them, however output processors will augment the output
|
||||
they generate with them (for example, ``csv`` processor can add additional
|
||||
columns for classifier keys).
|
||||
|
||||
Classifiers are typically added by the user to attach some domain-specific
|
||||
information (e.g. experiment configuration identifier) to the results, see
|
||||
:ref:`using classifiers <using-classifiers>`. However, plugins can also attach
|
||||
additional classifiers, by specifying them in ``add_metric()`` and
|
||||
``add_artifacts()`` calls.
|
||||
|
||||
|
||||
Metadata vs Classifiers
|
||||
~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Both metadata and classifiers are sets of essentially opaque key-value pairs
|
||||
that get included in WA output. While they may seem somewhat similar and
|
||||
interchangeable, they serve different purposes and are handled differently by
|
||||
the framework.
|
||||
|
||||
Classifiers are used to annotate generated metrics and artifacts in order to
|
||||
assist post-processing tools in sorting through them. Metadata is used to record
|
||||
additional information that is not necessary for processing the results, but
|
||||
that may be needed in order to reproduce them or to make sense of them in a
|
||||
grander context.
|
||||
|
||||
These are specific differences in how they are handled:
|
||||
|
||||
- Classifiers are often provided by the user via the agenda (though can also be
|
||||
added by plugins). Metadata is only created by the framework and plugins.
|
||||
- Classifier values must be simple scalars; metadata values can be nested
|
||||
collections, such as lists or dicts.
|
||||
- Classifiers are used by output processors to augment the output the latter
|
||||
generated; metadata typically isn't.
|
||||
- Classifiers are essentially associated with the individual metrics and
|
||||
artifacts (though in the agenda they're specified at workload, section, or
|
||||
global run levels); metadata is associated with a particular job or run, and
|
||||
not with metrics or artifacts.
|
||||
|
||||
--------------------
|
||||
|
||||
.. _execution-decorators:
|
||||
|
||||
Execution Decorators
|
||||
---------------------
|
||||
|
||||
The following decorators are available for use in order to control how often a
|
||||
method should be able to be executed.
|
||||
|
||||
For example, if we want to ensure that no matter how many iterations of a
|
||||
particular workload are run, we only execute the initialize method for that instance
|
||||
once, we would use the decorator as follows:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from wa.utils.exec_control import once
|
||||
|
||||
@once
|
||||
def initialize(self, context):
|
||||
# Perform one time initialization e.g. installing a binary to target
|
||||
# ..
|
||||
|
||||
@once_per_instance
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
The specified method will be invoked only once for every bound instance within
|
||||
the environment.
|
||||
|
||||
@once_per_class
|
||||
~~~~~~~~~~~~~~~
|
||||
The specified method will be invoked only once for all instances of a class
|
||||
within the environment.
|
||||
|
||||
@once
|
||||
~~~~~
|
||||
The specified method will be invoked only once within the environment.
|
||||
|
||||
.. warning:: If a method containing a super call is decorated, this will also
|
||||
stop propagation up the hierarchy, unless this is the desired
|
||||
effect, additional functionality should be implemented in a
|
||||
separate decorated method which can then be called allowing for
|
||||
normal propagation to be retained.
|
||||
|
||||
|
||||
--------------------
|
||||
|
||||
Utils
|
||||
-----
|
||||
|
||||
Workload Automation defines a number of utilities collected under
|
||||
:mod:`wa.utils` subpackage. These utilities were created to help with the
|
||||
implementation of the framework itself, but may also be useful when
|
||||
implementing plugins.
|
||||
|
||||
--------------------
|
||||
|
||||
Workloads
|
||||
---------
|
||||
|
||||
All of the types inherit from the same base :class:`Workload` and its API can be
|
||||
seen in the :ref:`API <workload-api>` section.
|
||||
|
||||
Workload methods (except for ``validate``) take a single argument that is a
|
||||
:class:`wa.framework.execution.ExecutionContext` instance. This object keeps
|
||||
track of the current execution state (such as the current workload, iteration
|
||||
number, etc), and contains, among other things, a
|
||||
:class:`wa.framework.output.JobOutput` instance that should be populated from
|
||||
the ``update_output`` method with the results of the execution. For more
|
||||
information please see `the context`_ documentation. ::
|
||||
|
||||
# ...
|
||||
|
||||
def update_output(self, context):
|
||||
# ...
|
||||
context.add_metric('energy', 23.6, 'Joules', lower_is_better=True)
|
||||
|
||||
# ...
|
||||
|
||||
.. _workload-types:
|
||||
|
||||
Workload Types
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
There are multiple workload types that you can inherit from depending on the
|
||||
purpose of your workload, the different types along with an output of their
|
||||
intended use cases are outlined below.
|
||||
|
||||
.. _basic-workload:
|
||||
|
||||
Basic (:class:`wa.Workload <wa.framework.workload.Workload>`)
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
This type of workload is the simplest type of workload and is left to the
|
||||
developer to implement its full functionality.
|
||||
|
||||
|
||||
.. _apk-workload:
|
||||
|
||||
Apk (:class:`wa.ApkWorkload <wa.framework.workload.ApkWorkload>`)
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
This workload will simply deploy and launch an android app in its basic form
|
||||
with no UI interaction.
|
||||
|
||||
.. _uiautomator-workload:
|
||||
|
||||
|
||||
UiAuto (:class:`wa.UiautoWorkload <wa.framework.workload.UiautoWorkload>`)
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
This workload is for android targets which will use UiAutomator to interact with
|
||||
UI elements without a specific android app, for example performing manipulation
|
||||
of android itself. This is the preferred type of automation as the results are
|
||||
more portable and reproducible due to being able to wait for UI elements to
|
||||
appear rather than having to rely on human recordings.
|
||||
|
||||
.. _apkuiautomator-workload:
|
||||
|
||||
ApkUiAuto (:class:`wa.ApkUiautoWorkload <wa.framework.workload.ApkUiautoWorkload>`)
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
This is the same as the UiAuto workload, however it is also associated with an
|
||||
android app e.g. AdobeReader and will automatically deploy and launch the
|
||||
android app before running the automation.
|
||||
|
||||
.. _revent-workload:
|
||||
|
||||
Revent (:class:`wa.ReventWorkload <wa.framework.workload.ReventWorkload>`)
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Revent workloads are designed primarily for games as these are unable to be
|
||||
automated with UiAutomator due to the fact that they are rendered within a
|
||||
single UI element. They require a recording to be performed manually and
|
||||
currently will need re-recording for each different device. For more
|
||||
information on revent workloads please see :ref:`revent_files_creation`
|
||||
|
||||
.. _apkrevent-workload:
|
||||
|
||||
APKRevent (:class:`wa.ApkReventWorkload <wa.framework.workload.ApkReventWorkload>`)
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
This is the same as the Revent workload, however it is also associated with an
|
||||
android app e.g. AngryBirds and will automatically deploy and launch the android
|
||||
app before running the automation.
|
@ -1,341 +0,0 @@
|
||||
Revent Recordings
|
||||
=================
|
||||
|
||||
Convention for Naming revent Files for Revent Workloads
|
||||
-------------------------------------------------------------------------------
|
||||
|
||||
There is a convention for naming revent files which you should follow if you
|
||||
want to record your own revent files. Each revent file must be called (case sensitive)
|
||||
``<device name>.<stage>.revent``,
|
||||
where ``<device name>`` is the name of your device (as defined by the model
|
||||
name of your device which can be retrieved with
|
||||
``adb shell getprop ro.product.model`` or by the ``name`` attribute of your
|
||||
customized device class), and ``<stage>`` is one of the following currently
|
||||
supported stages:
|
||||
|
||||
:setup: This stage is where the application is loaded (if present). It is
|
||||
a good place to record an revent here to perform any tasks to get
|
||||
ready for the main part of the workload to start.
|
||||
:run: This stage is where the main work of the workload should be performed.
|
||||
This will allow for more accurate results if the revent file for this
|
||||
stage only records the main actions under test.
|
||||
:extract_results: This stage is used after the workload has been completed
|
||||
to retrieve any metrics from the workload e.g. a score.
|
||||
:teardown: This stage is where any final actions should be performed to
|
||||
clean up the workload.
|
||||
|
||||
Only the run stage is mandatory, the remaining stages will be replayed if a
|
||||
recording is present otherwise no actions will be performed for that particular
|
||||
stage.
|
||||
|
||||
All your custom revent files should reside at
|
||||
``'$WA_USER_DIRECTORY/dependencies/WORKLOAD NAME/'``. So
|
||||
typically to add a custom revent files for a device named "mydevice" and a
|
||||
workload name "myworkload", you would need to add the revent files to the
|
||||
directory ``~/.workload_automation/dependencies/myworkload/revent_files``
|
||||
creating the directory structure if necessary. ::
|
||||
|
||||
mydevice.setup.revent
|
||||
mydevice.run.revent
|
||||
mydevice.extract_results.revent
|
||||
mydevice.teardown.revent
|
||||
|
||||
Any revent file in the dependencies will always overwrite the revent file in the
|
||||
workload directory. So for example it is possible to just provide one revent for
|
||||
setup in the dependencies and use the run.revent that is in the workload directory.
|
||||
|
||||
|
||||
File format of revent recordings
|
||||
--------------------------------
|
||||
|
||||
You do not need to understand the recording format in order to use revent. This
|
||||
section is intended for those looking to extend revent in some way, or to
|
||||
utilize revent recordings for other purposes.
|
||||
|
||||
Format Overview
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
Recordings are stored in a binary format. A recording consists of three
|
||||
sections::
|
||||
|
||||
+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Header |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
| Device Description |
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
| |
|
||||
| Event Stream |
|
||||
| |
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
The header contains metadata describing the recording. The device description
|
||||
contains information about input devices involved in this recording. Finally,
|
||||
the event stream contains the recorded input events.
|
||||
|
||||
All fields are either fixed size or prefixed with their length or the number of
|
||||
(fixed-sized) elements.
|
||||
|
||||
.. note:: All values below are little endian
|
||||
|
||||
|
||||
Recording Header
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
An revent recording header has the following structure
|
||||
|
||||
* It starts with the "magic" string ``REVENT`` to indicate that this is an
|
||||
revent recording.
|
||||
* The magic is followed by a 16 bit version number. This indicates the format
|
||||
version of the recording that follows. Current version is ``2``.
|
||||
* The next 16 bits indicate the type of the recording. This dictates the
|
||||
structure of the Device Description section. Valid values are:
|
||||
|
||||
``0``
|
||||
This is a general input event recording. The device description
|
||||
contains a list of paths from which the events were recorded.
|
||||
``1``
|
||||
This is a gamepad recording. The device description contains the
|
||||
description of the gamepad used to create the recording.
|
||||
|
||||
* The header is zero-padded to 128 bits.
|
||||
|
||||
::
|
||||
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| 'R' | 'E' | 'V' | 'E' |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| 'N' | 'T' | Version |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Mode | PADDING |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| PADDING |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
|
||||
Device Description
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
This section describes the input devices used in the recording. Its structure is
|
||||
determined by the value of ``Mode`` field in the header.
|
||||
|
||||
General Recording
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. note:: This is the only format supported prior to version ``2``.
|
||||
|
||||
The recording has been made from all available input devices. This section
|
||||
contains the list of ``/dev/input`` paths for the devices, prefixed with total
|
||||
number of the devices recorded.
|
||||
|
||||
::
|
||||
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Number of devices |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
| Device paths +-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
|
||||
Similarly, each device path is a length-prefixed string. Unlike C strings, the
|
||||
path is *not* NULL-terminated.
|
||||
|
||||
::
|
||||
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of device path |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
| Device path |
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
|
||||
Gamepad Recording
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
The recording has been made from a specific gamepad. All events in the stream
|
||||
will be for that device only. The section describes the device properties that
|
||||
will be used to create a virtual input device using ``/dev/uinput``. Please
|
||||
see ``linux/input.h`` header in the Linux kernel source for more information
|
||||
about the fields in this section.
|
||||
|
||||
::
|
||||
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| bustype | vendor |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| product | version |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| name_length |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
| name |
|
||||
| |
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| ev_bits |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
| |
|
||||
| key_bits (96 bytes) |
|
||||
| |
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
| |
|
||||
| rel_bits (96 bytes) |
|
||||
| |
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
| |
|
||||
| abs_bits (96 bytes) |
|
||||
| |
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| num_absinfo |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
| |
|
||||
| |
|
||||
| |
|
||||
| absinfo entries |
|
||||
| |
|
||||
| |
|
||||
| |
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
|
||||
Each ``absinfo`` entry consists of six 32 bit values. The number of entries is
|
||||
determined by the ``abs_bits`` field.
|
||||
|
||||
|
||||
::
|
||||
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| value |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| minimum |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| maximum |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| fuzz |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| flat |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| resolution |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
|
||||
Event Stream
|
||||
~~~~~~~~~~~~
|
||||
|
||||
The majority of an revent recording will be made up of the input events that were
|
||||
recorded. The event stream is prefixed with the number of events in the stream,
|
||||
and start and end times for the recording.
|
||||
|
||||
::
|
||||
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Number of events |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Number of events (cont.) |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Start Time Seconds |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Start Time Seconds (cont.) |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Start Time Microseconds |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Start Time Microseconds (cont.) |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| End Time Seconds |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| End Time Seconds (cont.) |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| End Time Microseconds |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| End Time Microseconds (cont.) |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
| |
|
||||
| Events |
|
||||
| |
|
||||
| |
|
||||
| +-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
|
||||
Event Structure
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
Each event entry is structured as follows:
|
||||
|
||||
* An unsigned short integer representing which device from the list of device paths
|
||||
this event is for (zero indexed). E.g. Device ID = 3 would be the 4th
|
||||
device in the list of device paths.
|
||||
* An unsigned long integer representing the number of seconds since "epoch" when
|
||||
the event was recorded.
|
||||
* An unsigned long integer representing the microseconds part of the timestamp.
|
||||
* An unsigned integer representing the event type
|
||||
* An unsigned integer representing the event code
|
||||
* An unsigned integer representing the event value
|
||||
|
||||
For more information about the event type, code and value please read:
|
||||
https://www.kernel.org/doc/Documentation/input/event-codes.txt
|
||||
|
||||
::
|
||||
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Device ID | Timestamp Seconds |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Timestamp Seconds (cont.) |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Timestamp Seconds (cont.) | stamp Microseconds |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Timestamp Microseconds (cont.) |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Timestamp Microseconds (cont.) | Event Type |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Event Code | Event Value |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Event Value (cont.) |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
|
||||
Parser
|
||||
~~~~~~
|
||||
|
||||
WA has a parser for revent recordings. This can be used to work with revent
|
||||
recordings in scripts. Here is an example:
|
||||
|
||||
.. code:: python
|
||||
|
||||
from wa.utils.revent import ReventRecording
|
||||
|
||||
with ReventRecording('/path/to/recording.revent') as recording:
|
||||
print("Recording: {}".format(recording.filepath))
|
||||
print("There are {} input events".format(recording.num_events))
|
||||
print("Over a total of {} seconds".format(recording.duration))
|
@ -1,123 +0,0 @@
|
||||
.. _serialization:
|
||||
|
||||
Serialization
|
||||
=============
|
||||
|
||||
Overview of Serialization
|
||||
-------------------------
|
||||
|
||||
WA employs a serialization mechanism in order to store some of its internal
|
||||
structures inside the output directory. Serialization is performed in two
|
||||
stages:
|
||||
|
||||
1. A serializable object is converted into a POD (Plain Old Data) structure
|
||||
consisting of primitive Python types, and a few additional types (see
|
||||
:ref:`wa-pods` below).
|
||||
2. The POD structure is serialized into a particular format by a generic
|
||||
parser for that format. Currently, `yaml` and `json` are supported.
|
||||
|
||||
Deserialization works in reverse order -- first the serialized text is parsed
|
||||
into a POD, which is then converted to the appropriate object.
|
||||
|
||||
|
||||
Implementing Serializable Objects
|
||||
---------------------------------
|
||||
|
||||
In order to be considered serializable, an object must either be a POD, or it
|
||||
must implement the ``to_pod()`` method and ``from_pod`` static/class method,
|
||||
which will perform the conversion to/from POD.
|
||||
|
||||
As an example, below is a (somewhat trimmed) implementation of the ``Event``
|
||||
class:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
class Event(object):
|
||||
|
||||
@staticmethod
|
||||
def from_pod(pod):
|
||||
instance = Event(pod['message'])
|
||||
instance.timestamp = pod['timestamp']
|
||||
return instance
|
||||
|
||||
def __init__(self, message):
|
||||
self.timestamp = datetime.utcnow()
|
||||
self.message = message
|
||||
|
||||
def to_pod(self):
|
||||
return dict(
|
||||
timestamp=self.timestamp,
|
||||
message=self.message,
|
||||
)
|
||||
|
||||
|
||||
Serialization API
|
||||
-----------------
|
||||
|
||||
.. function:: read_pod(source, fmt=None)
|
||||
.. function:: write_pod(pod, dest, fmt=None)
|
||||
|
||||
These read and write PODs from a file. The format will be inferred, if
|
||||
possible, from the extension of the file, or it may be specified explicitly
|
||||
with ``fmt``. ``source`` and ``dest`` can be either strings, in which case
|
||||
they will be interpreted as paths, or they can be file-like objects.
|
||||
|
||||
.. function:: is_pod(obj)
|
||||
|
||||
Returns ``True`` if ``obj`` is a POD, and ``False`` otherwise.
|
||||
|
||||
.. function:: dump(o, wfh, fmt='json', \*args, \*\*kwargs)
|
||||
.. function:: load(s, fmt='json', \*args, \*\*kwargs)
|
||||
|
||||
These implement an alternative serialization interface, which matches the
|
||||
interface exposed by the parsers for the supported formats.
|
||||
|
||||
|
||||
.. _wa-pods:
|
||||
|
||||
WA POD Types
|
||||
------------
|
||||
|
||||
POD types are types that can be handled by a serializer directly, without a need
|
||||
for any additional information. These consist of the built-in Python types ::
|
||||
|
||||
list
|
||||
tuple
|
||||
dict
|
||||
set
|
||||
str
|
||||
unicode
|
||||
int
|
||||
float
|
||||
bool
|
||||
|
||||
...the standard library types ::
|
||||
|
||||
OrderedDict
|
||||
datetime
|
||||
|
||||
...and the WA-defined types ::
|
||||
|
||||
regex_type
|
||||
none_type
|
||||
level
|
||||
cpu_mask
|
||||
|
||||
Any structure consisting entirely of these types is a POD and can be serialized
|
||||
and then deserialized without losing information. It is important to note that
|
||||
only these specific types are considered POD, their subclasses are *not*.
|
||||
|
||||
.. note:: ``dict``\ s get deserialized as ``OrderedDict``\ s.
|
||||
|
||||
|
||||
Serialization Formats
|
||||
---------------------
|
||||
|
||||
WA utilizes two serialization formats: YAML and JSON. YAML is used for files
|
||||
intended to be primarily written and/or read by humans; JSON is used for files
|
||||
intended to be primarily written and/or read by WA and other programs.
|
||||
|
||||
The parsers and serializers for these formats used by WA have been modified to
|
||||
handle additional types (e.g. regular expressions) that are typically not
|
||||
supported by the formats. This was done in such a way that the resulting files
|
||||
are still valid and can be parsed by any parser for that format.
|
@ -1,11 +0,0 @@
|
||||
*******
|
||||
How Tos
|
||||
*******
|
||||
|
||||
.. contents:: Contents
|
||||
:depth: 4
|
||||
:local:
|
||||
|
||||
.. include:: developer_information/how_tos/adding_plugins.rst
|
||||
|
||||
.. include:: developer_information/how_tos/processing_output.rst
|
@ -1,702 +0,0 @@
|
||||
.. _deploying-executables-example:
|
||||
|
||||
Deploying Executables
|
||||
=====================
|
||||
|
||||
Installing binaries for a particular plugin should generally only be performed
|
||||
once during a run. This should typically be done in the ``initialize`` method,
|
||||
if the only functionality performed in the method is to install the required binaries
|
||||
then the ``initialize`` method should be decorated with the ``@once``
|
||||
:ref:`decorator <execution-decorators>` otherwise this should be placed into a dedicated
|
||||
method which is decorated instead. Please note if doing this then any installed
|
||||
paths should be added as class attributes rather than instance variables. As a
|
||||
general rule if binaries are installed as part of ``initialize`` then they
|
||||
should be uninstalled in the complementary ``finalize`` method.
|
||||
|
||||
Part of an example workload demonstrating this is shown below:
|
||||
|
||||
.. code:: python
|
||||
|
||||
class MyWorkload(Workload):
|
||||
#..
|
||||
@once
|
||||
def initialize(self, context):
|
||||
resource = Executable(self, self.target.abi, 'my_executable')
|
||||
host_binary = context.resolver.get(resource)
|
||||
MyWorkload.target_binary = self.target.install(host_binary)
|
||||
#..
|
||||
|
||||
def setup(self, context):
|
||||
self.command = "{} -a -b -c".format(self.target_binary)
|
||||
self.target.execute(self.command)
|
||||
#..
|
||||
|
||||
@once
|
||||
def finalize(self, context):
|
||||
self.target.uninstall('my_executable')
|
||||
|
||||
|
||||
.. _adding-a-workload-example:
|
||||
|
||||
Adding a Workload
|
||||
=================
|
||||
|
||||
The easiest way to create a new workload is to use the
|
||||
:ref:`create <create-command>` command. ``wa create workload <args>``. This
|
||||
will use predefined templates to create a workload based on the options that are
|
||||
supplied to be used as a starting point for the workload. For more information
|
||||
on using the create workload command see ``wa create workload -h``
|
||||
|
||||
The first thing to decide is the type of workload you want to create depending
|
||||
on the OS you will be using and the aim of the workload. There are currently 6
|
||||
available workload types to choose as detailed in the
|
||||
:ref:`Developer Reference <workload-types>`.
|
||||
|
||||
Once you have decided what type of workload you wish to choose this can be
|
||||
specified with ``-k <workload_kind>`` followed by the workload name. This
|
||||
will automatically generate a workload in your ``WA_CONFIG_DIR/plugins``. If
|
||||
you wish to specify a custom location this can be provided with ``-p
|
||||
<path>``
|
||||
|
||||
A typical invocation of the :ref:`create <create-command>` command would be in
|
||||
the form::
|
||||
|
||||
wa create workload -k <workload_kind> <workload_name>
|
||||
|
||||
|
||||
.. _adding-a-basic-workload-example:
|
||||
|
||||
Adding a Basic Workload
|
||||
-----------------------
|
||||
|
||||
To add a ``basic`` workload template for our example workload we can simply use the
|
||||
command::
|
||||
|
||||
wa create workload -k basic ziptest
|
||||
|
||||
This will generate a very basic workload with dummy methods for the each method in
|
||||
the workload interface and it is left to the developer to add any required functionality.
|
||||
|
||||
Not all the methods from the interface are required to be implemented, this
|
||||
example shows how a subset might be used to implement a simple workload that
|
||||
times how long it takes to compress a file of a particular size on the device.
|
||||
|
||||
|
||||
.. note:: This is intended as an example of how to implement the Workload
|
||||
:ref:`interface <workload-api>`. The methodology used to
|
||||
perform the actual measurement is not necessarily sound, and this
|
||||
Workload should not be used to collect real measurements.
|
||||
|
||||
The first step is to subclass our desired
|
||||
:ref:`workload type <workload-types>` depending on the purpose of our workload,
|
||||
in this example we are implementing a very simple workload and do not
|
||||
require any additional features so shall inherit directly from the base
|
||||
:class:`Workload` class. We then need to provide a ``name`` for our workload
|
||||
which is what will be used to identify your workload for example in an
|
||||
agenda or via the show command, if you used the `create` command this will
|
||||
already be populated for you.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import os
|
||||
from wa import Workload, Parameter
|
||||
|
||||
class ZipTestWorkload(Workload):
|
||||
|
||||
name = 'ziptest'
|
||||
|
||||
The ``description`` attribute should be a string in the structure of a short
|
||||
summary of the purpose of the workload, and will be shown when using the
|
||||
:ref:`list command <list-command>`, followed by a more in-depth explanation
|
||||
separated by a new line.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
description = '''
|
||||
Times how long it takes to gzip a file of a particular size on a device.
|
||||
|
||||
This workload was created for illustration purposes only. It should not be
|
||||
used to collect actual measurements.
|
||||
'''
|
||||
|
||||
In order to allow for additional configuration of the workload from a user a
|
||||
list of :ref:`parameters <plugin-parameters>` can be supplied. These can be
|
||||
configured in a variety of different ways. For example here we are ensuring that
|
||||
the value of the parameter is an integer and larger than 0 using the ``kind``
|
||||
and ``constraint`` options, also if no value is provided we are providing a
|
||||
``default`` value of 2000000. These parameters will automatically have their
|
||||
value set as an attribute of the workload so later on we will be able to use the
|
||||
value provided here as ``self.file_size``.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
parameters = [
|
||||
Parameter('file_size', kind=int, default=2000000,
|
||||
constraint=lambda x: 0 < x,
|
||||
description='Size of the file (in bytes) to be gzipped.')
|
||||
]
|
||||
|
||||
Next we will implement our ``setup`` method. This is where we do any preparation
|
||||
that is required before the workload is run; this is usually things like setting
|
||||
up required files on the device and generating commands from user input. In this
|
||||
case we will generate our input file on the host system and then push it to a
|
||||
known location on the target for use in the 'run' stage.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def setup(self, context):
|
||||
super(ZipTestWorkload, self).setup(context)
|
||||
# Generate a file of the specified size containing random garbage.
|
||||
host_infile = os.path.join(context.output_directory, 'infile')
|
||||
command = 'openssl rand -base64 {} > {}'.format(self.file_size, host_infile)
|
||||
os.system(command)
|
||||
# Set up on-device paths
|
||||
devpath = self.target.path # os.path equivalent for the target
|
||||
self.target_infile = devpath.join(self.target.working_directory, 'infile')
|
||||
self.target_outfile = devpath.join(self.target.working_directory, 'outfile')
|
||||
# Push the file to the target
|
||||
self.target.push(host_infile, self.target_infile)
|
||||
|
||||
|
||||
The ``run`` method is where the actual 'work' of the workload takes place and is
|
||||
what is measured by any instrumentation. So for this example this is the
|
||||
execution of creating the zip file on the target.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def run(self, context):
|
||||
cmd = 'cd {} && (time gzip {}) &>> {}'
|
||||
self.target.execute(cmd.format(self.target.working_directory,
|
||||
self.target_infile,
|
||||
self.target_outfile))
|
||||
|
||||
The ``extract_results`` method is used to extract any results from the target
|
||||
for example we want to pull the file containing the timing information that we
|
||||
will use to generate metrics for our workload and then we add this file as an
|
||||
artifact with a 'raw' kind, which means once WA has finished processing it will
|
||||
allow it to decide whether to keep the file or not.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def extract_results(self, context):
|
||||
super(ZipTestWorkload, self).extract_results(context)
|
||||
# Pull the results file to the host
|
||||
self.host_outfile = os.path.join(context.output_directory, 'timing_results')
|
||||
self.target.pull(self.target_outfile, self.host_outfile)
|
||||
context.add_artifact('ziptest-results', self.host_outfile, kind='raw')
|
||||
|
||||
The ``update_output`` method we can do any generation of metrics that we wish to
|
||||
for our workload. In this case we are going to simply convert the times reported
|
||||
into seconds and add them as 'metrics' to WA which can then be displayed to the
|
||||
user along with any others in a format dependent on which output processors they
|
||||
have enabled for the run.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def update_output(self, context):
|
||||
super(ZipTestWorkload, self).update_output(context)
|
||||
# Extract metrics from the file's contents and update the result
|
||||
# with them.
|
||||
content = iter(open(self.host_outfile).read().strip().split())
|
||||
for value, metric in zip(content, content):
|
||||
mins, secs = map(float, value[:-1].split('m'))
|
||||
context.add_metric(metric, secs + 60 * mins, 'seconds')
|
||||
|
||||
Finally in the ``teardown`` method we will perform any required clean up for the
|
||||
workload so we will delete the input and output files from the device.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def teardown(self, context):
|
||||
super(ZipTestWorkload, self).teardown(context)
|
||||
self.target.remove(self.target_infile)
|
||||
self.target.remove(self.target_outfile)
|
||||
|
||||
The full implementation of this workload would look something like:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import os
|
||||
from wa import Workload, Parameter
|
||||
|
||||
class ZipTestWorkload(Workload):
|
||||
|
||||
name = 'ziptest'
|
||||
|
||||
description = '''
|
||||
Times how long it takes to gzip a file of a particular size on a device.
|
||||
|
||||
This workload was created for illustration purposes only. It should not be
|
||||
used to collect actual measurements.
|
||||
'''
|
||||
|
||||
parameters = [
|
||||
Parameter('file_size', kind=int, default=2000000,
|
||||
constraint=lambda x: 0 < x,
|
||||
description='Size of the file (in bytes) to be gzipped.')
|
||||
]
|
||||
|
||||
def setup(self, context):
|
||||
super(ZipTestWorkload, self).setup(context)
|
||||
# Generate a file of the specified size containing random garbage.
|
||||
host_infile = os.path.join(context.output_directory, 'infile')
|
||||
command = 'openssl rand -base64 {} > {}'.format(self.file_size, host_infile)
|
||||
os.system(command)
|
||||
# Set up on-device paths
|
||||
devpath = self.target.path # os.path equivalent for the target
|
||||
self.target_infile = devpath.join(self.target.working_directory, 'infile')
|
||||
self.target_outfile = devpath.join(self.target.working_directory, 'outfile')
|
||||
# Push the file to the target
|
||||
self.target.push(host_infile, self.target_infile)
|
||||
|
||||
def run(self, context):
|
||||
cmd = 'cd {} && (time gzip {}) &>> {}'
|
||||
self.target.execute(cmd.format(self.target.working_directory,
|
||||
self.target_infile,
|
||||
self.target_outfile))
|
||||
def extract_results(self, context):
|
||||
super(ZipTestWorkload, self).extract_results(context)
|
||||
# Pull the results file to the host
|
||||
self.host_outfile = os.path.join(context.output_directory, 'timing_results')
|
||||
self.target.pull(self.target_outfile, self.host_outfile)
|
||||
context.add_artifact('ziptest-results', self.host_outfile, kind='raw')
|
||||
|
||||
def update_output(self, context):
|
||||
super(ZipTestWorkload, self).update_output(context)
|
||||
# Extract metrics from the file's contents and update the result
|
||||
# with them.
|
||||
content = iter(open(self.host_outfile).read().strip().split())
|
||||
for value, metric in zip(content, content):
|
||||
mins, secs = map(float, value[:-1].split('m'))
|
||||
context.add_metric(metric, secs + 60 * mins, 'seconds')
|
||||
|
||||
def teardown(self, context):
|
||||
super(ZipTestWorkload, self).teardown(context)
|
||||
self.target.remove(self.target_infile)
|
||||
self.target.remove(self.target_outfile)
|
||||
|
||||
|
||||
|
||||
.. _apkuiautomator-example:
|
||||
|
||||
Adding a ApkUiAutomator Workload
|
||||
--------------------------------
|
||||
|
||||
If we wish to create a workload to automate the testing of the Google Docs
|
||||
android app, we would choose to perform the automation using UIAutomator and we
|
||||
would want to automatically deploy and install the apk file to the target,
|
||||
therefore we would choose the :ref:`ApkUiAutomator workload
|
||||
<apkuiautomator-workload>` type with the following command::
|
||||
|
||||
$ wa create workload -k apkuiauto google_docs
|
||||
Workload created in $WA_USER_DIRECTORY/plugins/google_docs
|
||||
|
||||
|
||||
From here you can navigate to the displayed directory and you will find your
|
||||
``__init__.py`` and a ``uiauto`` directory. The former is your python WA
|
||||
workload and will look something like this. For an example of what should be
|
||||
done in each of the main method please see
|
||||
:ref:`adding a basic example <adding-a-basic-workload-example>` above.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from wa import Parameter, ApkUiautoWorkload
|
||||
class GoogleDocs(ApkUiautoWorkload):
|
||||
name = 'google_docs'
|
||||
description = "This is an placeholder description"
|
||||
# Replace with a list of supported package names in the APK file(s).
|
||||
package_names = ['package_name']
|
||||
|
||||
parameters = [
|
||||
# Workload parameters go here e.g.
|
||||
Parameter('example_parameter', kind=int, allowed_values=[1,2,3],
|
||||
default=1, override=True, mandatory=False,
|
||||
description='This is an example parameter')
|
||||
]
|
||||
|
||||
def __init__(self, target, **kwargs):
|
||||
super(GoogleDocs, self).__init__(target, **kwargs)
|
||||
# Define any additional attributes required for the workload
|
||||
|
||||
def init_resources(self, resolver):
|
||||
super(GoogleDocs, self).init_resources(resolver)
|
||||
# This method may be used to perform early resource discovery and
|
||||
# initialization. This is invoked during the initial loading stage and
|
||||
# before the device is ready, so cannot be used for any device-dependent
|
||||
# initialization. This method is invoked before the workload instance is
|
||||
# validated.
|
||||
|
||||
def initialize(self, context):
|
||||
super(GoogleDocs, self).initialize(context)
|
||||
# This method should be used to perform once-per-run initialization of a
|
||||
# workload instance.
|
||||
|
||||
def validate(self):
|
||||
super(GoogleDocs, self).validate()
|
||||
# Validate inter-parameter assumptions etc
|
||||
|
||||
def setup(self, context):
|
||||
super(GoogleDocs, self).setup(context)
|
||||
# Perform any necessary setup before starting the UI automation
|
||||
|
||||
def extract_results(self, context):
|
||||
super(GoogleDocs, self).extract_results(context)
|
||||
# Extract results on the target
|
||||
|
||||
def update_output(self, context):
|
||||
super(GoogleDocs, self).update_output(context)
|
||||
# Update the output within the specified execution context with the
|
||||
# metrics and artifacts form this workload iteration.
|
||||
|
||||
def teardown(self, context):
|
||||
super(GoogleDocs, self).teardown(context)
|
||||
# Perform any final clean up for the Workload.
|
||||
|
||||
|
||||
Depending on the purpose of your workload you can choose to implement which
|
||||
methods you require. The main things that need setting are the list of
|
||||
``package_names`` which must be a list of strings containing the android package
|
||||
name that will be used during resource resolution to locate the relevant apk
|
||||
file for the workload. Additionally, the workload parameters will need
|
||||
updating to any relevant parameters required by the workload as well as the
|
||||
description.
|
||||
|
||||
|
||||
The latter will contain a framework for performing the UI automation on the
|
||||
target, the files you will be most interested in will be
|
||||
``uiauto/app/src/main/java/arm/wa/uiauto/UiAutomation.java`` which will contain
|
||||
the actual code of the automation and will look something like:
|
||||
|
||||
.. code-block:: java
|
||||
|
||||
package com.arm.wa.uiauto.google_docs;
|
||||
|
||||
import android.app.Activity;
|
||||
import android.os.Bundle;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import android.support.test.runner.AndroidJUnit4;
|
||||
|
||||
import android.util.Log;
|
||||
import android.view.KeyEvent;
|
||||
|
||||
// Import the uiautomator libraries
|
||||
import android.support.test.uiautomator.UiObject;
|
||||
import android.support.test.uiautomator.UiObjectNotFoundException;
|
||||
import android.support.test.uiautomator.UiScrollable;
|
||||
import android.support.test.uiautomator.UiSelector;
|
||||
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
|
||||
import com.arm.wa.uiauto.BaseUiAutomation;
|
||||
|
||||
@RunWith(AndroidJUnit4.class)
|
||||
public class UiAutomation extends BaseUiAutomation {
|
||||
|
||||
protected Bundle parameters;
|
||||
protected int example_parameter;
|
||||
|
||||
public static String TAG = "google_docs";
|
||||
|
||||
@Before
|
||||
public void initilize() throws Exception {
|
||||
// Perform any parameter initialization here
|
||||
parameters = getParams(); // Required to decode passed parameters.
|
||||
packageID = getPackageID(parameters);
|
||||
example_parameter = parameters.getInt("example_parameter");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void setup() throws Exception {
|
||||
// Optional: Perform any setup required before the main workload
|
||||
// is ran, e.g. dismissing welcome screens
|
||||
}
|
||||
|
||||
@Test
|
||||
public void runWorkload() throws Exception {
|
||||
// The main UI Automation code goes here
|
||||
}
|
||||
|
||||
@Test
|
||||
public void extractResults() throws Exception {
|
||||
// Optional: Extract any relevant results from the workload,
|
||||
}
|
||||
|
||||
@Test
|
||||
public void teardown() throws Exception {
|
||||
// Optional: Perform any clean up for the workload
|
||||
}
|
||||
}
|
||||
|
||||
A few items to note from the template:
|
||||
- Each of the stages of execution for example ``setup``, ``runWorkload`` etc
|
||||
are decorated with the ``@Test`` decorator; this is important to allow
|
||||
these methods to be called at the appropriate time however any additional
|
||||
methods you may add do not require this decorator.
|
||||
- The ``initialize`` method has the ``@Before`` decorator; this is there to
|
||||
ensure that this method is called before executing any of the workload
|
||||
stages and therefore is used to decode and initialize any parameters that
|
||||
are passed in.
|
||||
- The code currently retrieves the ``example_parameter`` that was
|
||||
provided to the python workload as an Integer, there are similar calls to
|
||||
retrieve parameters of different types e.g. ``getString``, ``getBoolean``,
|
||||
``getDouble`` etc.
|
||||
|
||||
Once you have implemented your java workload you can use the file
|
||||
``uiauto/build.sh`` to compile your automation into an apk file to perform the
|
||||
automation. The generated apk will be generated with the package name
|
||||
``com.arm.wa.uiauto.<workload_name>`` which when running your workload will be
|
||||
automatically detected by the resource getters and deployed to the device.
|
||||
|
||||
|
||||
Adding a ReventApk Workload
|
||||
---------------------------
|
||||
|
||||
If we wish to create a workload to automate the testing of a UI based workload
|
||||
that we cannot / do not wish to use UiAutomator then we can perform the
|
||||
automation using revent. In this example we would want to automatically deploy
|
||||
and install an apk file to the target, therefore we would choose the
|
||||
:ref:`ApkRevent workload <apkrevent-workload>` type with the following
|
||||
command::
|
||||
|
||||
$ wa create workload -k apkrevent my_game
|
||||
Workload created in $WA_USER_DIRECTORY/plugins/my_game
|
||||
|
||||
This will generate a revent based workload; you will end up with a very similar
|
||||
python file as to the one outlined in generating a :ref:`UiAutomator based
|
||||
workload <apkuiautomator-example>` however without the accompanying java
|
||||
automation files.
|
||||
|
||||
The main difference between the two is that this workload will subclass
|
||||
``ApkReventWorkload`` instead of ``ApkUiautoWorkload`` as shown below.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from wa import ApkReventWorkload
|
||||
|
||||
class MyGame(ApkReventWorkload):
|
||||
|
||||
name = 'mygame'
|
||||
package_names = ['com.mylogo.mygame']
|
||||
|
||||
# ..
|
||||
|
||||
|
||||
---------------------------------------------------------------
|
||||
|
||||
.. _adding-an-instrument-example:
|
||||
|
||||
Adding an Instrument
|
||||
====================
|
||||
This is an example of how we would create an instrument which will trace device
|
||||
errors using a custom "trace" binary file. For more detailed information please see the
|
||||
:ref:`Instrument Reference <instrument-reference>`. The first thing to do is to create
|
||||
a new file under ``$WA_USER_DIRECTORY/plugins/`` and subclass
|
||||
:class:`Instrument`. Make sure to overwrite the variable name with what we want our instrument
|
||||
to be called and then locate our binary for the instrument.
|
||||
|
||||
::
|
||||
|
||||
class TraceErrorsInstrument(Instrument):
|
||||
|
||||
name = 'trace-errors'
|
||||
|
||||
def __init__(self, target, **kwargs):
|
||||
super(TraceErrorsInstrument, self).__init__(target, **kwargs)
|
||||
self.binary_name = 'trace'
|
||||
self.binary_file = os.path.join(os.path.dirname(__file__), self.binary_name)
|
||||
self.trace_on_target = None
|
||||
|
||||
We then declare and implement the required methods as detailed in the
|
||||
:ref:`Instrument API <instrument-api>`. For the ``initialize`` method, we want to install
|
||||
the executable file to the target so we can use the target's ``install``
|
||||
method which will try to copy the file to a location on the device that
|
||||
supports execution, change the file mode appropriately and return the
|
||||
file path on the target. ::
|
||||
|
||||
def initialize(self, context):
|
||||
self.trace_on_target = self.target.install(self.binary_file)
|
||||
|
||||
Then we implement the start method, which will simply run the file to start
|
||||
tracing. Supposing that the call to this binary requires some overhead to begin
|
||||
collecting errors we might want to decorate the method with the ``@slow``
|
||||
decorator to try and reduce the impact on other running instruments. For more
|
||||
information on prioritization please see the
|
||||
:ref:`Developer Reference <prioritization>`. ::
|
||||
|
||||
@slow
|
||||
def start(self, context):
|
||||
self.target.execute('{} start'.format(self.trace_on_target))
|
||||
|
||||
Lastly, we need to stop tracing once the workload stops and this happens in the
|
||||
stop method, assuming stopping the collection also requires some overhead, we have
|
||||
again decorated the method. ::
|
||||
|
||||
@slow
|
||||
def stop(self, context):
|
||||
self.target.execute('{} stop'.format(self.trace_on_target))
|
||||
|
||||
Once we have generated our result data we need to retrieve it from the device
|
||||
for further processing or adding directly to WA's output for that job. For
|
||||
example for trace data we will want to pull it to the device and add it as a
|
||||
:ref:`artifact <artifact>` to WA's :ref:`context <context>`. Once we have
|
||||
retrieved the data, we can now do any further processing and add any relevant
|
||||
:ref:`Metrics <metrics>` to the :ref:`context <context>`. For this we will use
|
||||
the ``add_metric`` method to add the results to the final output for that
|
||||
workload. The method can be passed 4 params, which are the metric `key`,
|
||||
`value`, `unit` and `lower_is_better`. ::
|
||||
|
||||
def update_output(self, context):
|
||||
# pull the trace file from the target
|
||||
self.result = os.path.join(self.target.working_directory, 'trace.txt')
|
||||
self.outfile = os.path.join(context.output_directory, 'trace.txt')
|
||||
self.target.pull(self.result, self.outfile)
|
||||
context.add_artifact('error_trace', self.outfile, kind='export')
|
||||
|
||||
# parse the file if needs to be parsed, or add result directly to
|
||||
# context.
|
||||
|
||||
metric = # ..
|
||||
context.add_metric('number_of_errors', metric, lower_is_better=True)
|
||||
|
||||
At the end of each job we might want to delete any files generated by the
|
||||
instruments and the code to clear these file goes in teardown method. ::
|
||||
|
||||
def teardown(self, context):
|
||||
self.target.remove(os.path.join(self.target.working_directory, 'trace.txt'))
|
||||
|
||||
At the very end of the run we would want to uninstall the binary we deployed earlier. ::
|
||||
|
||||
def finalize(self, context):
|
||||
self.target.uninstall(self.binary_name)
|
||||
|
||||
So the full example would look something like::
|
||||
|
||||
from wa import Instrument
|
||||
|
||||
class TraceErrorsInstrument(Instrument):
|
||||
|
||||
name = 'trace-errors'
|
||||
|
||||
def __init__(self, target, **kwargs):
|
||||
super(TraceErrorsInstrument, self).__init__(target, **kwargs)
|
||||
self.binary_name = 'trace'
|
||||
self.binary_file = os.path.join(os.path.dirname(__file__), self.binary_name)
|
||||
self.trace_on_target = None
|
||||
|
||||
def initialize(self, context):
|
||||
self.trace_on_target = self.target.install(self.binary_file)
|
||||
|
||||
@slow
|
||||
def start(self, context):
|
||||
self.target.execute('{} start'.format(self.trace_on_target))
|
||||
|
||||
@slow
|
||||
def stop(self, context):
|
||||
self.target.execute('{} stop'.format(self.trace_on_target))
|
||||
|
||||
def update_output(self, context):
|
||||
self.result = os.path.join(self.target.working_directory, 'trace.txt')
|
||||
self.outfile = os.path.join(context.output_directory, 'trace.txt')
|
||||
self.target.pull(self.result, self.outfile)
|
||||
context.add_artifact('error_trace', self.outfile, kind='export')
|
||||
|
||||
metric = # ..
|
||||
context.add_metric('number_of_errors', metric, lower_is_better=True)
|
||||
|
||||
def teardown(self, context):
|
||||
self.target.remove(os.path.join(self.target.working_directory, 'trace.txt'))
|
||||
|
||||
def finalize(self, context):
|
||||
self.target.uninstall(self.binary_name)
|
||||
|
||||
.. _adding-an-output-processor-example:
|
||||
|
||||
Adding an Output Processor
|
||||
==========================
|
||||
|
||||
This is an example of how we would create an output processor which will format
|
||||
the run metrics as a column-aligned table. The first thing to do is to create
|
||||
a new file under ``$WA_USER_DIRECTORY/plugins/`` and subclass
|
||||
:class:`OutputProcessor`. Make sure to overwrite the variable name with what we want our
|
||||
processor to be called and provide a short description.
|
||||
|
||||
Next we need to implement any relevant methods, (please see
|
||||
:ref:`adding an output processor <adding-an-output-processor>` for all the
|
||||
available methods). In this case we only want to implement the
|
||||
``export_run_output`` method as we are not generating any new artifacts and
|
||||
we only care about the overall output rather than the individual job
|
||||
outputs. The implementation is very simple, it just loops through all
|
||||
the available metrics for all the available jobs and adds them to a list
|
||||
which is written to file and then added as an :ref:`artifact <artifact>` to
|
||||
the :ref:`context <context>`.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import os
|
||||
from wa import OutputProcessor
|
||||
from wa.utils.misc import write_table
|
||||
|
||||
|
||||
class Table(OutputProcessor):
|
||||
|
||||
name = 'table'
|
||||
description = 'Generates a text file containing a column-aligned table of run results.'
|
||||
|
||||
def export_run_output(self, output, target_info):
|
||||
rows = []
|
||||
|
||||
for job in output.jobs:
|
||||
for metric in job.metrics:
|
||||
rows.append([metric.name, str(metric.value), metric.units or '',
|
||||
metric.lower_is_better and '-' or '+'])
|
||||
|
||||
outfile = output.get_path('table.txt')
|
||||
with open(outfile, 'w') as wfh:
|
||||
write_table(rows, wfh)
|
||||
output.add_artifact('results_table', 'table.txt', 'export')
|
||||
|
||||
|
||||
.. _adding-custom-target-example:
|
||||
|
||||
Adding a Custom Target
|
||||
======================
|
||||
This is an example of how we would create a customised target, this is typically
|
||||
used where we would need to augment the existing functionality for example on
|
||||
development boards where we need to perform additional actions to implement some
|
||||
functionality. In this example we are going to assume that this particular
|
||||
device is running Android and requires a special "wakeup" command to be sent before it
|
||||
can execute any other command.
|
||||
|
||||
To add a new target to WA we will first create a new file in
|
||||
``$WA_USER_DIRECTORY/plugins/example_target.py``. To facilitate
|
||||
creating a new target WA provides a helper function to create a description for
|
||||
the specified target class, and specified components. For components that are
|
||||
not explicitly specified it will attempt to guess sensible defaults based on the target
|
||||
class' bases.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# Import our helper function
|
||||
from wa import add_description_for_target
|
||||
|
||||
# Import the Target that our custom implementation will be based on
|
||||
from devlib import AndroidTarget
|
||||
|
||||
class ExampleTarget(AndroidTarget):
|
||||
# Provide the name that will be used to identify your custom target
|
||||
name = 'example_target'
|
||||
|
||||
# Override our custom method(s)
|
||||
def execute(self, *args, **kwargs):
|
||||
super(ExampleTarget, self).execute('wakeup', check_exit_code=False)
|
||||
return super(ExampleTarget, self).execute(*args, **kwargs)
|
||||
|
||||
|
||||
description = '''An Android target which requires an explicit "wakeup" command
|
||||
to be sent before accepting any other command'''
|
||||
# Call the helper function with our newly created function and its description.
|
||||
add_description_for_target(ExampleTarget, description)
|
||||
|
@ -1,395 +0,0 @@
|
||||
.. _processing_output:
|
||||
|
||||
Processing WA Output
|
||||
====================
|
||||
|
||||
This section will illustrate the use of WA's :ref:`output processing API
|
||||
<output_processing_api>` by creating a simple ASCII report generator. To make
|
||||
things concrete, this how-to will be processing the output from running the
|
||||
following agenda::
|
||||
|
||||
sections:
|
||||
- runtime_params:
|
||||
frequency: min
|
||||
classifiers:
|
||||
frequency: min
|
||||
- runtime_params:
|
||||
frequency: max
|
||||
classifiers:
|
||||
frequency: max
|
||||
workloads:
|
||||
- sysbench
|
||||
- deepbench
|
||||
|
||||
This runs two workloads under two different configurations each -- once with
|
||||
CPU frequency fixed to max, and once with CPU frequency fixed to min.
|
||||
Classifiers are used to indicate the configuration in the output.
|
||||
|
||||
First, create the :class:`RunOutput` object, which is the main interface for
|
||||
interacting with WA outputs. Or alternatively a :class:`RunDatabaseOutput`
|
||||
if storing your results in a postgres database.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import sys
|
||||
|
||||
from wa import RunOutput
|
||||
|
||||
# Path to the output directory specified in the first argument
|
||||
ro = RunOutput(sys.argv[1])
|
||||
|
||||
|
||||
Run Info
|
||||
--------
|
||||
|
||||
Next, we're going to print out an overall summary of the run.
|
||||
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from __future__ import print_function # for Python 2 compat.
|
||||
|
||||
from wa.utils.misc import format_duration
|
||||
|
||||
print('-'*20)
|
||||
print('Run ID:', ro.info.uuid)
|
||||
print('Run status:', ro.status)
|
||||
print('Run started at:', ro.info.start_time.isoformat())
|
||||
print('Run completed at:', ro.info.end_time.isoformat())
|
||||
print('Run duration:', format_duration(ro.info.duration))
|
||||
print('Ran', len(ro.jobs), 'jobs')
|
||||
print('-'*20)
|
||||
print()
|
||||
|
||||
``RunOutput.info`` is an instance of :class:`RunInfo` which encapsulates
|
||||
Overall-run metadata, such as the duration.
|
||||
|
||||
|
||||
Target Info
|
||||
-----------
|
||||
|
||||
Next, some information about the device the results were collected on.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
print(' Target Information ')
|
||||
print(' ------------------- ')
|
||||
print('hostname:', ro.target_info.hostname)
|
||||
if ro.target_info.os == 'android':
|
||||
print('Android ID:', ro.target_info.android_id)
|
||||
else:
|
||||
print('host ID:', ro.target_info.hostid)
|
||||
print('CPUs:', ', '.join(cpu.name for cpu in ro.target_info.cpus))
|
||||
print()
|
||||
|
||||
print('OS:', ro.target_info.os)
|
||||
print('ABI:', ro.target_info.abi)
|
||||
print('rooted:', ro.target_info.is_rooted)
|
||||
print('kernel version:', ro.target_info.kernel_version)
|
||||
print('os version:')
|
||||
for k, v in ro.target_info.os_version.items():
|
||||
print('\t', k+':', v)
|
||||
print()
|
||||
print('-'*27)
|
||||
print()
|
||||
|
||||
``RunOutput.target_info`` is an instance of :class:`TargetInfo` that contains
|
||||
information collected from the target during the run.
|
||||
|
||||
|
||||
Jobs Summary
|
||||
------------
|
||||
|
||||
Next, show a summary of executed jobs.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from wa.utils.misc import write_table
|
||||
|
||||
print(' Jobs ')
|
||||
print(' ---- ')
|
||||
print()
|
||||
rows = []
|
||||
for job in ro.jobs:
|
||||
rows.append([job.id, job.label, job.iteration, job.status])
|
||||
write_table(rows, sys.stdout, align='<<><',
|
||||
headers=['ID', 'LABEL', 'ITER.', 'STATUS'])
|
||||
print()
|
||||
print('-'*27)
|
||||
print()
|
||||
|
||||
``RunOutput.jobs`` is a list of :class:`JobOutput` objects. These contain
|
||||
information about that particular job, including its execution status, and
|
||||
:ref:`metrics` and :ref:`artifact` generated by the job.
|
||||
|
||||
|
||||
Compare Metrics
|
||||
---------------
|
||||
|
||||
Finally, collect metrics, sort them by the "frequency" classifier. Classifiers
|
||||
that are present in the metric but not its job have been added by the workload.
|
||||
For the purposes of this report, they will be used to augment the metric's name.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from collections import defaultdict
|
||||
|
||||
print()
|
||||
print(' Metrics Comparison ')
|
||||
print(' ------------------ ')
|
||||
print()
|
||||
scores = defaultdict(lambda: defaultdict(lambda: defaultdict()))
|
||||
for job in ro.jobs:
|
||||
for metric in job.metrics:
|
||||
workload = job.label
|
||||
name = metric.name
|
||||
freq = job.classifiers['frequency']
|
||||
for cname, cval in sorted(metric.classifiers.items()):
|
||||
if cname not in job.classifiers:
|
||||
# was not propagated from the job, therefore was
|
||||
# added by the workload
|
||||
name += '/{}={}'.format(cname, cval)
|
||||
|
||||
scores[workload][name][freq] = metric
|
||||
|
||||
Once the metrics have been sorted, generate the report showing the delta
|
||||
between the two configurations (indicated by the "frequency" classifier) and
|
||||
highlight any unexpected deltas (based on the ``lower_is_better`` attribute of
|
||||
the metric). (In practice, you will want to run multiple iterations of each
|
||||
configuration, calculate averages and standard deviations, and only highlight
|
||||
statistically significant deltas.)
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
rows = []
|
||||
for workload in sorted(scores.keys()):
|
||||
wldata = scores[workload]
|
||||
|
||||
for name in sorted(wldata.keys()):
|
||||
min_score = wldata[name]['min'].value
|
||||
max_score = wldata[name]['max'].value
|
||||
delta = max_score - min_score
|
||||
units = wldata[name]['min'].units or ''
|
||||
lib = wldata[name]['min'].lower_is_better
|
||||
|
||||
warn = ''
|
||||
if (lib and delta > 0) or (not lib and delta < 0):
|
||||
warn = '!!!'
|
||||
|
||||
rows.append([workload, name,
|
||||
'{:.3f}'.format(min_score), '{:.3f}'.format(max_score),
|
||||
'{:.3f}'.format(delta), units, warn])
|
||||
|
||||
# separate workloads with a blank row
|
||||
rows.append(['', '', '', '', '', '', ''])
|
||||
|
||||
|
||||
write_table(rows, sys.stdout, align='<<>>><<',
|
||||
headers=['WORKLOAD', 'METRIC', 'MIN.', 'MAX', 'DELTA', 'UNITS', ''])
|
||||
print()
|
||||
print('-'*27)
|
||||
|
||||
This concludes this how-to. For more information, please see :ref:`output
|
||||
processing API documentation <output_processing_api>`.
|
||||
|
||||
|
||||
Complete Example
|
||||
----------------
|
||||
|
||||
Below is the complete example code, and a report it generated for a sample run.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from __future__ import print_function # for Python 2 compat.
|
||||
import sys
|
||||
from collections import defaultdict
|
||||
|
||||
from wa import RunOutput
|
||||
from wa.utils.misc import format_duration, write_table
|
||||
|
||||
|
||||
|
||||
# Path to the output directory specified in the first argument
|
||||
ro = RunOutput(sys.argv[1])
|
||||
|
||||
print('-'*27)
|
||||
print('Run ID:', ro.info.uuid)
|
||||
print('Run status:', ro.status)
|
||||
print('Run started at:', ro.info.start_time.isoformat())
|
||||
print('Run completed at:', ro.info.end_time.isoformat())
|
||||
print('Run duration:', format_duration(ro.info.duration))
|
||||
print('Ran', len(ro.jobs), 'jobs')
|
||||
print('-'*27)
|
||||
print()
|
||||
|
||||
print(' Target Information ')
|
||||
print(' ------------------- ')
|
||||
print('hostname:', ro.target_info.hostname)
|
||||
if ro.target_info.os == 'android':
|
||||
print('Android ID:', ro.target_info.android_id)
|
||||
else:
|
||||
print('host ID:', ro.target_info.hostid)
|
||||
print('CPUs:', ', '.join(cpu.name for cpu in ro.target_info.cpus))
|
||||
print()
|
||||
|
||||
print('OS:', ro.target_info.os)
|
||||
print('ABI:', ro.target_info.abi)
|
||||
print('rooted:', ro.target_info.is_rooted)
|
||||
print('kernel version:', ro.target_info.kernel_version)
|
||||
print('OS version:')
|
||||
for k, v in ro.target_info.os_version.items():
|
||||
print('\t', k+':', v)
|
||||
print()
|
||||
print('-'*27)
|
||||
print()
|
||||
|
||||
print(' Jobs ')
|
||||
print(' ---- ')
|
||||
print()
|
||||
rows = []
|
||||
for job in ro.jobs:
|
||||
rows.append([job.id, job.label, job.iteration, job.status])
|
||||
write_table(rows, sys.stdout, align='<<><',
|
||||
headers=['ID', 'LABEL', 'ITER.', 'STATUS'])
|
||||
print()
|
||||
print('-'*27)
|
||||
|
||||
print()
|
||||
print(' Metrics Comparison ')
|
||||
print(' ------------------ ')
|
||||
print()
|
||||
scores = defaultdict(lambda: defaultdict(lambda: defaultdict()))
|
||||
for job in ro.jobs:
|
||||
for metric in job.metrics:
|
||||
workload = job.label
|
||||
name = metric.name
|
||||
freq = job.classifiers['frequency']
|
||||
for cname, cval in sorted(metric.classifiers.items()):
|
||||
if cname not in job.classifiers:
|
||||
# was not propagated from the job, therefore was
|
||||
# added by the workload
|
||||
name += '/{}={}'.format(cname, cval)
|
||||
|
||||
scores[workload][name][freq] = metric
|
||||
|
||||
rows = []
|
||||
for workload in sorted(scores.keys()):
|
||||
wldata = scores[workload]
|
||||
|
||||
for name in sorted(wldata.keys()):
|
||||
min_score = wldata[name]['min'].value
|
||||
max_score = wldata[name]['max'].value
|
||||
delta = max_score - min_score
|
||||
units = wldata[name]['min'].units or ''
|
||||
lib = wldata[name]['min'].lower_is_better
|
||||
|
||||
warn = ''
|
||||
if (lib and delta > 0) or (not lib and delta < 0):
|
||||
warn = '!!!'
|
||||
|
||||
rows.append([workload, name,
|
||||
'{:.3f}'.format(min_score), '{:.3f}'.format(max_score),
|
||||
'{:.3f}'.format(delta), units, warn])
|
||||
|
||||
# separate workloads with a blank row
|
||||
rows.append(['', '', '', '', '', '', ''])
|
||||
|
||||
|
||||
write_table(rows, sys.stdout, align='<<>>><<',
|
||||
headers=['WORKLOAD', 'METRIC', 'MIN.', 'MAX', 'DELTA', 'UNITS', ''])
|
||||
print()
|
||||
print('-'*27)
|
||||
|
||||
Sample output::
|
||||
|
||||
---------------------------
|
||||
Run ID: 78aef931-cd4c-429b-ac9f-61f6893312e6
|
||||
Run status: OK
|
||||
Run started at: 2018-06-27T12:55:23.746941
|
||||
Run completed at: 2018-06-27T13:04:51.067309
|
||||
Run duration: 9 minutes 27 seconds
|
||||
Ran 4 jobs
|
||||
---------------------------
|
||||
|
||||
Target Information
|
||||
-------------------
|
||||
hostname: localhost
|
||||
Android ID: b9d1d8b48cfba007
|
||||
CPUs: A53, A53, A53, A53, A73, A73, A73, A73
|
||||
|
||||
OS: android
|
||||
ABI: arm64
|
||||
rooted: True
|
||||
kernel version: 4.9.75-04208-g2c913991a83d-dirty 114 SMP PREEMPT Wed May 9 10:33:36 BST 2018
|
||||
OS version:
|
||||
all_codenames: O
|
||||
base_os:
|
||||
codename: O
|
||||
incremental: eng.valsch.20170517.180115
|
||||
preview_sdk: 0
|
||||
release: O
|
||||
sdk: 25
|
||||
security_patch: 2017-04-05
|
||||
|
||||
---------------------------
|
||||
|
||||
Jobs
|
||||
----
|
||||
|
||||
ID LABEL ITER. STATUS
|
||||
-- ----- ----- ------
|
||||
s1-wk1 sysbench 1 OK
|
||||
s1-wk2 deepbench 1 OK
|
||||
s2-wk1 sysbench 1 OK
|
||||
s2-wk2 deepbench 1 OK
|
||||
|
||||
---------------------------
|
||||
|
||||
Metrics Comparison
|
||||
------------------
|
||||
|
||||
WORKLOAD METRIC MIN. MAX DELTA UNITS
|
||||
-------- ------ ---- --- ----- -----
|
||||
deepbench GOPS/a_t=n/b_t=n/k=1024/m=128/n=1 0.699 0.696 -0.003 !!!
|
||||
deepbench GOPS/a_t=n/b_t=n/k=1024/m=3072/n=1 0.471 0.715 0.244
|
||||
deepbench GOPS/a_t=n/b_t=n/k=1024/m=3072/n=1500 23.514 36.432 12.918
|
||||
deepbench GOPS/a_t=n/b_t=n/k=1216/m=64/n=1 0.333 0.333 -0.000 !!!
|
||||
deepbench GOPS/a_t=n/b_t=n/k=128/m=3072/n=1 0.405 1.073 0.668
|
||||
deepbench GOPS/a_t=n/b_t=n/k=128/m=3072/n=1500 19.914 34.966 15.052
|
||||
deepbench GOPS/a_t=n/b_t=n/k=128/m=4224/n=1 0.232 0.486 0.255
|
||||
deepbench GOPS/a_t=n/b_t=n/k=1280/m=128/n=1500 20.721 31.654 10.933
|
||||
deepbench GOPS/a_t=n/b_t=n/k=1408/m=128/n=1 0.701 0.702 0.001
|
||||
deepbench GOPS/a_t=n/b_t=n/k=1408/m=176/n=1500 19.902 29.116 9.214
|
||||
deepbench GOPS/a_t=n/b_t=n/k=176/m=4224/n=1500 26.030 39.550 13.519
|
||||
deepbench GOPS/a_t=n/b_t=n/k=2048/m=35/n=700 10.884 23.615 12.731
|
||||
deepbench GOPS/a_t=n/b_t=n/k=2048/m=5124/n=700 26.740 37.334 10.593
|
||||
deepbench execution_time 318.758 220.629 -98.129 seconds !!!
|
||||
deepbench time (msec)/a_t=n/b_t=n/k=1024/m=128/n=1 0.375 0.377 0.002 !!!
|
||||
deepbench time (msec)/a_t=n/b_t=n/k=1024/m=3072/n=1 13.358 8.793 -4.565
|
||||
deepbench time (msec)/a_t=n/b_t=n/k=1024/m=3072/n=1500 401.338 259.036 -142.302
|
||||
deepbench time (msec)/a_t=n/b_t=n/k=1216/m=64/n=1 0.467 0.467 0.000 !!!
|
||||
deepbench time (msec)/a_t=n/b_t=n/k=128/m=3072/n=1 1.943 0.733 -1.210
|
||||
deepbench time (msec)/a_t=n/b_t=n/k=128/m=3072/n=1500 59.237 33.737 -25.500
|
||||
deepbench time (msec)/a_t=n/b_t=n/k=128/m=4224/n=1 4.666 2.224 -2.442
|
||||
deepbench time (msec)/a_t=n/b_t=n/k=1280/m=128/n=1500 23.721 15.528 -8.193
|
||||
deepbench time (msec)/a_t=n/b_t=n/k=1408/m=128/n=1 0.514 0.513 -0.001
|
||||
deepbench time (msec)/a_t=n/b_t=n/k=1408/m=176/n=1500 37.354 25.533 -11.821
|
||||
deepbench time (msec)/a_t=n/b_t=n/k=176/m=4224/n=1500 85.679 56.391 -29.288
|
||||
deepbench time (msec)/a_t=n/b_t=n/k=2048/m=35/n=700 9.220 4.249 -4.970
|
||||
deepbench time (msec)/a_t=n/b_t=n/k=2048/m=5124/n=700 549.413 393.517 -155.896
|
||||
|
||||
sysbench approx. 95 percentile 3.800 1.450 -2.350 ms
|
||||
sysbench execution_time 1.790 1.437 -0.353 seconds !!!
|
||||
sysbench response time avg 1.400 1.120 -0.280 ms
|
||||
sysbench response time max 40.740 42.760 2.020 ms !!!
|
||||
sysbench response time min 0.710 0.710 0.000 ms
|
||||
sysbench thread fairness events avg 1250.000 1250.000 0.000
|
||||
sysbench thread fairness events stddev 772.650 213.040 -559.610
|
||||
sysbench thread fairness execution time avg 1.753 1.401 -0.352 !!!
|
||||
sysbench thread fairness execution time stddev 0.000 0.000 0.000
|
||||
sysbench total number of events 10000.000 10000.000 0.000
|
||||
sysbench total time 1.761 1.409 -0.352 s
|
||||
|
||||
|
||||
---------------------------
|
||||
|
407
doc/source/device_setup.rst
Normal file
407
doc/source/device_setup.rst
Normal file
@ -0,0 +1,407 @@
|
||||
Setting Up A Device
|
||||
===================
|
||||
|
||||
WA should work with most Android devices out-of-the box, as long as the device
|
||||
is discoverable by ``adb`` (i.e. gets listed when you run ``adb devices``). For
|
||||
USB-attached devices, that should be the case; for network devices, ``adb connect``
|
||||
would need to be invoked with the IP address of the device. If there is only one
|
||||
device connected to the host running WA, then no further configuration should be
|
||||
necessary (though you may want to :ref:`tweak some Android settings <configuring-android>`\ ).
|
||||
|
||||
If you have multiple devices connected, have a non-standard Android build (e.g.
|
||||
on a development board), or want to use of the more advanced WA functionality,
|
||||
further configuration will be required.
|
||||
|
||||
Android
|
||||
+++++++
|
||||
|
||||
General Device Setup
|
||||
--------------------
|
||||
|
||||
You can specify the device interface by setting ``device`` setting in
|
||||
``~/.workload_automation/config.py``. Available interfaces can be viewed by
|
||||
running ``wa list devices`` command. If you don't see your specific device
|
||||
listed (which is likely unless you're using one of the ARM-supplied platforms), then
|
||||
you should use ``generic_android`` interface (this is set in the config by
|
||||
default).
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
device = 'generic_android'
|
||||
|
||||
The device interface may be configured through ``device_config`` setting, whose
|
||||
value is a ``dict`` mapping setting names to their values. You can find the full
|
||||
list of available parameters by looking up your device interface in the
|
||||
:ref:`devices` section of the documentation. Some of the most common parameters
|
||||
you might want to change are outlined below.
|
||||
|
||||
.. confval:: adb_name
|
||||
|
||||
If you have multiple Android devices connected to the host machine, you will
|
||||
need to set this to indicate to WA which device you want it to use.
|
||||
|
||||
.. confval:: working_directory
|
||||
|
||||
WA needs a "working" directory on the device which it will use for collecting
|
||||
traces, caching assets it pushes to the device, etc. By default, it will
|
||||
create one under ``/sdcard`` which should be mapped and writable on standard
|
||||
Android builds. If this is not the case for your device, you will need to
|
||||
specify an alternative working directory (e.g. under ``/data/local``).
|
||||
|
||||
.. confval:: scheduler
|
||||
|
||||
This specifies the scheduling mechanism (from the perspective of core layout)
|
||||
utilized by the device). For recent big.LITTLE devices, this should generally
|
||||
be "hmp" (ARM Heterogeneous Multi-Processing); some legacy development
|
||||
platforms might have Linaro IKS kernels, in which case it should be "iks".
|
||||
For homogeneous (single-cluster) devices, it should be "smp". Please see
|
||||
``scheduler`` parameter in the ``generic_android`` device documentation for
|
||||
more details.
|
||||
|
||||
.. confval:: core_names
|
||||
|
||||
This and ``core_clusters`` need to be set if you want to utilize some more
|
||||
advanced WA functionality (like setting of core-related runtime parameters
|
||||
such as governors, frequencies, etc). ``core_names`` should be a list of
|
||||
core names matching the order in which they are exposed in sysfs. For
|
||||
example, ARM TC2 SoC is a 2x3 big.LITTLE system; its core_names would be
|
||||
``['a7', 'a7', 'a7', 'a15', 'a15']``, indicating that cpu0-cpu2 in cpufreq
|
||||
sysfs structure are A7's and cpu3 and cpu4 are A15's.
|
||||
|
||||
.. confval:: core_clusters
|
||||
|
||||
If ``core_names`` is defined, this must also be defined. This is a list of
|
||||
integer values indicating the cluster the corresponding core in
|
||||
``cores_names`` belongs to. For example, for TC2, this would be
|
||||
``[0, 0, 0, 1, 1]``, indicating that A7's are on cluster 0 and A15's are on
|
||||
cluster 1.
|
||||
|
||||
A typical ``device_config`` inside ``config.py`` may look something like
|
||||
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
device_config = dict(
|
||||
adb_name='0123456789ABCDEF',
|
||||
working_directory='/sdcard/wa-working',
|
||||
core_names=['a7', 'a7', 'a7', 'a15', 'a15'],
|
||||
core_clusters=[0, 0, 0, 1, 1],
|
||||
# ...
|
||||
)
|
||||
|
||||
.. _configuring-android:
|
||||
|
||||
Configuring Android
|
||||
-------------------
|
||||
|
||||
There are a few additional tasks you may need to perform once you have a device
|
||||
booted into Android (especially if this is an initial boot of a fresh OS
|
||||
deployment):
|
||||
|
||||
- You have gone through FTU (first time usage) on the home screen and
|
||||
in the apps menu.
|
||||
- You have disabled the screen lock.
|
||||
- You have set sleep timeout to the highest possible value (30 mins on
|
||||
most devices).
|
||||
- You have disabled brightness auto-adjust and have set the brightness
|
||||
to a fixed level.
|
||||
- You have set the locale language to "English" (this is important for
|
||||
some workloads in which UI automation looks for specific text in UI
|
||||
elements).
|
||||
|
||||
TC2 Setup
|
||||
---------
|
||||
|
||||
This section outlines how to setup ARM TC2 development platform to work with WA.
|
||||
|
||||
Pre-requisites
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
You can obtain the full set of images for TC2 from Linaro:
|
||||
|
||||
https://releases.linaro.org/latest/android/vexpress-lsk.
|
||||
|
||||
For the easiest setup, follow the instructions on the "Firmware" and "Binary
|
||||
Image Installation" tabs on that page.
|
||||
|
||||
.. note:: The default ``reboot_policy`` in ``config.py`` is to not reboot. With
|
||||
this WA will assume that the device is already booted into Android
|
||||
prior to WA being invoked. If you want WA to do the initial boot of
|
||||
the TC2, you will have to change reboot policy to at least
|
||||
``initial``.
|
||||
|
||||
|
||||
Setting Up Images
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. note:: Make sure that both DIP switches near the black reset button on TC2
|
||||
are up (this is counter to the Linaro guide that instructs to lower
|
||||
one of the switches).
|
||||
|
||||
.. note:: The TC2 must have an Ethernet connection.
|
||||
|
||||
|
||||
If you have followed the setup instructions on the Linaro page, you should have
|
||||
a USB stick or an SD card with the file system, and internal microSD on the
|
||||
board (VEMSD) with the firmware images. The default Linaro configuration is to
|
||||
boot from the image on the boot partition in the file system you have just
|
||||
created. This is not supported by WA, which expects the image to be in NOR flash
|
||||
on the board. This requires you to copy the images from the boot partition onto
|
||||
the internal microSD card.
|
||||
|
||||
Assuming the boot partition of the Linaro file system is mounted on
|
||||
``/media/boot`` and the internal microSD is mounted on ``/media/VEMSD``, copy
|
||||
the following images::
|
||||
|
||||
cp /media/boot/zImage /media/VEMSD/SOFTWARE/kern_mp.bin
|
||||
cp /media/boot/initrd /media/VEMSD/SOFTWARE/init_mp.bin
|
||||
cp /media/boot/v2p-ca15-tc2.dtb /media/VEMSD/SOFTWARE/mp_a7bc.dtb
|
||||
|
||||
Optionally
|
||||
##########
|
||||
|
||||
The default device tree configuration for the TC2 is to boot on the A7 cluster. It
|
||||
is also possible to configure the device tree to boot on the A15 cluster, or to
|
||||
boot with one of the clusters disabled (turning TC2 into an A7-only or A15-only
|
||||
device). Please refer to the "Firmware" tab on the Linaro page linked above for
|
||||
instructions on how to compile the appropriate device tree configurations.
|
||||
|
||||
WA allows selecting between these configurations using ``os_mode`` boot
|
||||
parameter of the TC2 device interface. In order for this to work correctly,
|
||||
device tree files for the A15-bootcluster, A7-only and A15-only configurations
|
||||
should be copied into ``/media/VEMSD/SOFTWARE/`` as ``mp_a15bc.dtb``,
|
||||
``mp_a7.dtb`` and ``mp_a15.dtb`` respectively.
|
||||
|
||||
This is entirely optional. If you're not planning on switching boot cluster
|
||||
configuration, those files do not need to be present in VEMSD.
|
||||
|
||||
config.txt
|
||||
##########
|
||||
|
||||
Also, make sure that ``USB_REMOTE`` setting in ``/media/VEMSD/config.txt`` is set
|
||||
to ``TRUE`` (this will allow rebooting the device by writing reboot.txt to
|
||||
VEMSD). ::
|
||||
|
||||
USB_REMOTE: TRUE ;Selects remote command via USB
|
||||
|
||||
|
||||
TC2-specific device_config settings
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
There are a few settings that may need to be set in ``device_config`` inside
|
||||
your ``config.py`` which are specific to TC2:
|
||||
|
||||
.. note:: TC2 *does not* accept most "standard" android ``device_config``
|
||||
settings.
|
||||
|
||||
adb_name
|
||||
If you're running WA with reboots disabled (which is the default reboot
|
||||
policy), you will need to manually run ``adb connect`` with TC2's IP
|
||||
address and set this.
|
||||
|
||||
root_mount
|
||||
WA expects TC2's internal microSD to be mounted on the host under
|
||||
``/media/VEMSD``. If this location is different, it needs to be specified
|
||||
using this setting.
|
||||
|
||||
boot_firmware
|
||||
WA defaults to try booting using UEFI, which will require some additional
|
||||
firmware from ARM that may not be provided with Linaro releases (see the
|
||||
UEFI and PSCI section below). If you do not have those images, you will
|
||||
need to set ``boot_firmware`` to ``bootmon``.
|
||||
|
||||
fs_medium
|
||||
TC2's file system can reside either on an SD card or on a USB stick. Boot
|
||||
configuration is different depending on this. By default, WA expects it
|
||||
to be on ``usb``; if you are using an SD card, you should set this to
|
||||
``sd``.
|
||||
|
||||
bm_image
|
||||
Bootmon image that comes as part of TC2 firmware periodically gets
|
||||
updated. At the time of the release, ``bm_v519r.axf`` was used by
|
||||
ARM. If you are using a more recent image, you will need to set this
|
||||
indicating the image name (just the name of the actual file, *not* the
|
||||
path). Note: this setting only applies if using ``bootmon`` boot
|
||||
firmware.
|
||||
|
||||
serial_device
|
||||
WA will assume TC2 is connected on ``/dev/ttyS0`` by default. If the
|
||||
serial port is different, you will need to set this.
|
||||
|
||||
|
||||
UEFI and PSCI
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
UEFI is a boot firmware alternative to bootmon. Currently UEFI is coupled with PSCI (Power State Coordination Interface). That means
|
||||
that in order to use PSCI, UEFI has to be the boot firmware. Currently the reverse dependency is true as well (for TC2). Therefore
|
||||
using UEFI requires enabling PSCI.
|
||||
|
||||
In case you intend to use uefi/psci mode instead of bootmon, you will need two additional files: tc2_sec.bin and tc2_uefi.bin.
|
||||
After obtaining those files, place them inside the /media/VEMSD/SOFTWARE/ directory as follows::
|
||||
|
||||
cp tc2_sec.bin /media/VEMSD/SOFTWARE/
|
||||
cp tc2_uefi.bin /media/VEMSD/SOFTWARE/
|
||||
|
||||
|
||||
Juno Setup
|
||||
----------
|
||||
|
||||
.. note:: At the time of writing, the Android software stack on Juno was still
|
||||
very immature. Some workloads may not run, and there may be stability
|
||||
issues with the device.
|
||||
|
||||
|
||||
The full software stack can be obtained from Linaro:
|
||||
|
||||
https://releases.linaro.org/14.08/members/arm/android/images/armv8-android-juno-lsk
|
||||
|
||||
Please follow the instructions on the "Binary Image Installation" tab on that
|
||||
page. More up-to-date firmware and kernel may also be obtained by registered
|
||||
members from ARM Connected Community: http://www.arm.com/community/ (though this
|
||||
is not guaranteed to work with the Linaro file system).
|
||||
|
||||
UEFI
|
||||
~~~~
|
||||
|
||||
Juno uses UEFI_ to boot the kernel image. UEFI supports multiple boot
|
||||
configurations, and presents a menu on boot to select (in default configuration
|
||||
it will automatically boot the first entry in the menu if not interrupted before
|
||||
a timeout). WA will look for a specific entry in the UEFI menu
|
||||
(``'WA'`` by default, but that may be changed by setting ``uefi_entry`` in the
|
||||
``device_config``). When following the UEFI instructions on the above Linaro
|
||||
page, please make sure to name the entry appropriately (or to correctly set the
|
||||
``uefi_entry``).
|
||||
|
||||
.. _UEFI: http://en.wikipedia.org/wiki/UEFI
|
||||
|
||||
There are two supported ways for Juno to discover kernel images through UEFI. It
|
||||
can either load them from NOR flash on the board, or from the boot partition on the
|
||||
file system. The setup described on the Linaro page uses the boot partition
|
||||
method.
|
||||
|
||||
If WA does not find the UEFI entry it expects, it will create one. However, it
|
||||
will assume that the kernel image resides in NOR flash, which means it will not
|
||||
work with Linaro file system. So if you're replicating the Linaro setup exactly,
|
||||
you will need to create the entry manually, as outlined on the above-linked page.
|
||||
|
||||
Rebooting
|
||||
~~~~~~~~~
|
||||
|
||||
At the time of writing, normal Android reboot did not work properly on Juno
|
||||
Android, causing the device to crash into an irrecoverable state. Therefore, WA
|
||||
will perform a hard reset to reboot the device. It will attempt to do this by
|
||||
toggling the DTR line on the serial connection to the device. In order for this
|
||||
to work, you need to make sure that SW1 configuration switch on the back panel of
|
||||
the board (the right-most DIP switch) is toggled *down*.
|
||||
|
||||
|
||||
Linux
|
||||
+++++
|
||||
|
||||
General Device Setup
|
||||
--------------------
|
||||
|
||||
You can specify the device interface by setting ``device`` setting in
|
||||
``~/.workload_automation/config.py``. Available interfaces can be viewed by
|
||||
running ``wa list devices`` command. If you don't see your specific device
|
||||
listed (which is likely unless you're using one of the ARM-supplied platforms), then
|
||||
you should use ``generic_linux`` interface (this is set in the config by
|
||||
default).
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
device = 'generic_linux'
|
||||
|
||||
The device interface may be configured through ``device_config`` setting, whose
|
||||
value is a ``dict`` mapping setting names to their values. You can find the full
|
||||
list of available parameters by looking up your device interface in the
|
||||
:ref:`devices` section of the documentation. Some of the most common parameters
|
||||
you might want to change are outlined below.
|
||||
|
||||
Currently, the only supported method for talking to a Linux device is over
|
||||
SSH. Device configuration must specify the parameters needed to establish the
|
||||
connection.
|
||||
|
||||
.. confval:: host
|
||||
|
||||
This should be either the DNS name or IP address of the device.
|
||||
|
||||
.. confval:: username
|
||||
|
||||
The login name of the user on the device that WA will use. This user should
|
||||
have a home directory (unless an alternative working directory is specified
|
||||
using ``working_directory`` config -- see below), and, for full
|
||||
functionality, the user should have sudo rights (WA will be able to use
|
||||
sudo-less accounts but some instruments or workloads may not work).
|
||||
|
||||
.. confval:: password
|
||||
|
||||
Password for the account on the device. Either this of a ``keyfile`` (see
|
||||
below) must be specified.
|
||||
|
||||
.. confval:: keyfile
|
||||
|
||||
If key-based authentication is used, this may be used to specify the SSH identity
|
||||
file instead of the password.
|
||||
|
||||
.. confval:: property_files
|
||||
|
||||
This is a list of paths that will be pulled for each WA run into the __meta
|
||||
subdirectory in the results. The intention is to collect meta-data about the
|
||||
device that may aid in reproducing the results later. The paths specified do
|
||||
not have to exist on the device (they will be ignored if they do not). The
|
||||
default list is ``['/proc/version', '/etc/debian_version', '/etc/lsb-release', '/etc/arch-release']``
|
||||
|
||||
|
||||
In addition, ``working_directory``, ``scheduler``, ``core_names``, and
|
||||
``core_clusters`` can also be specified and have the same meaning as for Android
|
||||
devices (see above).
|
||||
|
||||
A typical ``device_config`` inside ``config.py`` may look something like
|
||||
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
device_config = dict(
|
||||
host='192.168.0.7',
|
||||
username='guest',
|
||||
password='guest',
|
||||
core_names=['a7', 'a7', 'a7', 'a15', 'a15'],
|
||||
core_clusters=[0, 0, 0, 1, 1],
|
||||
# ...
|
||||
)
|
||||
|
||||
|
||||
Related Settings
|
||||
++++++++++++++++
|
||||
|
||||
Reboot Policy
|
||||
-------------
|
||||
|
||||
This indicates when during WA execution the device will be rebooted. By default
|
||||
this is set to ``never``, indicating that WA will not reboot the device. Please
|
||||
see ``reboot_policy`` documentation in :ref:`configuration-specification` for
|
||||
|
||||
more details.
|
||||
|
||||
Execution Order
|
||||
---------------
|
||||
|
||||
``execution_order`` defines the order in which WA will execute workloads.
|
||||
``by_iteration`` (set by default) will execute the first iteration of each spec
|
||||
first, followed by the second iteration of each spec (that defines more than one
|
||||
iteration) and so forth. The alternative will loop through all iterations for
|
||||
the first spec first, then move on to the second spec, etc. Again, please see
|
||||
:ref:`configuration-specification` for more details.
|
||||
|
||||
|
||||
Adding a new device interface
|
||||
+++++++++++++++++++++++++++++
|
||||
|
||||
If you are working with a particularly unusual device (e.g. an early-stage
|
||||
development board) or need to be able to handle some quirk of your Android build,
|
||||
configuration available in ``generic_android`` interface may not be enough for
|
||||
you. In that case, you may need to write a custom interface for your device. A
|
||||
device interface is an ``Extension`` (a plug-in) type in WA and is implemented
|
||||
similar to other extensions (such as workloads or instruments). Please refer to
|
||||
:ref:`adding_a_device` section for information on how this may be done.
|
115
doc/source/execution_model.rst
Normal file
115
doc/source/execution_model.rst
Normal file
@ -0,0 +1,115 @@
|
||||
++++++++++++++++++
|
||||
Framework Overview
|
||||
++++++++++++++++++
|
||||
|
||||
Execution Model
|
||||
===============
|
||||
|
||||
At the high level, the execution model looks as follows:
|
||||
|
||||
.. image:: wa-execution.png
|
||||
:scale: 50 %
|
||||
|
||||
After some initial setup, the framework initializes the device, loads and initializes
|
||||
instrumentation and begins executing jobs defined by the workload specs in the agenda. Each job
|
||||
executes in four basic stages:
|
||||
|
||||
setup
|
||||
Initial setup for the workload is performed. E.g. required assets are deployed to the
|
||||
devices, required services or applications are launched, etc. Run time configuration of the
|
||||
device for the workload is also performed at this time.
|
||||
|
||||
run
|
||||
This is when the workload actually runs. This is defined as the part of the workload that is
|
||||
to be measured. Exactly what happens at this stage depends entirely on the workload.
|
||||
|
||||
result processing
|
||||
Results generated during the execution of the workload, if there are any, are collected,
|
||||
parsed and extracted metrics are passed up to the core framework.
|
||||
|
||||
teardown
|
||||
Final clean up is performed, e.g. applications may be closed, files generated during execution
|
||||
deleted, etc.
|
||||
|
||||
Signals are dispatched (see signal_dispatch_ below) at each stage of workload execution,
|
||||
which installed instrumentation can hook into in order to collect measurements, alter workload
|
||||
execution, etc. Instrumentation implementation usually mirrors that of workloads, defining
|
||||
setup, teardown and result processing stages for a particular instrument. Instead of a ``run``,
|
||||
instruments usually implement a ``start`` and a ``stop`` which get triggered just before and just
|
||||
after a workload run. However, the signal dispatch mechanism gives a high degree of flexibility
|
||||
to instruments allowing them to hook into almost any stage of a WA run (apart from the very
|
||||
early initialization).
|
||||
|
||||
Metrics and artifacts generated by workloads and instrumentation are accumulated by the framework
|
||||
and are then passed to active result processors. This happens after each individual workload
|
||||
execution and at the end of the run. A result processor may choose to act at either or both of these
|
||||
points.
|
||||
|
||||
|
||||
Control Flow
|
||||
============
|
||||
|
||||
This section goes into more detail explaining the relationship between the major components of the
|
||||
framework and how control passes between them during a run. It will only go through the major
|
||||
transitions and interactions and will not attempt to describe every single thing that happens.
|
||||
|
||||
.. note:: This is the control flow for the ``wa run`` command which is the main functionality
|
||||
of WA. Other commands are much simpler and most of what is described below does not
|
||||
apply to them.
|
||||
|
||||
#. ``wlauto.core.entry_point`` parses the command from the arguments and executes the run command
|
||||
(``wlauto.commands.run.RunCommand``).
|
||||
#. Run command initializes the output directory and creates a ``wlauto.core.agenda.Agenda`` based on
|
||||
the command line arguments. Finally, it instantiates a ``wlauto.core.execution.Executor`` and
|
||||
passes it the Agenda.
|
||||
#. The Executor uses the Agenda to create a ``wlauto.core.configuration.RunConfiguration`` which fully
|
||||
defines the configuration for the run (it will be serialised into ``__meta`` subdirectory under
|
||||
the output directory).
|
||||
#. The Executor proceeds to instantiate and install instrumentation, result processors and the
|
||||
device interface, based on the RunConfiguration. The executor also initialises a
|
||||
``wlauto.core.execution.ExecutionContext`` which is used to track the current state of the run
|
||||
execution and also serves as a means of communication between the core framework and the
|
||||
extensions.
|
||||
#. Finally, the Executor instantiates a ``wlauto.core.execution.Runner``, initializes its job
|
||||
queue with workload specs from the RunConfiguration, and kicks it off.
|
||||
#. The Runner performs the run time initialization of the device and goes through the workload specs
|
||||
(in the order defined by ``execution_order`` setting), running each spec according to the
|
||||
execution model described in the previous section. The Runner sends signals (see below) at
|
||||
appropriate points during execution.
|
||||
#. At the end of the run, the control is briefly passed back to the Executor, which outputs a
|
||||
summary for the run.
|
||||
|
||||
|
||||
.. _signal_dispatch:
|
||||
|
||||
Signal Dispatch
|
||||
===============
|
||||
|
||||
WA uses the `louie <https://pypi.python.org/pypi/Louie/1.1>`_ (formerly, pydispatcher) library
|
||||
for signal dispatch. Callbacks can be registered for signals emitted during the run. WA uses a
|
||||
version of louie that has been modified to introduce priority to registered callbacks (so that
|
||||
callbacks that are known to be slow can be registered with a lower priority so that they do not
|
||||
interfere with other callbacks).
|
||||
|
||||
This mechanism is abstracted for instrumentation. Methods of an :class:`wlauto.core.Instrument`
|
||||
subclass automatically get hooked to appropriate signals based on their names when the instrument
|
||||
is "installed" for the run. Priority can be specified by adding ``very_fast_``, ``fast_`` ,
|
||||
``slow_`` or ``very_slow_`` prefixes to method names.
|
||||
|
||||
The full list of method names and the signals they map to may be viewed
|
||||
:ref:`here <instrumentation_method_map>`.
|
||||
|
||||
Signal dispatching mechanism may also be used directly, for example to dynamically register
|
||||
callbacks at runtime or allow extensions other than ``Instruments`` to access stages of the run
|
||||
they are normally not aware of.
|
||||
|
||||
The sending of signals is the responsibility of the Runner. Signals get sent during transitions
|
||||
between execution stages and when special events, such as errors or device reboots, occur.
|
||||
|
||||
See Also
|
||||
--------
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
instrumentation_method_map
|
@ -1,140 +0,0 @@
|
||||
.. _faq:
|
||||
|
||||
FAQ
|
||||
===
|
||||
|
||||
.. contents::
|
||||
:depth: 1
|
||||
:local:
|
||||
|
||||
---------------------------------------------------------------------------------------
|
||||
|
||||
|
||||
**Q:** I receive the error: ``"<<Workload> file <file_name> file> could not be found."``
|
||||
-----------------------------------------------------------------------------------------
|
||||
|
||||
**A:** Some workload e.g. AdobeReader, GooglePhotos etc require external asset
|
||||
files. We host some additional workload dependencies in the `WA Assets Repo
|
||||
<https://github.com/ARM-software/workload-automation-assets>`_. To allow WA to
|
||||
try and automatically download required assets from the repository please add
|
||||
the following to your configuration:
|
||||
|
||||
.. code-block:: YAML
|
||||
|
||||
remote_assets_url: https://raw.githubusercontent.com/ARM-software/workload-automation-assets/master/dependencies
|
||||
|
||||
------------
|
||||
|
||||
**Q:** I receive the error: ``"No matching package found for workload <workload>"``
|
||||
------------------------------------------------------------------------------------
|
||||
|
||||
**A:** WA cannot locate the application required for the workload. Please either
|
||||
install the application onto the device or source the apk and place into
|
||||
``$WA_USER_DIRECTORY/dependencies/<workload>``
|
||||
|
||||
------------
|
||||
|
||||
**Q:** I am trying to set a valid runtime parameters however I still receive the error ``"Unknown runtime parameter"``
|
||||
-------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
**A:** Please ensure you have the corresponding module loaded on the device.
|
||||
See :ref:`Runtime Parameters <runtime-parameters>` for the list of
|
||||
runtime parameters and their containing modules, and the appropriate section in
|
||||
:ref:`setting up a device <setting-up-a-device>` for ensuring it is installed.
|
||||
|
||||
-------------
|
||||
|
||||
**Q:** I have a big.LITTLE device but am unable to set parameters corresponding to the big or little core and receive the error ``"Unknown runtime parameter"``
|
||||
-----------------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
**A:** Please ensure you have the hot plugging module enabled for your device (Please see question above).
|
||||
|
||||
|
||||
**A:** This can occur if the device uses dynamic hot-plugging and although WA
|
||||
will try to online all cores to perform discovery sometimes this can fail
|
||||
causing WA to incorrectly assume that only one cluster is present. To
|
||||
workaround this please set the ``core_names`` :ref:`parameter <core-names>` in the configuration for
|
||||
your device.
|
||||
|
||||
|
||||
**Q:** I receive the error ``Could not find plugin or alias "standard"``
|
||||
------------------------------------------------------------------------
|
||||
|
||||
**A:** Upon first use of WA3, your WA2 config file typically located at
|
||||
``$USER_HOME/config.py`` will have been converted to a WA3 config file located at
|
||||
``$USER_HOME/config.yaml``. The "standard" output processor, present in WA2, has
|
||||
been merged into the core framework and therefore no longer exists. To fix this
|
||||
error please remove the "standard" entry from the "augmentations" list in the
|
||||
WA3 config file.
|
||||
|
||||
**Q:** My Juno board keeps resetting upon starting WA even if it hasn't crashed.
|
||||
--------------------------------------------------------------------------------
|
||||
**A:** Please ensure that you do not have any other terminals (e.g. ``screen``
|
||||
sessions) connected to the board's UART. When WA attempts to open the connection
|
||||
for its own use this can cause the board to reset if a connection is already
|
||||
present.
|
||||
|
||||
|
||||
**Q:** I'm using the FPS instrument but I do not get any/correct results for my workload
|
||||
-----------------------------------------------------------------------------------------
|
||||
|
||||
**A:** If your device is running with Android 6.0 + then the default utility for
|
||||
collecting fps metrics will be ``gfxinfo`` however this does not seem to be able
|
||||
to extract any meaningful information for some workloads. In this case please
|
||||
try setting the ``force_surfaceflinger`` parameter for the ``fps`` augmentation
|
||||
to ``True``. This will attempt to guess the "View" for the workload
|
||||
automatically however this is device specific and therefore may need
|
||||
customizing. If this is required please open the application and execute
|
||||
``dumpsys SurfaceFlinger --list`` on the device via adb. This will provide a
|
||||
list of all views available for measuring.
|
||||
|
||||
As an example, when trying to find the view for the AngryBirds Rio workload you
|
||||
may get something like:
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
...
|
||||
AppWindowToken{41dfe54 token=Token{77819a7 ActivityRecord{a151266 u0 com.rovio.angrybirdsrio/com.rovio.fusion.App t506}}}#0
|
||||
a3d001c com.rovio.angrybirdsrio/com.rovio.fusion.App#0
|
||||
Background for -SurfaceView - com.rovio.angrybirdsrio/com.rovio.fusion.App#0
|
||||
SurfaceView - com.rovio.angrybirdsrio/com.rovio.fusion.App#0
|
||||
com.rovio.angrybirdsrio/com.rovio.fusion.App#0
|
||||
boostedAnimationLayer#0
|
||||
mAboveAppWindowsContainers#0
|
||||
...
|
||||
|
||||
From these ``"SurfaceView - com.rovio.angrybirdsrio/com.rovio.fusion.App#0"`` is
|
||||
most likely the View that needs to be set as the ``view`` workload
|
||||
parameter and will be picked up by the ``fps`` augmentation.
|
||||
|
||||
|
||||
**Q:** I am getting an error which looks similar to ``'CONFIG_SND_BT87X is not exposed in kernel config'...``
|
||||
-------------------------------------------------------------------------------------------------------------
|
||||
**A:** If you are receiving this under normal operation this can be caused by a
|
||||
mismatch of your WA and devlib versions. Please update both to their latest
|
||||
versions and delete your ``$USER_HOME/.workload_automation/cache/targets.json``
|
||||
(or equivalent) file.
|
||||
|
||||
**Q:** I get an error which looks similar to ``UnicodeDecodeError('ascii' codec can't decode byte...``
|
||||
------------------------------------------------------------------------------------------------------
|
||||
**A:** If you receive this error or a similar warning about your environment,
|
||||
please ensure that you configure your environment to use a locale which supports
|
||||
UTF-8. Otherwise this can cause issues when attempting to parse files containing
|
||||
non-ASCII characters.
|
||||
|
||||
**Q:** I get the error ``Module "X" failed to install on target``
|
||||
------------------------------------------------------------------------------------------------------
|
||||
**A:** By default a set of devlib modules will be automatically loaded onto the
|
||||
target designed to add additional functionality. If the functionality provided
|
||||
by the module is not required then the module can be safely disabled by setting
|
||||
``load_default_modules`` to ``False`` in the ``device_config`` entry of the
|
||||
:ref:`agenda <config-agenda-entry>` and then re-enabling any specific modules
|
||||
that are still required. An example agenda snippet is shown below:
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
config:
|
||||
device: generic_android
|
||||
device_config:
|
||||
load_default_modules: False
|
||||
modules: ['list', 'of', 'modules', 'to', 'enable']
|
@ -1,120 +0,0 @@
|
||||
.. _glossary:
|
||||
|
||||
Glossary
|
||||
========
|
||||
|
||||
.. glossary::
|
||||
|
||||
Agenda
|
||||
An agenda specifies what is to be done during a Workload Automation
|
||||
run. This includes which workloads will be run, with what configuration
|
||||
and which augmentations will be enabled, etc. (For more information
|
||||
please see the :ref:`Agenda Reference <agenda-reference>`.)
|
||||
|
||||
Alias
|
||||
An alias associated with a workload or a parameter. In case of
|
||||
parameters, this is simply an alternative name for a parameter; Usually
|
||||
these are employed to provide backward compatibility for renamed
|
||||
parameters, or in cases where there are several commonly used terms,
|
||||
each equally valid, for something.
|
||||
|
||||
In case of Workloads, aliases can also be merely alternatives to the
|
||||
workload name, however they can also alter the default values for the
|
||||
parameters the Workload is instantiated with. A common scenario is when
|
||||
a single workload can be run under several distinct configurations (e.g.
|
||||
has several alternative tests that might be run) that are configurable
|
||||
via a parameter. An alias may be added for each such configuration. In
|
||||
order to see the available aliases for a workload, one can use :ref:`show
|
||||
command <show-command>`\ .
|
||||
|
||||
.. seealso:: :term:`Global Alias`
|
||||
|
||||
Artifact
|
||||
An artifact is something that has been generated as part of the run
|
||||
for example a file containing output or meta data in the form of log
|
||||
files. WA supports multiple "kinds" of artifacts and will handle them
|
||||
accordingly, for more information please see the
|
||||
:ref:`Developer Reference <artifact>`.
|
||||
|
||||
Augmentation
|
||||
Augmentations are plugins that augment the execution of
|
||||
workload jobs with additional functionality; usually, that takes the
|
||||
form of generating additional metrics and/or artifacts, such as traces
|
||||
or logs. For more information please see
|
||||
:ref:`augmentations <augmentations>`.
|
||||
|
||||
Classifier
|
||||
An arbitrary key-value pair that may be associated with a :term:`job`\ , a
|
||||
:term:`metric`\ , or an :term:`artifact`. The key must be a string. The
|
||||
value can be any simple scalar type (string, integer, boolean, etc).
|
||||
These have no pre-defined meaning but may be used to aid
|
||||
filtering/grouping of metrics and artifacts during output processing.
|
||||
|
||||
.. seealso:: :ref:`classifiers`.
|
||||
|
||||
Global Alias
|
||||
Typically, values for plugin parameters are specified name spaced under
|
||||
the plugin's name in the configuration. A global alias is an alias that
|
||||
may be specified at the top level in configuration.
|
||||
|
||||
There are two common reasons for this. First, several plugins might
|
||||
specify the same global alias for the same parameter, thus allowing all
|
||||
of them to be configured with one setting. Second, a plugin may not be
|
||||
exposed directly to the user (e.g. resource getters) so it makes more
|
||||
sense to treat its parameters as global configuration values.
|
||||
|
||||
.. seealso:: :term:`Alias`
|
||||
|
||||
Instrument
|
||||
A WA "Instrument" can be quite diverse in its functionality, but
|
||||
the majority of those available are there to collect some kind of
|
||||
additional data (such as trace, energy readings etc.) from the device
|
||||
during workload execution. To see available instruments please use the
|
||||
:ref:`list command <list-command>` or see the
|
||||
:ref:`Plugin Reference <instruments>`.
|
||||
|
||||
Job
|
||||
A single execution of a workload. A job is defined by an associated
|
||||
:term:`spec`. However, multiple jobs can share the same spec;
|
||||
E.g. Even if you only have 1 workload to run but wanted 5 iterations
|
||||
then 5 individual jobs will be generated to be run.
|
||||
|
||||
Metric
|
||||
A single numeric measurement or score collected during job execution.
|
||||
|
||||
Output Processor
|
||||
An "Output Processor" is what is used to process the output
|
||||
generated by a workload. They can simply store the results in a presentable
|
||||
format or use the information collected to generate additional metrics.
|
||||
To see available output processors please use the
|
||||
:ref:`list command <list-command>` or see the
|
||||
:ref:`Plugin Reference <output-processors>`.
|
||||
|
||||
Run
|
||||
A single execution of `wa run` command. A run consists of one or more
|
||||
:term:`job`\ s, and results in a single output directory structure
|
||||
containing job results and metadata.
|
||||
|
||||
Section
|
||||
A set of configurations for how jobs should be run. The
|
||||
settings in them take less precedence than workload-specific settings. For
|
||||
every section, all jobs will be run again, with the changes
|
||||
specified in the section's agenda entry. Sections
|
||||
are useful for several runs in which global settings change.
|
||||
|
||||
Spec
|
||||
A specification of a workload. For example you can have a single
|
||||
workload specification that is then executed multiple times if you
|
||||
desire multiple iterations but the configuration for the workload will
|
||||
remain the same. In WA2 the term "iteration" used to refer to the same
|
||||
underlying idea as spec now does. It should be noted however, that this
|
||||
is no longer the case and an iteration is merely a configuration point
|
||||
in WA3. Spec is to blueprint as job is to product.
|
||||
|
||||
WA
|
||||
Workload Automation. The full name of this framework.
|
||||
|
||||
Workload
|
||||
A workload is the lowest level specification for tasks that need to be run
|
||||
on a target. A workload can have multiple iterations, and be run additional
|
||||
multiples of times dependent on the number of sections.
|
@ -1,93 +1,138 @@
|
||||
.. Workload Automation 3 documentation master file,
|
||||
.. Workload Automation 2 documentation master file, created by
|
||||
sphinx-quickstart on Mon Jul 15 09:00:46 2013.
|
||||
You can adapt this file completely to your liking, but it should at least
|
||||
contain the root `toctree` directive.
|
||||
|
||||
================================================
|
||||
Welcome to Documentation for Workload Automation
|
||||
================================================
|
||||
|
||||
Workload Automation (WA) is a framework for executing workloads and collecting
|
||||
measurements on Android and Linux devices. WA includes automation for nearly 40
|
||||
workloads and supports some common instrumentation (ftrace, hwmon) along with a
|
||||
number of output formats.
|
||||
Workload Automation (WA) is a framework for running workloads on real hardware devices. WA
|
||||
supports a number of output formats as well as additional instrumentation (such as Streamline
|
||||
traces). A number of workloads are included with the framework.
|
||||
|
||||
WA is designed primarily as a developer tool/framework to facilitate data driven
|
||||
development by providing a method of collecting measurements from a device in a
|
||||
repeatable way.
|
||||
|
||||
WA is highly extensible. Most of the concrete functionality is
|
||||
implemented via :ref:`plug-ins <plugin-reference>`, and it is easy to
|
||||
:ref:`write new plug-ins <writing-plugins>` to support new device types,
|
||||
workloads, instruments or output processing.
|
||||
|
||||
.. note:: To see the documentation of individual plugins please see the
|
||||
:ref:`Plugin Reference <plugin-reference>`.
|
||||
|
||||
.. contents:: Contents
|
||||
|
||||
|
||||
What's New
|
||||
==========
|
||||
~~~~~~~~~~
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
changes
|
||||
migration_guide
|
||||
|
||||
User Information
|
||||
================
|
||||
|
||||
This section lists general usage documentation. If you're new to WA3, it is
|
||||
recommended you start with the :ref:`User Guide <user-guide>` page. This section also contains
|
||||
Usage
|
||||
~~~~~
|
||||
|
||||
This section lists general usage documentation. If you're new to WA2, it is
|
||||
recommended you start with the :doc:`quickstart` page. This section also contains
|
||||
installation and configuration guides.
|
||||
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 3
|
||||
:maxdepth: 2
|
||||
|
||||
user_information
|
||||
quickstart
|
||||
installation
|
||||
device_setup
|
||||
invocation
|
||||
agenda
|
||||
configuration
|
||||
|
||||
|
||||
Extensions
|
||||
~~~~~~~~~~
|
||||
|
||||
This section lists extensions that currently come with WA2. Each package below
|
||||
represents a particular type of extension (e.g. a workload); each sub-package of
|
||||
that package is a particular instance of that extension (e.g. the Andebench
|
||||
workload). Clicking on a link will show what the individual extension does,
|
||||
what configuration parameters it takes, etc.
|
||||
|
||||
For how to implement you own extensions, please refer to the guides in the
|
||||
:ref:`in-depth` section.
|
||||
|
||||
.. raw:: html
|
||||
|
||||
<style>
|
||||
td {
|
||||
vertical-align: text-top;
|
||||
}
|
||||
</style>
|
||||
<table <tr><td>
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
extensions/workloads
|
||||
|
||||
.. raw:: html
|
||||
|
||||
</td><td>
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
extensions/instruments
|
||||
|
||||
|
||||
.. raw:: html
|
||||
|
||||
</td><td>
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
extensions/result_processors
|
||||
|
||||
.. raw:: html
|
||||
|
||||
</td><td>
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
extensions/devices
|
||||
|
||||
.. raw:: html
|
||||
|
||||
</td></tr></table>
|
||||
|
||||
.. _in-depth:
|
||||
|
||||
Developer Information
|
||||
=====================
|
||||
In-depth
|
||||
~~~~~~~~
|
||||
|
||||
This section contains more advanced topics, such how to write your own Plugins
|
||||
This section contains more advanced topics, such how to write your own extensions
|
||||
and detailed descriptions of how WA functions under the hood.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 3
|
||||
|
||||
developer_information
|
||||
|
||||
|
||||
Plugin Reference
|
||||
================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
plugins
|
||||
conventions
|
||||
writing_extensions
|
||||
execution_model
|
||||
resources
|
||||
additional_topics
|
||||
daq_device_setup
|
||||
revent
|
||||
contributing
|
||||
|
||||
API
|
||||
===
|
||||
API Reference
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:maxdepth: 5
|
||||
|
||||
api
|
||||
api/modules
|
||||
|
||||
Glossary
|
||||
========
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
Indices and tables
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
glossary
|
||||
* :ref:`genindex`
|
||||
* :ref:`modindex`
|
||||
* :ref:`search`
|
||||
|
||||
FAQ
|
||||
====
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
faq
|
||||
|
326
doc/source/installation.rst
Normal file
326
doc/source/installation.rst
Normal file
@ -0,0 +1,326 @@
|
||||
============
|
||||
Installation
|
||||
============
|
||||
|
||||
.. module:: wlauto
|
||||
|
||||
This page describes how to install Workload Automation 2.
|
||||
|
||||
|
||||
Prerequisites
|
||||
=============
|
||||
|
||||
Operating System
|
||||
----------------
|
||||
|
||||
WA runs on a native Linux install. It was tested with Ubuntu 12.04,
|
||||
but any recent Linux distribution should work. It should run on either
|
||||
32-bit or 64-bit OS, provided the correct version of Android (see below)
|
||||
was installed. Officially, **other environments are not supported**. WA
|
||||
has been known to run on Linux Virtual machines and in Cygwin environments,
|
||||
though additional configuration may be required in both cases (known issues
|
||||
include making sure USB/serial connections are passed to the VM, and wrong
|
||||
python/pip binaries being picked up in Cygwin). WA *should* work on other
|
||||
Unix-based systems such as BSD or Mac OS X, but it has not been tested
|
||||
in those environments. WA *does not* run on Windows (though it should be
|
||||
possible to get limited functionality with minimal porting effort).
|
||||
|
||||
.. Note:: If you plan to run Workload Automation on Linux devices only,
|
||||
SSH is required, and Android SDK is optional if you wish
|
||||
to run WA on Android devices at a later time. Then follow the
|
||||
steps to install the necessary python packages to set up WA.
|
||||
|
||||
However, you would be starting off with a limited number of
|
||||
workloads that will run on Linux devices.
|
||||
|
||||
Android SDK
|
||||
-----------
|
||||
|
||||
You need to have the Android SDK with at least one platform installed.
|
||||
To install it, download the ADT Bundle from here_. Extract it
|
||||
and add ``<path_to_android_sdk>/sdk/platform-tools`` and ``<path_to_android_sdk>/sdk/tools``
|
||||
to your ``PATH``. To test that you've installed it properly, run ``adb
|
||||
version``. The output should be similar to this::
|
||||
|
||||
adb version
|
||||
Android Debug Bridge version 1.0.31
|
||||
|
||||
.. _here: https://developer.android.com/sdk/index.html
|
||||
|
||||
Once that is working, run ::
|
||||
|
||||
android update sdk
|
||||
|
||||
This will open up a dialog box listing available android platforms and
|
||||
corresponding API levels, e.g. ``Android 4.3 (API 18)``. For WA, you will need
|
||||
at least API level 18 (i.e. Android 4.3), though installing the latest is
|
||||
usually the best bet.
|
||||
|
||||
Optionally (but recommended), you should also set ``ANDROID_HOME`` to point to
|
||||
the install location of the SDK (i.e. ``<path_to_android_sdk>/sdk``).
|
||||
|
||||
.. note:: You may need to install 32-bit compatibility libraries for the SDK
|
||||
to work properly. On Ubuntu you need to run::
|
||||
|
||||
sudo apt-get install lib32stdc++6 lib32z1
|
||||
|
||||
|
||||
Python
|
||||
------
|
||||
|
||||
Workload Automation 2 requires Python 2.7 (Python 3 is not supported at the moment).
|
||||
|
||||
|
||||
pip
|
||||
---
|
||||
|
||||
pip is the recommended package manager for Python. It is not part of standard
|
||||
Python distribution and would need to be installed separately. On Ubuntu and
|
||||
similar distributions, this may be done with APT::
|
||||
|
||||
sudo apt-get install python-pip
|
||||
|
||||
.. note:: Some versions of pip (in particular v1.5.4 which comes with Ubuntu
|
||||
14.04) are known to set the wrong permissions when installing
|
||||
packages, resulting in WA failing to import them. To avoid this it
|
||||
is recommended that you update pip and setuptools before proceeding
|
||||
with installation::
|
||||
|
||||
sudo -H pip install --upgrade pip
|
||||
sudo -H pip install --upgrade setuptools
|
||||
|
||||
If you do run into this issue after already installing some packages,
|
||||
you can resolve it by running ::
|
||||
|
||||
sudo chmod -R a+r /usr/local/lib/python2.7/dist-packages
|
||||
sudo find /usr/local/lib/python2.7/dist-packages -type d -exec chmod a+x {} \;
|
||||
|
||||
(The paths above will work for Ubuntu; they may need to be adjusted
|
||||
for other distros).
|
||||
|
||||
Python Packages
|
||||
---------------
|
||||
|
||||
.. note:: pip should automatically download and install missing dependencies,
|
||||
so if you're using pip, you can skip this section.
|
||||
|
||||
Workload Automation 2 depends on the following additional libraries:
|
||||
|
||||
* pexpect
|
||||
* docutils
|
||||
* pySerial
|
||||
* pyYAML
|
||||
* python-dateutil
|
||||
|
||||
You can install these with pip::
|
||||
|
||||
sudo -H pip install pexpect
|
||||
sudo -H pip install pyserial
|
||||
sudo -H pip install pyyaml
|
||||
sudo -H pip install docutils
|
||||
sudo -H pip install python-dateutil
|
||||
|
||||
Some of these may also be available in your distro's repositories, e.g. ::
|
||||
|
||||
sudo apt-get install python-serial
|
||||
|
||||
Distro package versions tend to be older, so pip installation is recommended.
|
||||
However, pip will always download and try to build the source, so in some
|
||||
situations distro binaries may provide an easier fall back. Please also note that
|
||||
distro package names may differ from pip packages.
|
||||
|
||||
|
||||
Optional Python Packages
|
||||
------------------------
|
||||
|
||||
.. note:: unlike the mandatory dependencies in the previous section,
|
||||
pip will *not* install these automatically, so you will have
|
||||
to explicitly install them if/when you need them.
|
||||
|
||||
In addition to the mandatory packages listed in the previous sections, some WA
|
||||
functionality (e.g. certain extensions) may have additional dependencies. Since
|
||||
they are not necessary to be able to use most of WA, they are not made mandatory
|
||||
to simplify initial WA installation. If you try to use an extension that has
|
||||
additional, unmet dependencies, WA will tell you before starting the run, and
|
||||
you can install it then. They are listed here for those that would rather
|
||||
install them upfront (e.g. if you're planning to use WA in an environment that
|
||||
may not always have Internet access).
|
||||
|
||||
* nose
|
||||
* pandas
|
||||
* PyDAQmx
|
||||
* pymongo
|
||||
* jinja2
|
||||
|
||||
|
||||
.. note:: Some packages have C extensions and will require Python development
|
||||
headers to install. You can get those by installing ``python-dev``
|
||||
package in apt on Ubuntu (or the equivalent for your distribution).
|
||||
|
||||
|
||||
Installing
|
||||
==========
|
||||
|
||||
Installing the latest released version from PyPI (Python Package Index)::
|
||||
|
||||
sudo -H pip install wlauto
|
||||
|
||||
This will install WA along with its mandatory dependencies. If you would like to
|
||||
install all optional dependencies at the same time, do the following instead::
|
||||
|
||||
sudo -H pip install wlauto[all]
|
||||
|
||||
Alternatively, you can also install the latest development version from GitHub
|
||||
(you will need git installed for this to work)::
|
||||
|
||||
git clone git@github.com:ARM-software/workload-automation.git workload-automation
|
||||
sudo -H pip install ./workload-automation
|
||||
|
||||
|
||||
|
||||
If the above succeeds, try ::
|
||||
|
||||
wa --version
|
||||
|
||||
Hopefully, this should output something along the lines of "Workload Automation
|
||||
version $version".
|
||||
|
||||
|
||||
(Optional) Post Installation
|
||||
============================
|
||||
|
||||
Some WA extensions have additional dependencies that need to be
|
||||
satisfied before they can be used. Not all of these can be provided with WA and
|
||||
so will need to be supplied by the user. They should be placed into
|
||||
``~/.workload_automation/dependencies/<extension name>`` so that WA can find
|
||||
them (you may need to create the directory if it doesn't already exist). You
|
||||
only need to provide the dependencies for workloads you want to use.
|
||||
|
||||
|
||||
APK Files
|
||||
---------
|
||||
|
||||
APKs are application packages used by Android. These are necessary to install an
|
||||
application onto devices that do not have Google Play (e.g. devboards running
|
||||
AOSP). The following is a list of workloads that will need one, including the
|
||||
version(s) for which UI automation has been tested. Automation may also work
|
||||
with other versions (especially if it's only a minor or revision difference --
|
||||
major version differences are more likely to contain incompatible UI changes) but
|
||||
this has not been tested.
|
||||
|
||||
================ ============================================ ========================= ============ ============
|
||||
workload package name version code version name
|
||||
================ ============================================ ========================= ============ ============
|
||||
andebench com.eembc.coremark AndEBench v1383a 1383
|
||||
angrybirds com.rovio.angrybirds Angry Birds 2.1.1 2110
|
||||
angrybirds_rio com.rovio.angrybirdsrio Angry Birds 1.3.2 1320
|
||||
anomaly2 com.elevenbitstudios.anomaly2Benchmark A2 Benchmark 1.1 50
|
||||
antutu com.antutu.ABenchMark AnTuTu Benchmark 5.3 5030000
|
||||
antutu com.antutu.ABenchMark AnTuTu Benchmark 3.3.2 3322
|
||||
antutu com.antutu.ABenchMark AnTuTu Benchmark 4.0.3 4000300
|
||||
benchmarkpi gr.androiddev.BenchmarkPi BenchmarkPi 1.11 5
|
||||
caffeinemark com.flexycore.caffeinemark CaffeineMark 1.2.4 9
|
||||
castlebuilder com.ettinentertainment.castlebuilder Castle Builder 1.0 1
|
||||
castlemaster com.alphacloud.castlemaster Castle Master 1.09 109
|
||||
cfbench eu.chainfire.cfbench CF-Bench 1.2 7
|
||||
citadel com.epicgames.EpicCitadel Epic Citadel 1.07 901107
|
||||
dungeondefenders com.trendy.ddapp Dungeon Defenders 5.34 34
|
||||
facebook com.facebook.katana Facebook 3.4 258880
|
||||
geekbench ca.primatelabs.geekbench2 Geekbench 2 2.2.7 202007
|
||||
geekbench com.primatelabs.geekbench3 Geekbench 3 3.0.0 135
|
||||
glb_corporate net.kishonti.gfxbench GFXBench 3.0.0 1
|
||||
glbenchmark com.glbenchmark.glbenchmark25 GLBenchmark 2.5 2.5 4
|
||||
glbenchmark com.glbenchmark.glbenchmark27 GLBenchmark 2.7 2.7 1
|
||||
gunbros2 com.glu.gunbros2 GunBros2 1.2.2 122
|
||||
ironman com.gameloft.android.ANMP.GloftIMHM Iron Man 3 1.3.1 1310
|
||||
krazykart com.polarbit.sg2.krazyracers Krazy Kart Racing 1.2.7 127
|
||||
linpack com.greenecomputing.linpackpro Linpack Pro for Android 1.2.9 31
|
||||
nenamark se.nena.nenamark2 NenaMark2 2.4 5
|
||||
peacekeeper com.android.chrome Chrome 18.0.1025469 1025469
|
||||
peacekeeper org.mozilla.firefox Firefox 23.0 2013073011
|
||||
quadrant com.aurorasoftworks.quadrant.ui.professional Quadrant Professional 2.0 2000000
|
||||
realracing3 com.ea.games.r3_row Real Racing 3 1.3.5 1305
|
||||
smartbench com.smartbench.twelve Smartbench 2012 1.0.0 5
|
||||
sqlite com.redlicense.benchmark.sqlite RL Benchmark 1.3 5
|
||||
templerun com.imangi.templerun Temple Run 1.0.8 11
|
||||
thechase com.unity3d.TheChase The Chase 1.0 1
|
||||
truckerparking3d com.tapinator.truck.parking.bus3d Truck Parking 3D 2.5 7
|
||||
vellamo com.quicinc.vellamo Vellamo 3.0 3001
|
||||
vellamo com.quicinc.vellamo Vellamo 2.0.3 2003
|
||||
videostreaming tw.com.freedi.youtube.player FREEdi YT Player 2.1.13 79
|
||||
================ ============================================ ========================= ============ ============
|
||||
|
||||
Gaming Workloads
|
||||
----------------
|
||||
|
||||
Some workloads (games, demos, etc) cannot be automated using Android's
|
||||
UIAutomator framework because they render the entire UI inside a single OpenGL
|
||||
surface. For these, an interaction session needs to be recorded so that it can
|
||||
be played back by WA. These recordings are device-specific, so they would need
|
||||
to be done for each device you're planning to use. The tool for doing this is
|
||||
``revent`` and it is packaged with WA. You can find instructions on how to use
|
||||
it :ref:`here <revent_files_creation>`.
|
||||
|
||||
This is the list of workloads that rely on such recordings:
|
||||
|
||||
+------------------+
|
||||
| angrybirds |
|
||||
+------------------+
|
||||
| angrybirds_rio |
|
||||
+------------------+
|
||||
| anomaly2 |
|
||||
+------------------+
|
||||
| castlebuilder |
|
||||
+------------------+
|
||||
| castlemastera |
|
||||
+------------------+
|
||||
| citadel |
|
||||
+------------------+
|
||||
| dungeondefenders |
|
||||
+------------------+
|
||||
| gunbros2 |
|
||||
+------------------+
|
||||
| ironman |
|
||||
+------------------+
|
||||
| krazykart |
|
||||
+------------------+
|
||||
| realracing3 |
|
||||
+------------------+
|
||||
| templerun |
|
||||
+------------------+
|
||||
| truckerparking3d |
|
||||
+------------------+
|
||||
|
||||
.. _assets_repository:
|
||||
|
||||
Maintaining Centralized Assets Repository
|
||||
-----------------------------------------
|
||||
|
||||
If there are multiple users within an organization that may need to deploy
|
||||
assets for WA extensions, that organization may wish to maintain a centralized
|
||||
repository of assets that individual WA installs will be able to automatically
|
||||
retrieve asset files from as they are needed. This repository can be any
|
||||
directory on a network filer that mirrors the structure of
|
||||
``~/.workload_automation/dependencies``, i.e. has subdirectories named after
|
||||
the extensions which assets they contain. Individual WA installs can then set
|
||||
``remote_assets_path`` setting in their config to point to the local mount of
|
||||
that location.
|
||||
|
||||
|
||||
(Optional) Uninstalling
|
||||
=======================
|
||||
|
||||
If you have installed Workload Automation via ``pip`` and wish to remove it, run this command to
|
||||
uninstall it::
|
||||
|
||||
sudo -H pip uninstall wlauto
|
||||
|
||||
.. Note:: This will *not* remove any user configuration (e.g. the ~/.workload_automation directory)
|
||||
|
||||
|
||||
(Optional) Upgrading
|
||||
====================
|
||||
|
||||
To upgrade Workload Automation to the latest version via ``pip``, run::
|
||||
|
||||
sudo -H pip install --upgrade --no-deps wlauto
|
@ -1,35 +0,0 @@
|
||||
.. _instruments_method_map:
|
||||
|
||||
Instrumentation Signal-Method Mapping
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Instrument methods get automatically hooked up to signals based on their names.
|
||||
Mostly, the method name corresponds to the name of the signal, however there are
|
||||
a few convenience aliases defined (listed first) to make it easier to relate
|
||||
instrumentation code to the workload execution model. For an overview on when
|
||||
these signals are dispatched during execution please see the
|
||||
:ref:`Developer Reference <signal_dispatch>`.
|
||||
|
||||
$signal_names
|
||||
|
||||
The methods above may be decorated with on the listed decorators to set the
|
||||
priority (a value in the ``wa.framework.signal.CallbackPriority`` enum) of the
|
||||
Instrument method relative to other callbacks registered for the signal (within
|
||||
the same priority level, callbacks are invoked in the order they were
|
||||
registered). The table below shows the mapping of the decorator to the
|
||||
corresponding priority name and level:
|
||||
|
||||
$priority_prefixes
|
||||
|
||||
|
||||
Unresponsive Targets
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
If a target is believed to be unresponsive, instrument callbacks will be
|
||||
disabled to prevent a cascade of errors and potential corruptions of state, as
|
||||
it is generally assumed that instrument callbacks will want to do something with
|
||||
the target.
|
||||
|
||||
If your callback only does something with the host, and does not require an
|
||||
active target connection, you can decorate it with ``@hostside`` decorator to
|
||||
ensure it gets invoked even if the target becomes unresponsive.
|
73
doc/source/instrumentation_method_map.rst
Normal file
73
doc/source/instrumentation_method_map.rst
Normal file
@ -0,0 +1,73 @@
|
||||
Instrumentation Signal-Method Mapping
|
||||
=====================================
|
||||
|
||||
.. _instrumentation_method_map:
|
||||
|
||||
Instrument methods get automatically hooked up to signals based on their names. Mostly, the method
|
||||
name corresponds to the name of the signal, however there are a few convenience aliases defined
|
||||
(listed first) to make it easier to relate instrumentation code to the workload execution model.
|
||||
|
||||
======================================== =========================================
|
||||
method name signal
|
||||
======================================== =========================================
|
||||
initialize run-init-signal
|
||||
setup successful-workload-setup-signal
|
||||
start before-workload-execution-signal
|
||||
stop after-workload-execution-signal
|
||||
process_workload_result successful-iteration-result-update-signal
|
||||
update_result after-iteration-result-update-signal
|
||||
teardown after-workload-teardown-signal
|
||||
finalize run-fin-signal
|
||||
on_run_start start-signal
|
||||
on_run_end end-signal
|
||||
on_workload_spec_start workload-spec-start-signal
|
||||
on_workload_spec_end workload-spec-end-signal
|
||||
on_iteration_start iteration-start-signal
|
||||
on_iteration_end iteration-end-signal
|
||||
before_initial_boot before-initial-boot-signal
|
||||
on_successful_initial_boot successful-initial-boot-signal
|
||||
after_initial_boot after-initial-boot-signal
|
||||
before_first_iteration_boot before-first-iteration-boot-signal
|
||||
on_successful_first_iteration_boot successful-first-iteration-boot-signal
|
||||
after_first_iteration_boot after-first-iteration-boot-signal
|
||||
before_boot before-boot-signal
|
||||
on_successful_boot successful-boot-signal
|
||||
after_boot after-boot-signal
|
||||
on_spec_init spec-init-signal
|
||||
on_run_init run-init-signal
|
||||
on_iteration_init iteration-init-signal
|
||||
before_workload_setup before-workload-setup-signal
|
||||
on_successful_workload_setup successful-workload-setup-signal
|
||||
after_workload_setup after-workload-setup-signal
|
||||
before_workload_execution before-workload-execution-signal
|
||||
on_successful_workload_execution successful-workload-execution-signal
|
||||
after_workload_execution after-workload-execution-signal
|
||||
before_workload_result_update before-iteration-result-update-signal
|
||||
on_successful_workload_result_update successful-iteration-result-update-signal
|
||||
after_workload_result_update after-iteration-result-update-signal
|
||||
before_workload_teardown before-workload-teardown-signal
|
||||
on_successful_workload_teardown successful-workload-teardown-signal
|
||||
after_workload_teardown after-workload-teardown-signal
|
||||
before_overall_results_processing before-overall-results-process-signal
|
||||
on_successful_overall_results_processing successful-overall-results-process-signal
|
||||
after_overall_results_processing after-overall-results-process-signal
|
||||
on_error error_logged
|
||||
on_warning warning_logged
|
||||
======================================== =========================================
|
||||
|
||||
|
||||
The names above may be prefixed with one of pre-defined prefixes to set the priority of the
|
||||
Instrument method relative to other callbacks registered for the signal (within the same priority
|
||||
level, callbacks are invoked in the order they were registered). The table below shows the mapping
|
||||
of the prefix to the corresponding priority:
|
||||
|
||||
=========== ========
|
||||
prefix priority
|
||||
=========== ========
|
||||
very_fast\_ 20
|
||||
fast\_ 10
|
||||
normal\_ 0
|
||||
slow\_ -10
|
||||
very_slow\_ -20
|
||||
=========== ========
|
||||
|
17
doc/source/instrumentation_method_map.template
Normal file
17
doc/source/instrumentation_method_map.template
Normal file
@ -0,0 +1,17 @@
|
||||
Instrumentation Signal-Method Mapping
|
||||
=====================================
|
||||
|
||||
.. _instrumentation_method_map:
|
||||
|
||||
Instrument methods get automatically hooked up to signals based on their names. Mostly, the method
|
||||
name corresponds to the name of the signal, however there are a few convenience aliases defined
|
||||
(listed first) to make it easier to relate instrumentation code to the workload execution model.
|
||||
|
||||
$signal_names
|
||||
|
||||
The names above may be prefixed with one of pre-defined prefixes to set the priority of the
|
||||
Instrument method relative to other callbacks registered for the signal (within the same priority
|
||||
level, callbacks are invoked in the order they were registered). The table below shows the mapping
|
||||
of the prefix to the corresponding priority:
|
||||
|
||||
$priority_prefixes
|
193
doc/source/invocation.rst
Normal file
193
doc/source/invocation.rst
Normal file
@ -0,0 +1,193 @@
|
||||
.. _invocation:
|
||||
|
||||
========
|
||||
Commands
|
||||
========
|
||||
|
||||
Installing the wlauto package will add ``wa`` command to your system,
|
||||
which you can run from anywhere. This has a number of sub-commands, which can
|
||||
be viewed by executing ::
|
||||
|
||||
wa -h
|
||||
|
||||
Individual sub-commands are discussed in detail below.
|
||||
|
||||
run
|
||||
---
|
||||
|
||||
The most common sub-command you will use is ``run``. This will run specified
|
||||
workload(s) and process resulting output. This takes a single mandatory
|
||||
argument that specifies what you want WA to run. This could be either a
|
||||
workload name, or a path to an "agenda" file that allows you to specify multiple
|
||||
workloads as well as a lot of additional configuration (see :ref:`agenda`
|
||||
section for details). Executing ::
|
||||
|
||||
wa run -h
|
||||
|
||||
Will display help for this subcommand that will look something like this::
|
||||
|
||||
usage: run [-d DIR] [-f] AGENDA
|
||||
|
||||
Execute automated workloads on a remote device and process the resulting
|
||||
output.
|
||||
|
||||
positional arguments:
|
||||
AGENDA Agenda for this workload automation run. This defines
|
||||
which workloads will be executed, how many times, with
|
||||
which tunables, etc. See /usr/local/lib/python2.7
|
||||
/dist-packages/wlauto/agenda-example.csv for an
|
||||
example of how this file should be structured.
|
||||
|
||||
optional arguments:
|
||||
-h, --help show this help message and exit
|
||||
-c CONFIG, --config CONFIG
|
||||
specify an additional config.py
|
||||
-v, --verbose The scripts will produce verbose output.
|
||||
--version Output the version of Workload Automation and exit.
|
||||
--debug Enable debug mode. Note: this implies --verbose.
|
||||
-d DIR, --output-directory DIR
|
||||
Specify a directory where the output will be
|
||||
                        generated. If the directory already exists, the script
|
||||
                        will abort unless -f option (see below) is used, in
|
||||
which case the contents of the directory will be
|
||||
                        overwritten. If this option is not specified, then
|
||||
wa_output will be used instead.
|
||||
-f, --force Overwrite output directory if it exists. By default,
|
||||
                        the script will abort in this situation to prevent
|
||||
accidental data loss.
|
||||
-i ID, --id ID Specify a workload spec ID from an agenda to run. If
|
||||
this is specified, only that particular spec will be
|
||||
run, and other workloads in the agenda will be
|
||||
ignored. This option may be used to specify multiple
|
||||
IDs.
|
||||
|
||||
|
||||
Output Directory
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
The exact contents on the output directory will depend on configuration options
|
||||
used, instrumentation and output processors enabled, etc. Typically, the output
|
||||
directory will contain a results file at the top level that lists all
|
||||
measurements that were collected (currently, csv and json formats are
|
||||
supported), along with a subdirectory for each iteration executed with output
|
||||
for that specific iteration.
|
||||
|
||||
At the top level, there will also be a run.log file containing the complete log
|
||||
output for the execution. The contents of this file is equivalent to what you
|
||||
would get in the console when using --verbose option.
|
||||
|
||||
Finally, there will be a __meta subdirectory. This will contain a copy of the
|
||||
agenda file used to run the workloads along with any other device-specific
|
||||
configuration files used during execution.
|
||||
|
||||
|
||||
list
|
||||
----
|
||||
|
||||
This lists all extensions of a particular type. For example ::
|
||||
|
||||
wa list workloads
|
||||
|
||||
will list all workloads currently included in WA. The list will consist of
|
||||
extension names and short descriptions of the functionality they offer.
|
||||
|
||||
|
||||
show
|
||||
----
|
||||
|
||||
This will show detailed information about an extension, including more in-depth
|
||||
description and any parameters/configuration that are available. For example
|
||||
executing ::
|
||||
|
||||
wa show andebench
|
||||
|
||||
will produce something like ::
|
||||
|
||||
|
||||
andebench
|
||||
|
||||
AndEBench is an industry standard Android benchmark provided by The Embedded Microprocessor Benchmark Consortium
|
||||
(EEMBC).
|
||||
|
||||
parameters:
|
||||
|
||||
number_of_threads
|
||||
Number of threads that will be spawned by AndEBench.
|
||||
type: int
|
||||
|
||||
single_threaded
|
||||
If ``true``, AndEBench will run with a single thread. Note: this must not be specified if ``number_of_threads``
|
||||
has been specified.
|
||||
type: bool
|
||||
|
||||
http://www.eembc.org/andebench/about.php
|
||||
|
||||
From the website:
|
||||
|
||||
- Initial focus on CPU and Dalvik interpreter performance
|
||||
- Internal algorithms concentrate on integer operations
|
||||
- Compares the difference between native and Java performance
|
||||
- Implements flexible multicore performance analysis
|
||||
- Results displayed in Iterations per second
|
||||
- Detailed log file for comprehensive engineering analysis
|
||||
|
||||
.. _record-command:
|
||||
|
||||
record
|
||||
------
|
||||
|
||||
This command simplifies the process of recording a revent file. It
|
||||
will automatically deploy revent and even has the option of automatically
|
||||
opening apps. WA uses two parts to the names of revent recordings in the
|
||||
format, {device_name}.{suffix}.revent. - device_name can either be specified
|
||||
manually with the ``-d`` argument or it can be automatically determined. On
|
||||
Android device it will be obtained from ``build.prop``, on Linux devices it is
|
||||
obtained from ``/proc/device-tree/model``. - suffix is used by WA to determine
|
||||
which part of the app execution the recording is for, currently these are
|
||||
either ``setup`` or ``run``. This should be specified with the ``-s``
|
||||
argument. The full set of options for this command are::
|
||||
|
||||
usage: wa record [-h] [-c CONFIG] [-v] [--debug] [--version] [-d DEVICE]
|
||||
[-s SUFFIX] [-o OUTPUT] [-p PACKAGE] [-C]
|
||||
|
||||
optional arguments:
|
||||
-h, --help show this help message and exit
|
||||
-c CONFIG, --config CONFIG
|
||||
specify an additional config.py
|
||||
-v, --verbose The scripts will produce verbose output.
|
||||
--debug Enable debug mode. Note: this implies --verbose.
|
||||
--version show program's version number and exit
|
||||
-d DEVICE, --device DEVICE
|
||||
The name of the device
|
||||
-s SUFFIX, --suffix SUFFIX
|
||||
The suffix of the revent file, e.g. ``setup``
|
||||
-o OUTPUT, --output OUTPUT
|
||||
Directory to save the recording in
|
||||
-p PACKAGE, --package PACKAGE
|
||||
Package to launch before recording
|
||||
-C, --clear Clear app cache before launching it
|
||||
|
||||
.. _replay-command:
|
||||
|
||||
replay
|
||||
------
|
||||
|
||||
Alongside ``record``, WA also has a command to play back recorded revent files.
|
||||
It behaves very similar to the ``record`` command taking many of the same options::
|
||||
|
||||
usage: wa replay [-h] [-c CONFIG] [-v] [--debug] [--version] [-p PACKAGE] [-C]
|
||||
revent
|
||||
|
||||
positional arguments:
|
||||
revent The name of the file to replay
|
||||
|
||||
optional arguments:
|
||||
-h, --help show this help message and exit
|
||||
-c CONFIG, --config CONFIG
|
||||
specify an additional config.py
|
||||
-v, --verbose The scripts will produce verbose output.
|
||||
--debug Enable debug mode. Note: this implies --verbose.
|
||||
--version show program's version number and exit
|
||||
-p PACKAGE, --package PACKAGE
|
||||
Package to launch before recording
|
||||
-C, --clear Clear app cache before launching it
|
@ -1,239 +0,0 @@
|
||||
.. _migration-guide:
|
||||
|
||||
Migration Guide
|
||||
================
|
||||
|
||||
.. contents:: Contents
|
||||
:depth: 4
|
||||
:local:
|
||||
|
||||
Users
|
||||
"""""
|
||||
|
||||
Configuration
|
||||
--------------
|
||||
|
||||
Default configuration file change
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Instead of the standard ``config.py`` file located at
|
||||
``$WA_USER_DIRECTORY/config.py`` WA now uses a ``config.yaml`` file (at the same
|
||||
location) which is written in the YAML format instead of python. Additionally
|
||||
upon first invocation WA3 will automatically try and detect whether a WA2 config
|
||||
file is present and convert it to use the new WA3 format. During this process
|
||||
any known parameter name changes should be detected and updated accordingly.
|
||||
|
||||
Plugin Changes
|
||||
^^^^^^^^^^^^^^^
|
||||
Please note that not all plugins that were available for WA2 are currently
|
||||
available for WA3 so you may need to remove plugins that are no longer present
|
||||
from your config files. One plugin of note is the ``standard`` results
|
||||
processor, this has been removed and its functionality built into the core
|
||||
framework.
|
||||
|
||||
--------------------------------------------------------
|
||||
|
||||
Agendas
|
||||
-------
|
||||
|
||||
WA3 is designed to keep configuration as backwards compatible as possible so
|
||||
most agendas should work out of the box, however the main changes in the style
|
||||
of WA3 agendas are:
|
||||
|
||||
Global Section
|
||||
^^^^^^^^^^^^^^
|
||||
The ``global`` and ``config`` sections have been merged so now all configuration
|
||||
that was specified under the "global" keyword can now also be specified under
|
||||
"config". Although "global" is still a valid keyword you will need to ensure that
|
||||
there are not duplicated entries in each section.
|
||||
|
||||
Instrumentation and Results Processors merged
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The ``instrumentation`` and ``results_processors`` sections from WA2 have now
|
||||
been merged into a single ``augmentations`` section to simplify the
|
||||
configuration process. Although for backwards compatibility, support for the old
|
||||
sections has been retained.
|
||||
|
||||
|
||||
Per workload enabling of augmentations
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
All augmentations can now be enabled and disabled on a per workload basis.
|
||||
|
||||
|
||||
Setting Runtime Parameters
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
:ref:`Runtime Parameters <runtime-parameters>` are now the preferred way of
|
||||
configuring cpufreq, hotplug and cpuidle rather than setting the corresponding
|
||||
sysfile values as this will perform additional validation and ensure the nodes
|
||||
are set in the correct order to avoid any conflicts.
|
||||
|
||||
Parameter Changes
|
||||
^^^^^^^^^^^^^^^^^
|
||||
Any parameter names changes listed below will also have their old names
|
||||
specified as aliases and should continue to work as normal, however going forward
|
||||
the new parameter names should be preferred:
|
||||
|
||||
- The workload parameter :confval:`clean_up` has been renamed to :confval:`cleanup_assets` to
|
||||
better reflect its purpose.
|
||||
|
||||
- The workload parameter :confval:`check_apk` has been renamed to
|
||||
  :confval:`prefer_host_package` to be more explicit in its functionality to indicate
|
||||
whether a package on the target or the host should have priority when
|
||||
searching for a suitable package.
|
||||
|
||||
- The execution order ``by_spec`` is now called ``by_workload`` for clarity of
|
||||
purpose. For more information please see :ref:`configuration-specification`.
|
||||
|
||||
- The ``by_spec`` reboot policy has been removed as this is no longer relevant
|
||||
and the ``each_iteration`` reboot policy has been renamed to ``each_job``,
|
||||
please see :ref:`configuration-specification` for more information.
|
||||
|
||||
Individual workload parameters have been attempted to be standardized for the
|
||||
more common operations e.g.:
|
||||
|
||||
- :confval:`iterations` is now :confval:`loops` to indicate how many
|
||||
'tight loops' of the workload should be performed, e.g. without the
|
||||
setup/teardown method calls.
|
||||
- :confval:`num_threads` is now consistently :confval:`threads` across workloads.
|
||||
- :confval:`run_timeout` is now consistently :confval:`timeout` across workloads.
|
||||
- :confval:`taskset_mask` and :confval:`cpus` have been changed to
|
||||
  consistently be referred to as :confval:`cpus` and its type is now
|
||||
a :class:`cpu_mask` type allowing configuration to be supplied either
|
||||
  directly as a mask, as a list of cpu indexes, or as a sysfs-style
|
||||
string.
|
||||
|
||||
Output
|
||||
^^^^^^^
|
||||
Output Directory
|
||||
~~~~~~~~~~~~~~~~
|
||||
The :ref:`output directory <output_directory>`'s structure has changed layout
|
||||
and now includes additional subdirectories. There is now a ``__meta`` directory
|
||||
that contains copies of the agenda and config files supplied to WA for that
|
||||
particular run so that all the relevant config is self contained. Additionally
|
||||
if one or more jobs fail during a run then corresponding output directory will be
|
||||
moved into a ``__failed`` subdirectory to allow for quicker analysis.
|
||||
|
||||
|
||||
Output API
|
||||
~~~~~~~~~~
|
||||
There is now an Output API which can be used to more easily post process the
|
||||
output from a run. For more information please see the
|
||||
:ref:`Output API <output_processing_api>` documentation.
|
||||
|
||||
|
||||
-----------------------------------------------------------
|
||||
|
||||
Developers
|
||||
""""""""""""
|
||||
|
||||
Framework
|
||||
---------
|
||||
|
||||
Imports
|
||||
^^^^^^^
|
||||
|
||||
To distinguish between the different versions of WA, WA3's package name has been
|
||||
renamed to ``wa``. This means that all the old ``wlauto`` imports will need to
|
||||
be updated. For more information please see the corresponding section in the
|
||||
:ref:`developer reference section<developer_reference>`
|
||||
|
||||
Asset Deployment
|
||||
^^^^^^^^^^^^^^^^^^
|
||||
WA3 now contains a generic assets deployment and clean up mechanism so if a
|
||||
workload was previously doing this in an ad-hoc manner this should be updated to
|
||||
utilize the new functionality. To make use of this functionality a list of
|
||||
assets should be set as the workload ``deployable_assets`` attribute, these will
|
||||
be automatically retrieved via WA's resource getters and deployed either to the
|
||||
targets working directory or a custom directory specified as the workloads
|
||||
``assets_directory`` attribute. If a custom implementation is required the
|
||||
``deploy_assets`` method should be overridden inside the workload. To allow for
|
||||
the removal of the additional assets any additional file paths should be added
|
||||
to the ``self.deployed_assets`` list which is used to keep track of any assets
|
||||
that have been deployed for the workload. This is what is used by the generic
|
||||
``remove_assets`` method to clean up any files deployed to the target.
|
||||
Optionally if the file structure of the deployed assets requires additional
|
||||
logic then the ``remove_assets`` method can be overridden for a particular
|
||||
workload as well.
|
||||
|
||||
--------------------------------------------------------
|
||||
|
||||
Workloads
|
||||
---------
|
||||
|
||||
Python Workload Structure
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
- The ``update_results`` method has been split out into 2 stages. There is now
|
||||
``extract_results`` and ``update_output`` which should be used for extracting
|
||||
any results from the target back to the host system and to update the output
|
||||
with any metrics or artefacts for the specific workload iteration respectively.
|
||||
|
||||
- WA now features :ref:`execution decorators <execution-decorators>` which can
|
||||
be used to allow for more efficient binary deployment and that they are only
|
||||
installed to the device once per run. For more information of implementing
|
||||
this please see
|
||||
:ref:`deploying executables to a target <deploying-executables>`.
|
||||
|
||||
|
||||
APK Functionality
|
||||
^^^^^^^^^^^^^^^^^
|
||||
All apk functionality has been re-factored into an APKHandler object which is
|
||||
available as the apk attribute of the workload. This means that for example
|
||||
``self.launchapplication()`` would now become ``self.apk.start_activity()``
|
||||
|
||||
|
||||
UiAutomator Java Structure
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Instead of a single ``runUiAutomation`` method to perform all of the UiAutomation,
|
||||
the structure has been refactored into 5 methods that can optionally be overridden.
|
||||
The available methods are ``initialize``, ``setup``, ``runWorkload``, ``extractResults``
|
||||
and ``teardown`` to better mimic the different stages in the python workload.
|
||||
|
||||
|
||||
- ``initialize`` should be used to retrieve
|
||||
and set any relevant parameters required during the workload.
|
||||
- ``setup`` should be used to perform any setup required for the workload, for
|
||||
  example dismissing popups or configuring any required settings.
|
||||
- ``runWorkload`` should be used to perform the actual measurable work of the workload.
|
||||
- ``extractResults`` should be used to extract any relevant results from the
|
||||
target after the workload has been completed.
|
||||
- ``teardown`` should be used to perform any final clean up of the workload on the target.
|
||||
|
||||
.. note:: The ``initialize`` method should have the ``@Before`` tag attached
|
||||
   to the method which will cause it to be run before each of the stages of
|
||||
   the workload. The remaining methods should all have the ``@Test`` tag
|
||||
attached to the method to indicate that this is a test stage that should be
|
||||
called at the appropriate time.
|
||||
|
||||
GUI Functionality
|
||||
^^^^^^^^^^^^^^^^^
|
||||
For UI based applications all UI functionality has been re-factored to into a
|
||||
``gui`` attribute which currently will be either a ``UiAutomatorGUI`` object or
|
||||
a ``ReventGUI`` depending on the workload type. This means that for example if
|
||||
you wish to pass parameters to a UiAutomator workload you will now need to use
|
||||
``self.gui.uiauto_params['Parameter Name'] = value``
|
||||
|
||||
Attributes
|
||||
^^^^^^^^^^
|
||||
- The old ``package`` attribute has been replaced by ``package_names`` which
|
||||
expects a list of strings which allows for multiple package names to be
|
||||
specified if required. It is also no longer required to explicitly state the
|
||||
launch-able activity, this will be automatically discovered from the apk so this
|
||||
workload attribute can be removed.
|
||||
|
||||
- The ``device`` attribute of the workload is now a devlib ``target``. Some of the
|
||||
command names remain the same, however there will be differences. The API can be
|
||||
found at http://devlib.readthedocs.io/en/latest/target.html however some of
|
||||
the more common changes can be found below:
|
||||
|
||||
|
||||
+----------------------------------------------+---------------------------------+
|
||||
| Original Method | New Method |
|
||||
+----------------------------------------------+---------------------------------+
|
||||
|``self.device.pull_file(file)`` | ``self.target.pull(file)`` |
|
||||
+----------------------------------------------+---------------------------------+
|
||||
|``self.device.push_file(file)`` | ``self.target.push(file)`` |
|
||||
+----------------------------------------------+---------------------------------+
|
||||
|``self.device.install_executable(file)`` | ``self.target.install(file)`` |
|
||||
+----------------------------------------------+---------------------------------+
|
||||
|``self.device.execute(cmd, background=True)`` | ``self.target.background(cmd)``|
|
||||
+----------------------------------------------+---------------------------------+
|
@ -1,67 +0,0 @@
|
||||
.. _plugin-reference:
|
||||
|
||||
=================
|
||||
Plugin Reference
|
||||
=================
|
||||
|
||||
This section lists Plugins that currently come with WA3. Each package below
|
||||
represents a particular type of extension (e.g. a workload); each sub-package of
|
||||
that package is a particular instance of that extension (e.g. the Andebench
|
||||
workload). Clicking on a link will show what the individual extension does,
|
||||
what configuration parameters it takes, etc.
|
||||
|
||||
For how to implement your own Plugins, please refer to the guides in the
|
||||
:ref:`writing plugins <writing-plugins>` section.
|
||||
|
||||
.. raw:: html
|
||||
|
||||
<style>
|
||||
td {
|
||||
vertical-align: text-top;
|
||||
}
|
||||
</style>
|
||||
<table <tr><td>
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
plugins/workloads
|
||||
|
||||
.. raw:: html
|
||||
|
||||
</td><td>
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
plugins/instruments
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
plugins/energy_instrument_backends
|
||||
|
||||
|
||||
|
||||
.. raw:: html
|
||||
|
||||
</td><td>
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
plugins/output_processors
|
||||
|
||||
.. raw:: html
|
||||
|
||||
</td><td>
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
plugins/targets
|
||||
|
||||
.. raw:: html
|
||||
|
||||
</td></tr></table>
|
||||
|
284
doc/source/quickstart.rst
Normal file
284
doc/source/quickstart.rst
Normal file
@ -0,0 +1,284 @@
|
||||
==========
|
||||
Quickstart
|
||||
==========
|
||||
|
||||
This guide will show you how to quickly start running workloads using
|
||||
Workload Automation 2.
|
||||
|
||||
|
||||
Install
|
||||
=======
|
||||
|
||||
.. note:: This is a quick summary. For more detailed instructions, please see
|
||||
the :doc:`installation` section.
|
||||
|
||||
Make sure you have Python 2.7 and a recent Android SDK with API level 18 or above
|
||||
installed on your system. A complete install of the Android SDK is required, as
|
||||
WA uses a number of its utilities, not just adb. For the SDK, make sure that either
|
||||
``ANDROID_HOME`` environment variable is set, or that ``adb`` is in your ``PATH``.
|
||||
|
||||
.. Note:: If you plan to run Workload Automation on Linux devices only, SSH is required,
|
||||
and Android SDK is optional if you wish to run WA on Android devices at a
|
||||
later time.
|
||||
|
||||
However, you would be starting off with a limited number of workloads that
|
||||
will run on Linux devices.
|
||||
|
||||
In addition to the base Python 2.7 install, you will also need to have ``pip``
|
||||
(Python's package manager) installed as well. This is usually a separate package.
|
||||
|
||||
Once you have those, you can install WA with::
|
||||
|
||||
sudo -H pip install wlauto
|
||||
|
||||
This will install Workload Automation on your system, along with its mandatory
|
||||
dependencies.
|
||||
|
||||
(Optional) Verify installation
|
||||
-------------------------------
|
||||
|
||||
Once the tarball has been installed, try executing ::
|
||||
|
||||
wa -h
|
||||
|
||||
You should see a help message outlining available subcommands.
|
||||
|
||||
|
||||
(Optional) APK files
|
||||
--------------------
|
||||
|
||||
A large number of WA workloads are installed as APK files. These cannot be
|
||||
distributed with WA and so you will need to obtain those separately.
|
||||
|
||||
For more details, please see the :doc:`installation` section.
|
||||
|
||||
|
||||
Configure Your Device
|
||||
=====================
|
||||
|
||||
Locate the device configuration file, config.py, under the
|
||||
~/.workload_automation directory. Then adjust the device
|
||||
configuration settings accordingly to the device you are using.
|
||||
|
||||
Android
|
||||
-------
|
||||
|
||||
By default, the device is set to 'generic_android'. WA is configured to work
|
||||
with a generic Android device through ``adb``. If you only have one device listed
|
||||
when you execute ``adb devices``, and your device has a standard Android
|
||||
configuration, then no extra configuration is required.
|
||||
|
||||
However, if your device is connected via network, you will have to manually execute
|
||||
``adb connect <device ip>`` so that it appears in the device listing.
|
||||
|
||||
If you have multiple devices connected, you will need to tell WA which one you
|
||||
want it to use. You can do that by setting ``adb_name`` in device_config section.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# ...
|
||||
|
||||
device_config = dict(
|
||||
adb_name = 'abcdef0123456789',
|
||||
# ...
|
||||
)
|
||||
|
||||
# ...
|
||||
|
||||
Linux
|
||||
-----
|
||||
|
||||
First, set the device to 'generic_linux'
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# ...
|
||||
device = 'generic_linux'
|
||||
# ...
|
||||
|
||||
Find the device_config section and add these parameters
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# ...
|
||||
|
||||
device_config = dict(
|
||||
host = '192.168.0.100',
|
||||
username = 'root',
|
||||
password = 'password'
|
||||
# ...
|
||||
)
|
||||
|
||||
# ...
|
||||
|
||||
Parameters:
|
||||
|
||||
- Host is the IP of your target Linux device
|
||||
- Username is the user for the device
|
||||
- Password is the password for the device
|
||||
|
||||
Enabling and Disabling Instrumentation
|
||||
---------------------------------------
|
||||
|
||||
Some instrumentation tools are enabled after your initial install of WA.
|
||||
|
||||
.. note:: Some Linux devices may not be able to run certain instruments
|
||||
provided by WA (e.g. cpufreq is disabled or unsupported by the
|
||||
device).
|
||||
|
||||
As a start, keep the 'execution_time' instrument enabled while commenting out
|
||||
the rest to disable them.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# ...
|
||||
|
||||
Instrumentation = [
|
||||
# Records the time it took to run the workload
|
||||
'execution_time',
|
||||
|
||||
# Collects /proc/interrupts before and after execution and does a diff.
|
||||
# 'interrupts',
|
||||
|
||||
        # Collects the contents of /sys/devices/system/cpu before and after execution and does a diff.
|
||||
# 'cpufreq',
|
||||
|
||||
# ...
|
||||
)
|
||||
|
||||
|
||||
|
||||
This should give you basic functionality. If you are working with a development
|
||||
board or you need some advanced functionality (e.g. big.LITTLE tuning parameters),
|
||||
additional configuration may be required. Please see the :doc:`device_setup`
|
||||
section for more details.
|
||||
|
||||
|
||||
Running Your First Workload
|
||||
===========================
|
||||
|
||||
The simplest way to run a workload is to specify it as a parameter to WA ``run``
|
||||
sub-command::
|
||||
|
||||
wa run dhrystone
|
||||
|
||||
You will see INFO output from WA as it executes each stage of the run. A
|
||||
completed run output should look something like this::
|
||||
|
||||
INFO Initializing
|
||||
INFO Running workloads
|
||||
INFO Connecting to device
|
||||
INFO Initializing device
|
||||
INFO Running workload 1 dhrystone (iteration 1)
|
||||
INFO Setting up
|
||||
INFO Executing
|
||||
INFO Processing result
|
||||
INFO Tearing down
|
||||
INFO Processing overall results
|
||||
INFO Status available in wa_output/status.txt
|
||||
INFO Done.
|
||||
INFO Ran a total of 1 iterations: 1 OK
|
||||
INFO Results can be found in wa_output
|
||||
|
||||
Once the run has completed, you will find a directory called ``wa_output``
|
||||
in the location where you have invoked ``wa run``. Within this directory,
|
||||
you will find a "results.csv" file which will contain results obtained for
|
||||
dhrystone, as well as a "run.log" file containing detailed log output for
|
||||
the run. You will also find a sub-directory called 'dhrystone_1_1' that
|
||||
contains the results for that iteration. Finally, you will find a copy of the
|
||||
agenda file in the ``wa_output/__meta`` subdirectory. The contents of
|
||||
iteration-specific subdirectories will vary from workload to workload, and,
|
||||
along with the contents of the main output directory, will depend on the
|
||||
instrumentation and result processors that were enabled for that run.
|
||||
|
||||
The ``run`` sub-command takes a number of options that control its behavior,
|
||||
you can view those by executing ``wa run -h``. Please see the :doc:`invocation`
|
||||
section for details.
|
||||
|
||||
|
||||
Create an Agenda
|
||||
================
|
||||
|
||||
Simply running a single workload is normally of little use. Typically, you would
|
||||
want to specify several workloads, setup the device state and, possibly, enable
|
||||
additional instrumentation. To do this, you would need to create an "agenda" for
|
||||
the run that outlines everything you want WA to do.
|
||||
|
||||
Agendas are written using YAML_ markup language. A simple agenda might look
|
||||
like this:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
instrumentation: [~execution_time]
|
||||
result_processors: [json]
|
||||
global:
|
||||
iterations: 2
|
||||
workloads:
|
||||
- memcpy
|
||||
- name: dhrystone
|
||||
params:
|
||||
mloops: 5
|
||||
threads: 1
|
||||
|
||||
This agenda
|
||||
|
||||
- Specifies two workloads: memcpy and dhrystone.
|
||||
- Specifies that dhrystone should run in one thread and execute five million loops.
|
||||
- Specifies that each of the two workloads should be run twice.
|
||||
- Enables json result processor, in addition to the result processors enabled in
|
||||
the config.py.
|
||||
- Disables execution_time instrument, if it is enabled in the config.py
|
||||
|
||||
An agenda can be created in a text editor and saved as a YAML file. Please make note of
|
||||
where you have saved the agenda.
|
||||
|
||||
Please see :doc:`agenda` section for more options.
|
||||
|
||||
.. _YAML: http://en.wikipedia.org/wiki/YAML
|
||||
|
||||
Examples
|
||||
========
|
||||
|
||||
These examples show some useful options with the ``wa run`` command.
|
||||
|
||||
To run your own agenda::
|
||||
|
||||
wa run <path/to/agenda> (e.g. wa run ~/myagenda.yaml)
|
||||
|
||||
To redirect the output to a different directory other than wa_output::
|
||||
|
||||
wa run dhrystone -d my_output_directory
|
||||
|
||||
To use a different config.py file::
|
||||
|
||||
wa run -c myconfig.py dhrystone
|
||||
|
||||
To use the same output directory but override existing contents to
|
||||
store new dhrystone results::
|
||||
|
||||
wa run -f dhrystone
|
||||
|
||||
To display verbose output while running memcpy::
|
||||
|
||||
wa run --verbose memcpy
|
||||
|
||||
Uninstall
|
||||
=========
|
||||
|
||||
If you have installed Workload Automation via ``pip``, then run this command to
|
||||
uninstall it::
|
||||
|
||||
sudo pip uninstall wlauto
|
||||
|
||||
|
||||
.. Note:: It will *not* remove any user configuration (e.g. the ~/.workload_automation
|
||||
directory).
|
||||
|
||||
Upgrade
|
||||
=======
|
||||
|
||||
To upgrade Workload Automation to the latest version via ``pip``, run::
|
||||
|
||||
sudo pip install --upgrade --no-deps wlauto
|
||||
|
47
doc/source/resources.rst
Normal file
47
doc/source/resources.rst
Normal file
@ -0,0 +1,47 @@
|
||||
.. _resources:
|
||||
|
||||
Dynamic Resource Resolution
|
||||
===========================
|
||||
|
||||
Introduced in version 2.1.3.
|
||||
|
||||
The idea is to decouple resource identification from resource discovery.
|
||||
Workloads/instruments/devices/etc state *what* resources they need, and not
|
||||
*where* to look for them -- this instead is left to the resource resolver that
|
||||
is now part of the execution context. The actual discovery of resources is
|
||||
performed by resource getters that are registered with the resolver.
|
||||
|
||||
A resource type is defined by a subclass of
|
||||
:class:`wlauto.core.resource.Resource`. An instance of this class describes a
|
||||
resource that is to be obtained. At minimum, a ``Resource`` instance has an
|
||||
owner (which is typically the object that is looking for the resource), but
|
||||
specific resource types may define other parameters that describe an instance of
|
||||
that resource (such as file names, URLs, etc).
|
||||
|
||||
An object looking for a resource invokes a resource resolver with an instance of
|
||||
``Resource`` describing the resource it is after. The resolver goes through the
|
||||
getters registered for that resource type in priority order attempting to obtain
|
||||
the resource; once the resource is obtained, it is returned to the calling
|
||||
object. If none of the registered getters could find the resource, ``None`` is
|
||||
returned instead.
|
||||
|
||||
The most common kind of object looking for resources is a ``Workload``, and
|
||||
since v2.1.3, ``Workload`` class defines
|
||||
:py:meth:`wlauto.core.workload.Workload.init_resources` method that may be
|
||||
overridden by subclasses to perform resource resolution. For example, a workload
|
||||
looking for an APK file would do so like this::
|
||||
|
||||
from wlauto import Workload
|
||||
from wlauto.common.resources import ApkFile
|
||||
|
||||
class AndroidBenchmark(Workload):
|
||||
|
||||
# ...
|
||||
|
||||
def init_resources(self, context):
|
||||
self.apk_file = context.resource.get(ApkFile(self))
|
||||
|
||||
# ...
|
||||
|
||||
|
||||
Currently available resource types are defined in :py:mod:`wlauto.common.resources`.
|
108
doc/source/revent.rst
Normal file
108
doc/source/revent.rst
Normal file
@ -0,0 +1,108 @@
|
||||
.. _revent_files_creation:
|
||||
|
||||
revent
|
||||
======
|
||||
|
||||
revent utility can be used to record and later play back a sequence of user
|
||||
input events, such as key presses and touch screen taps. This is an alternative
|
||||
to Android UI Automator for providing automation for workloads. ::
|
||||
|
||||
|
||||
usage:
|
||||
revent [record time file|replay file|info] [verbose]
|
||||
record: stops after either return on stdin
|
||||
or time (in seconds)
|
||||
and stores in file
|
||||
replay: replays eventlog from file
|
||||
info:shows info about each event char device
|
||||
any additional parameters make it verbose
|
||||
|
||||
Recording
|
||||
---------
|
||||
|
||||
WA features a ``record`` command that will automatically deploy and start
|
||||
revent on the target device::
|
||||
|
||||
wa record
|
||||
INFO Connecting to device...
|
||||
INFO Press Enter when you are ready to record...
|
||||
[Pressed Enter]
|
||||
INFO Press Enter when you have finished recording...
|
||||
[Pressed Enter]
|
||||
INFO Pulling files from device
|
||||
|
||||
Once started, you will need to get the target device ready to record (e.g.
|
||||
unlock screen, navigate menus and launch an app) then press ``ENTER``.
|
||||
The recording has now started and button presses, taps, etc you perform on
|
||||
the device will go into the .revent file. To stop the recording simply press
|
||||
``ENTER`` again.
|
||||
|
||||
Once you have finished recording the revent file will be pulled from the device
|
||||
to the current directory. It will be named ``{device_model}.revent``. When
|
||||
recording revent files for a ``GameWorkload`` you can use the ``-s`` option to
|
||||
add ``run`` or ``setup`` suffixes.
|
||||
|
||||
For more information please read :ref:`record-command`
|
||||
|
||||
|
||||
Replaying
|
||||
---------
|
||||
|
||||
To replay a recorded file, run ``wa replay``, giving it the file you want to
|
||||
replay::
|
||||
|
||||
wa replay my_recording.revent
|
||||
|
||||
For more information please read :ref:`replay-command`
|
||||
|
||||
Using revent With Workloads
|
||||
---------------------------
|
||||
|
||||
Some workloads (pretty much all games) rely on recorded revents for their
|
||||
execution. :class:`wlauto.common.GameWorkload`-derived workloads expect two
|
||||
revent files -- one for performing the initial setup (navigating menus,
|
||||
selecting game modes, etc), and one for the actual execution of the game.
|
||||
Because revents are very device-specific\ [*]_, these two files would need to
|
||||
be recorded for each device.
|
||||
|
||||
The files must be called ``<device name>.(setup|run).revent``, where
|
||||
``<device name>`` is the name of your device (as defined by the ``name``
|
||||
attribute of your device's class). WA will look for these files in two
|
||||
places: ``<install dir>/wlauto/workloads/<workload name>/revent_files``
|
||||
and ``~/.workload_automation/dependencies/<workload name>``. The first
|
||||
location is primarily intended for revent files that come with WA (and if
|
||||
you did a system-wide install, you'll need sudo to add files there), so it's
|
||||
probably easier to use the second location for the files you record. Also,
|
||||
if revent files for a workload exist in both locations, the files under
|
||||
``~/.workload_automation/dependencies`` will be used in favor of those
|
||||
installed with WA.
|
||||
|
||||
For example, if you wanted to run angrybirds workload on "Acme" device, you would
|
||||
record the setup and run revent files using the method outlined in the section
|
||||
above and then pull them for the devices into the following locations::
|
||||
|
||||
~/workload_automation/dependencies/angrybirds/Acme.setup.revent
|
||||
~/workload_automation/dependencies/angrybirds/Acme.run.revent
|
||||
|
||||
(you may need to create the intermediate directories if they don't already
|
||||
exist).
|
||||
|
||||
.. [*] It's not just about screen resolution -- the event codes may be different
|
||||
even if devices use the same screen.
|
||||
|
||||
|
||||
revent vs. UiAutomator
|
||||
----------------------
|
||||
|
||||
In general, Android UI Automator is the preferred way of automating user input
|
||||
for workloads because, unlike revent, UI Automator does not depend on a
|
||||
particular screen resolution, and so is more portable across different devices.
|
||||
It also gives better control and can potentially be faster for long UI
|
||||
manipulations, as input events are scripted based on the available UI elements,
|
||||
rather than generated by human input.
|
||||
|
||||
On the other hand, revent can be used to manipulate pretty much any workload,
|
||||
where as UI Automator only works for Android UI elements (such as text boxes or
|
||||
radio buttons), which makes the latter useless for things like games. Recording
|
||||
revent sequence is also faster than writing automation code (on the other hand,
|
||||
one would need to maintain a different revent log for each screen resolution).
|
@ -1,12 +0,0 @@
|
||||
================
|
||||
User Information
|
||||
================
|
||||
|
||||
.. contents:: Contents
|
||||
:depth: 4
|
||||
:local:
|
||||
|
||||
.. include:: user_information/installation.rst
|
||||
.. include:: user_information/user_guide.rst
|
||||
.. include:: user_information/how_to.rst
|
||||
.. include:: user_information/user_reference.rst
|
@ -1,11 +0,0 @@
|
||||
*******
|
||||
How Tos
|
||||
*******
|
||||
|
||||
.. contents:: Contents
|
||||
:depth: 4
|
||||
:local:
|
||||
|
||||
.. include:: user_information/how_tos/agenda.rst
|
||||
.. include:: user_information/how_tos/device_setup.rst
|
||||
.. include:: user_information/how_tos/revent.rst
|
@ -1,792 +0,0 @@
|
||||
.. _agenda:
|
||||
|
||||
Defining Experiments With an Agenda
|
||||
===================================
|
||||
|
||||
An agenda specifies what is to be done during a Workload Automation run,
|
||||
including which workloads will be run, with what configuration, which
|
||||
augmentations will be enabled, etc. Agenda syntax is designed to be both
|
||||
succinct and expressive.
|
||||
|
||||
Agendas are specified using YAML_ notation. It is recommended that you
|
||||
familiarize yourself with the linked page.
|
||||
|
||||
.. _YAML: http://en.wikipedia.org/wiki/YAML
|
||||
|
||||
Specifying which workloads to run
|
||||
---------------------------------
|
||||
|
||||
The central purpose of an agenda is to specify what workloads to run. A
|
||||
minimalist agenda contains a single entry at the top level called "workloads"
|
||||
that maps onto a list of workload names to run:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
workloads:
|
||||
- dhrystone
|
||||
- memcpy
|
||||
- rt_app
|
||||
|
||||
This specifies a WA run consisting of ``dhrystone`` followed by ``memcpy``, followed by
|
||||
``rt_app`` workloads, and using the augmentations specified in
|
||||
config.yaml (see :ref:`configuration-specification` section).
|
||||
|
||||
.. note:: If you're familiar with YAML, you will recognize the above as a single-key
|
||||
associative array mapping onto a list. YAML has two notations for both
|
||||
associative arrays and lists: block notation (seen above) and also
|
||||
in-line notation. This means that the above agenda can also be
|
||||
written in a single line as ::
|
||||
|
||||
workloads: [dhrystone, memcpy, rt-app]
|
||||
|
||||
(with the list in-lined), or ::
|
||||
|
||||
{workloads: [dhrystone, memcpy, rt-app]}
|
||||
|
||||
(with both the list and the associative array in-line). WA doesn't
|
||||
care which of the notations is used as they all get parsed into the
|
||||
same structure by the YAML parser. You can use whatever format you
|
||||
find easier/clearer.
|
||||
|
||||
.. note:: WA plugin names are case-insensitive, and dashes (``-``) and
|
||||
underscores (``_``) are treated identically. So all of the following
|
||||
entries specify the same workload: ``rt_app``, ``rt-app``, ``RT-app``.
|
||||
|
||||
Multiple iterations
|
||||
-------------------
|
||||
|
||||
There will normally be some variability in workload execution when running on a
|
||||
real device. In order to quantify it, multiple iterations of the same workload
|
||||
are usually performed. You can specify the number of iterations for each
|
||||
workload by adding ``iterations`` field to the workload specifications (or
|
||||
"specs"):
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
workloads:
|
||||
- name: dhrystone
|
||||
iterations: 5
|
||||
- name: memcpy
|
||||
iterations: 5
|
||||
- name: cyclictest
|
||||
iterations: 5
|
||||
|
||||
Now that we're specifying both the workload name and the number of iterations in
|
||||
each spec, we have to explicitly name each field of the spec.
|
||||
|
||||
It is often the case that, as in the example above, you will want to run all
|
||||
workloads for the same number of iterations. Rather than having to specify it
|
||||
for each and every spec, you can do with a single entry by adding `iterations`
|
||||
to your ``config`` section in your agenda:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
iterations: 5
|
||||
workloads:
|
||||
- dhrystone
|
||||
- memcpy
|
||||
- cyclictest
|
||||
|
||||
If the same field is defined both in config section and in a spec, then the
|
||||
value in the spec will overwrite the value. For example, suppose we
|
||||
wanted to run all our workloads for five iterations, except cyclictest which we
|
||||
want to run for ten (e.g. because we know it to be particularly unstable). This
|
||||
can be specified like this:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
iterations: 5
|
||||
workloads:
|
||||
- dhrystone
|
||||
- memcpy
|
||||
- name: cyclictest
|
||||
iterations: 10
|
||||
|
||||
Again, because we are now specifying two fields for cyclictest spec, we have to
|
||||
explicitly name them.
|
||||
|
||||
Configuring Workloads
|
||||
---------------------
|
||||
|
||||
Some workloads accept configuration parameters that modify their behaviour. These
|
||||
parameters are specific to a particular workload and can alter the workload in
|
||||
any number of ways, e.g. set the duration for which to run, or specify a media
|
||||
file to be used, etc. The vast majority of workload parameters will have some
|
||||
default value, so it is only necessary to specify the name of the workload in
|
||||
order for WA to run it. However, sometimes you want more control over how a
|
||||
workload runs.
|
||||
|
||||
For example, by default, dhrystone will execute 10 million loops across four
|
||||
threads. Suppose your device has six cores available and you want the workload to
|
||||
load them all. You also want to increase the total number of loops accordingly
|
||||
to 15 million. You can specify this using dhrystone's parameters:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
iterations: 5
|
||||
workloads:
|
||||
- name: dhrystone
|
||||
params:
|
||||
threads: 6
|
||||
mloops: 15
|
||||
- memcpy
|
||||
- name: cyclictest
|
||||
iterations: 10
|
||||
|
||||
.. note:: You can find out what parameters a workload accepts by looking it up
|
||||
in the :ref:`Workloads` section or using WA itself with "show"
|
||||
command::
|
||||
|
||||
wa show dhrystone
|
||||
|
||||
see the :ref:`Invocation` section for details.
|
||||
|
||||
In addition to configuring the workload itself, we can also specify
|
||||
configuration for the underlying device which can be done by setting runtime
|
||||
parameters in the workload spec. Explicit runtime parameters have been exposed for
|
||||
configuring cpufreq, hotplug and cpuidle. For more detailed information on Runtime
|
||||
Parameters see the :ref:`runtime parameters <runtime-parameters>` section. For
|
||||
example, suppose we want to ensure the maximum score for our benchmarks, at the
|
||||
expense of power consumption so we want to set the cpufreq governor to
|
||||
"performance" and enable all of the cpus on the device, (assuming there are 8
|
||||
cpus available), which can be done like this:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
iterations: 5
|
||||
workloads:
|
||||
- name: dhrystone
|
||||
runtime_params:
|
||||
governor: performance
|
||||
num_cores: 8
|
||||
workload_params:
|
||||
threads: 6
|
||||
mloops: 15
|
||||
- memcpy
|
||||
- name: cyclictest
|
||||
iterations: 10
|
||||
|
||||
|
||||
I've renamed ``params`` to ``workload_params`` for clarity,
|
||||
but that wasn't strictly necessary as ``params`` is interpreted as
|
||||
``workload_params`` inside a workload spec.
|
||||
|
||||
Runtime parameters do not automatically reset at the end of workload spec
|
||||
execution, so all subsequent iterations will also be affected unless they
|
||||
explicitly change the parameter (in the example above, performance governor will
|
||||
also be used for ``memcpy`` and ``cyclictest``). There are two ways around this:
|
||||
either set ``reboot_policy`` WA setting (see :ref:`configuration-specification`
|
||||
section) such that the device gets rebooted between job executions, thus being
|
||||
returned to its initial state, or set the default runtime parameter values in
|
||||
the ``config`` section of the agenda so that they get set for every spec that
|
||||
doesn't explicitly override them.
|
||||
|
||||
If additional configuration of the device is required which is not exposed via
|
||||
the built in runtime parameters, you can write a value to any file exposed on
|
||||
the device using ``sysfile_values``, for example we could have also performed
|
||||
the same configuration manually (assuming we have a big.LITTLE system and our
|
||||
cores 0-3 and 4-7 are in 2 separate DVFS domains and so setting the governor for
|
||||
cpu0 and cpu4 will affect all our cores) e.g.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
|
||||
config:
|
||||
iterations: 5
|
||||
workloads:
|
||||
- name: dhrystone
|
||||
runtime_params:
|
||||
sysfile_values:
|
||||
/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
|
||||
/sys/devices/system/cpu/cpu4/cpufreq/scaling_governor: performance
|
||||
/sys/devices/system/cpu/cpu0/online: 1
|
||||
/sys/devices/system/cpu/cpu1/online: 1
|
||||
/sys/devices/system/cpu/cpu2/online: 1
|
||||
/sys/devices/system/cpu/cpu3/online: 1
|
||||
/sys/devices/system/cpu/cpu4/online: 1
|
||||
/sys/devices/system/cpu/cpu5/online: 1
|
||||
/sys/devices/system/cpu/cpu6/online: 1
|
||||
/sys/devices/system/cpu/cpu7/online: 1
|
||||
workload_params:
|
||||
threads: 6
|
||||
mloops: 15
|
||||
- memcpy
|
||||
- name: cyclictest
|
||||
iterations: 10
|
||||
|
||||
Here, we're specifying a ``sysfile_values`` runtime parameter for the device.
|
||||
For more information please see :ref:`setting sysfiles <setting-sysfiles>`.
|
||||
|
||||
APK Workloads
|
||||
^^^^^^^^^^^^^
|
||||
|
||||
WA has various resource getters that can be configured to locate APK files but
|
||||
for most people APK files should be kept in the
|
||||
``$WA_USER_DIRECTORY/dependencies/SOME_WORKLOAD/`` directory. (by default
|
||||
``~/.workload_automation/dependencies/SOME_WORKLOAD/``). The
|
||||
``WA_USER_DIRECTORY`` environment variable can be used to change the location of
|
||||
this directory. The APK files need to be put into the corresponding directories for
|
||||
the workload they belong to. The name of the file can be anything but as
|
||||
explained below may need to contain certain pieces of information.
|
||||
|
||||
All ApkWorkloads have parameters that affect the way in which APK files are
|
||||
resolved, ``exact_abi``, ``force_install`` and ``prefer_host_package``. Their
|
||||
exact behaviours are outlined below.
|
||||
|
||||
:exact_abi: If this setting is enabled WA's resource resolvers will look for the
|
||||
devices ABI with any native code present in the apk. By default this setting
|
||||
is disabled since most apks will work across all devices. You may wish to
|
||||
enable this feature when working with devices that support multiple ABI's
|
||||
(like 64-bit devices that can run 32-bit APK files) and are specifically
|
||||
trying to test one or the other.
|
||||
|
||||
:force_install: If this setting is enabled WA will *always* use the APK file on
|
||||
the host, and re-install it on every iteration. If there is no APK on the
|
||||
host that is a suitable version and/or ABI for the workload WA will error
|
||||
when ``force_install`` is enabled.
|
||||
|
||||
:prefer_host_package: This parameter is used to specify a preference over host
|
||||
or target versions of the app. When set to ``True`` WA will prefer the host
|
||||
side version of the APK. It will check if the host has the APK and whether it
|
||||
meets the version requirements of the workload. If so, and the target also
|
||||
already has same version nothing will be done, otherwise WA will overwrite
|
||||
the targets installed application with the host version. If the host is
|
||||
missing the APK or it does not meet version requirements WA will fall back to
|
||||
the app on the target if present and is a suitable version. When this
|
||||
parameter is set to ``False`` WA will prefer to use the version already on
|
||||
the target if it meets the workloads version requirements. If it does not it
|
||||
will fall back to searching the host for the correct version. In both modes
|
||||
if neither the host nor target have a suitable version, WA will produce and
|
||||
error and will not run the workload.
|
||||
|
||||
:version: This parameter is used to specify which version of uiautomation for
|
||||
the workload is used. In some workloads e.g. ``geekbench`` multiple versions
|
||||
with drastically different UIs are supported. An APK's version will be
|
||||
automatically extracted therefore it is possible to have multiple apks for
|
||||
different versions of a workload present on the host and select between which
|
||||
is used for a particular job by specifying the relevant version in your
|
||||
:ref:`agenda <agenda>`.
|
||||
|
||||
:variant_name: Some workloads use variants of APK files, this is usually the
|
||||
case with web browser APK files, these work in exactly the same way as the
|
||||
version.
|
||||
|
||||
|
||||
IDs and Labels
|
||||
--------------
|
||||
|
||||
It is possible to list multiple specs with the same workload in an agenda. You
|
||||
may wish to do this if you want to run a workload with different parameter values
|
||||
or under different runtime configurations of the device. The workload name
|
||||
therefore does not uniquely identify a spec. To be able to distinguish between
|
||||
different specs (e.g. in reported results), each spec has an ID which is unique
|
||||
to all specs within an agenda (and therefore with a single WA run). If an ID
|
||||
isn't explicitly specified using ``id`` field (note that the field name is in
|
||||
lower case), one will be automatically assigned to the spec at the beginning of
|
||||
the WA run based on the position of the spec within the list. The first spec
|
||||
*without an explicit ID* will be assigned ID ``wk1``, the second spec *without an
|
||||
explicit ID* will be assigned ID ``wk2``, and so forth.
|
||||
|
||||
Numerical IDs aren't particularly easy to deal with, which is why it is
|
||||
recommended that, for non-trivial agendas, you manually set the ids to something
|
||||
more meaningful (or use labels -- see below). An ID can be pretty much anything
|
||||
that will pass through the YAML parser. The only requirement is that it is
|
||||
unique to the agenda. However, it is usually better to keep them reasonably short
|
||||
(they don't need to be *globally* unique), and to stick with alpha-numeric
|
||||
characters and underscores/dashes. While WA can handle other characters as well,
|
||||
getting too adventurous with your IDs may cause issues further down the line
|
||||
when processing WA output (e.g. when uploading them to a database that may have
|
||||
its own restrictions).
|
||||
|
||||
In addition to IDs, you can also specify labels for your workload specs. These
|
||||
are similar to IDs but do not have the uniqueness restriction. If specified,
|
||||
labels will be used by some output processes instead of (or in addition to) the
|
||||
workload name. For example, the ``csv`` output processor will put the label in the
|
||||
"workload" column of the CSV file.
|
||||
|
||||
It is up to you how you chose to use IDs and labels. WA itself doesn't expect
|
||||
any particular format (apart from uniqueness for IDs). Below is the earlier
|
||||
example updated to specify explicit IDs and label dhrystone spec to reflect
|
||||
parameters used.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
iterations: 5
|
||||
workloads:
|
||||
- id: 01_dhry
|
||||
name: dhrystone
|
||||
label: dhrystone_15over6
|
||||
runtime_params:
|
||||
cpu0_governor: performance
|
||||
workload_params:
|
||||
threads: 6
|
||||
mloops: 15
|
||||
- id: 02_memc
|
||||
name: memcpy
|
||||
- id: 03_cycl
|
||||
name: cyclictest
|
||||
iterations: 10
|
||||
|
||||
.. _using-classifiers:
|
||||
|
||||
Classifiers
|
||||
------------
|
||||
|
||||
Classifiers can be used in 2 distinct ways, the first use is being supplied in
|
||||
an agenda as a set of key-value pairs which can be used to help identify sub-tests
|
||||
of a run, for example if you have multiple sections in your agenda running
|
||||
your workloads at different frequencies you might want to set a classifier
|
||||
specifying which frequencies are being used. These can then be utilized later,
|
||||
for example with the ``csv`` :ref:`output processor <output-processors>` with
|
||||
``use_all_classifiers`` set to ``True`` and this will add additional columns to
|
||||
the output file for each of the classifier keys that have been specified
|
||||
allowing for quick comparison.
|
||||
|
||||
An example agenda is shown here:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
augmentations:
|
||||
- csv
|
||||
iterations: 1
|
||||
device: generic_android
|
||||
csv:
|
||||
use_all_classifiers: True
|
||||
sections:
|
||||
- id: max_speed
|
||||
runtime_parameters:
|
||||
frequency: 1700000
|
||||
classifiers:
|
||||
freq: 1700000
|
||||
- id: min_speed
|
||||
runtime_parameters:
|
||||
frequency: 200000
|
||||
classifiers:
|
||||
freq: 200000
|
||||
workloads:
|
||||
- name: recentfling
|
||||
|
||||
The other way that they can used is by being automatically added by some
|
||||
workloads to identify their results metrics and artifacts. For example some
|
||||
workloads perform multiple tests with the same execution run and therefore will
|
||||
use metrics to differentiate between them, e.g. the ``recentfling`` workload
|
||||
will use classifiers to distinguish between which loop a particular result is
|
||||
for or whether it is an average across all loops ran.
|
||||
|
||||
The output from the agenda above will produce a csv file similar to what is
|
||||
shown below. Some columns have been omitted for clarity however as can been seen
|
||||
the custom **frequency** classifier column has been added and populated, along
|
||||
with the **loop** classifier added by the workload.
|
||||
|
||||
::
|
||||
|
||||
id | workload | metric | freq | loop | value ‖
|
||||
max_speed-wk1 | recentfling | 90th Percentile | 1700000 | 1 | 8 ‖
|
||||
max_speed-wk1 | recentfling | 95th Percentile | 1700000 | 1 | 9 ‖
|
||||
max_speed-wk1 | recentfling | 99th Percentile | 1700000 | 1 | 16 ‖
|
||||
max_speed-wk1 | recentfling | Jank | 1700000 | 1 | 11 ‖
|
||||
max_speed-wk1 | recentfling | Jank% | 1700000 | 1 | 1 ‖
|
||||
# ...
|
||||
max_speed-wk1 | recentfling | Jank | 1700000 | 3 | 1 ‖
|
||||
max_speed-wk1 | recentfling | Jank% | 1700000 | 3 | 0 ‖
|
||||
max_speed-wk1 | recentfling | Average 90th Percentile  | 1700000 | Average | 7 ‖
|
||||
max_speed-wk1 | recentfling | Average 95th Percentile | 1700000 | Average | 8 ‖
|
||||
max_speed-wk1 | recentfling | Average 99th Percentile | 1700000 | Average | 14 ‖
|
||||
max_speed-wk1 | recentfling | Average Jank | 1700000 | Average | 6 ‖
|
||||
max_speed-wk1 | recentfling | Average Jank% | 1700000 | Average | 0 ‖
|
||||
min_speed-wk1 | recentfling | 90th Percentile | 200000 | 1 | 7 ‖
|
||||
min_speed-wk1 | recentfling | 95th Percentile | 200000 | 1 | 8 ‖
|
||||
min_speed-wk1 | recentfling | 99th Percentile | 200000 | 1 | 14 ‖
|
||||
min_speed-wk1 | recentfling | Jank | 200000 | 1 | 5 ‖
|
||||
min_speed-wk1 | recentfling | Jank% | 200000 | 1 | 0 ‖
|
||||
# ...
|
||||
min_speed-wk1 | recentfling | Jank | 200000 | 3 | 5 ‖
|
||||
min_speed-wk1 | recentfling | Jank% | 200000 | 3 | 0 ‖
|
||||
min_speed-wk1 | recentfling | Average 90th Percentile | 200000 | Average | 7 ‖
|
||||
min_speed-wk1 | recentfling | Average 95th Percentile | 200000 | Average | 8 ‖
|
||||
min_speed-wk1 | recentfling | Average 99th Percentile | 200000 | Average | 13 ‖
|
||||
min_speed-wk1 | recentfling | Average Jank | 200000 | Average | 4 ‖
|
||||
min_speed-wk1 | recentfling | Average Jank% | 200000 | Average | 0 ‖
|
||||
|
||||
|
||||
|
||||
.. _sections:
|
||||
|
||||
Sections
|
||||
--------
|
||||
|
||||
It is a common requirement to be able to run the same set of workloads under
|
||||
different device configurations. E.g. you may want to investigate the impact of
|
||||
changing a particular setting to different values on the benchmark scores, or to
|
||||
quantify the impact of enabling a particular feature in the kernel. WA allows
|
||||
this by defining "sections" of configuration with an agenda.
|
||||
|
||||
For example, suppose that we want to measure the impact of using 3 different
|
||||
cpufreq governors on 2 benchmarks. We could create 6 separate workload specs
|
||||
and set the governor runtime parameter for each entry. However, this
|
||||
introduces a lot of duplication; and what if we want to change spec
|
||||
configuration? We would have to change it in multiple places, running the risk
|
||||
of forgetting one.
|
||||
|
||||
A better way is to keep the two workload specs and define a section for each
|
||||
governor:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
iterations: 5
|
||||
augmentations:
|
||||
- ~cpufreq
|
||||
- csv
|
||||
sysfs_extractor:
|
||||
paths: [/proc/meminfo]
|
||||
csv:
|
||||
use_all_classifiers: True
|
||||
sections:
|
||||
- id: perf
|
||||
runtime_params:
|
||||
cpu0_governor: performance
|
||||
- id: inter
|
||||
runtime_params:
|
||||
cpu0_governor: interactive
|
||||
- id: sched
|
||||
runtime_params:
|
||||
cpu0_governor: sched
|
||||
workloads:
|
||||
- id: 01_dhry
|
||||
name: dhrystone
|
||||
label: dhrystone_15over6
|
||||
workload_params:
|
||||
threads: 6
|
||||
mloops: 15
|
||||
- id: 02_memc
|
||||
name: memcpy
|
||||
augmentations: [sysfs_extractor]
|
||||
|
||||
A section, just like a workload spec, needs to have a unique ID. Apart from
|
||||
that, a "section" is similar to the ``config`` section we've already seen --
|
||||
everything that goes into a section will be applied to each workload spec.
|
||||
Workload specs defined under top-level ``workloads`` entry will be executed for
|
||||
each of the sections listed under ``sections``.
|
||||
|
||||
.. note:: It is also possible to have a ``workloads`` entry within a section,
|
||||
in which case, those workloads will only be executed for that specific
|
||||
section.
|
||||
|
||||
In order to maintain the uniqueness requirement of workload spec IDs, they will
|
||||
be namespaced under each section by prepending the section ID to the spec ID
|
||||
with a dash. So in the agenda above, we no longer have a workload spec
|
||||
with ID ``01_dhry``, instead there are two specs with IDs ``perf-01_dhry`` and
|
||||
``inter-01_dhry``.
|
||||
|
||||
Note that the ``config`` section still applies to every spec in the agenda. So
|
||||
the precedence order is -- spec settings override section settings, which in
|
||||
turn override global settings.
|
||||
|
||||
|
||||
.. _section-groups:
|
||||
|
||||
Section Groups
|
||||
---------------
|
||||
|
||||
Section groups are a way of grouping sections together and are used to produce a
|
||||
cross product of each of the different groups. This can be useful when you want
|
||||
to run a set of experiments with all the available combinations without having
|
||||
to specify each combination manually.
|
||||
|
||||
For example if we want to investigate the differences between running the
|
||||
maximum and minimum frequency with both the maximum and minimum number of cpus
|
||||
online, we can create an agenda as follows:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
sections:
|
||||
- id: min_freq
|
||||
runtime_parameters:
|
||||
freq: min
|
||||
group: frequency
|
||||
- id: max_freq
|
||||
runtime_parameters:
|
||||
freq: max
|
||||
group: frequency
|
||||
|
||||
- id: min_cpus
|
||||
runtime_parameters:
|
||||
cpus: 1
|
||||
group: cpus
|
||||
- id: max_cpus
|
||||
runtime_parameters:
|
||||
cpus: 8
|
||||
group: cpus
|
||||
|
||||
workloads:
|
||||
- dhrystone
|
||||
|
||||
This will result in 8 jobs being generated, one for each of the possible combinations.
|
||||
|
||||
::
|
||||
|
||||
min_freq-min_cpus-wk1 (dhrystone)
|
||||
min_freq-max_cpus-wk1 (dhrystone)
|
||||
max_freq-min_cpus-wk1 (dhrystone)
|
||||
max_freq-max_cpus-wk1 (dhrystone)
|
||||
min_freq-min_cpus-wk1 (dhrystone)
|
||||
min_freq-max_cpus-wk1 (dhrystone)
|
||||
max_freq-min_cpus-wk1 (dhrystone)
|
||||
max_freq-max_cpus-wk1 (dhrystone)
|
||||
|
||||
Each of the generated jobs will have :ref:`classifiers <classifiers>` for
|
||||
each group and the associated id automatically added.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# ...
|
||||
print('Job ID: {}'.format(job.id))
|
||||
print('Classifiers:')
|
||||
for k, v in job.classifiers.items():
|
||||
print(' {}: {}'.format(k, v))
|
||||
|
||||
Job ID: min_freq-min_cpus-no_idle-wk1
|
||||
Classifiers:
|
||||
frequency: min_freq
|
||||
cpus: min_cpus
|
||||
|
||||
|
||||
.. _augmentations:
|
||||
|
||||
Augmentations
|
||||
--------------
|
||||
|
||||
Augmentations are plugins that augment the execution of workload jobs with
|
||||
additional functionality; usually, that takes the form of generating additional
|
||||
metrics and/or artifacts, such as traces or logs. There are two types of
|
||||
augmentations:
|
||||
|
||||
Instruments
|
||||
These "instrument" a WA run in order to change its behaviour (e.g.
|
||||
introducing delays between successive job executions), or collect
|
||||
additional measurements (e.g. energy usage). Some instruments may depend
|
||||
on particular features being enabled on the target (e.g. cpufreq), or
|
||||
on additional hardware (e.g. energy probes).
|
||||
|
||||
Output processors
|
||||
These post-process metrics and artifacts generated by workloads or
|
||||
instruments, as well as target metadata collected by WA, in order to
|
||||
generate additional metrics and/or artifacts (e.g. generating statistics
|
||||
or reports). Output processors are also used to export WA output
|
||||
externally (e.g. upload to a database).
|
||||
|
||||
The main practical difference between instruments and output processors, is that
|
||||
the former rely on an active connection to the target to function, where as the
|
||||
latter only operate on previously collected results and metadata. This means
|
||||
that output processors can run "off-line" using ``wa process`` command.
|
||||
|
||||
Both instruments and output processors are configured in the same way in the
|
||||
agenda, which is why they are grouped together into "augmentations".
|
||||
Augmentations are enabled by listing them under ``augmentations`` entry in a
|
||||
config file or ``config`` section of the agenda.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
augmentations: [trace-cmd]
|
||||
|
||||
The code above illustrates an agenda entry to enable the ``trace-cmd`` instrument.
|
||||
|
||||
If you have multiple ``augmentations`` entries (e.g. both in your config file
|
||||
and in the agenda), then they will be combined, so that the final set of
|
||||
augmentations for the run will be their union.
|
||||
|
||||
.. note:: WA2 did not have augmentations, and instead supported
|
||||
"instrumentation" and "result_processors" as distinct configuration
|
||||
entries. For compatibility, these entries are still supported in
|
||||
WA3, however they should be considered to be deprecated, and their
|
||||
use is discouraged.
|
||||
|
||||
|
||||
Configuring augmentations
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Most augmentations will take parameters that modify their behavior. Parameters
|
||||
available for a particular augmentation can be viewed using ``wa show
|
||||
<augmentation name>`` command. This will also show the default values used.
|
||||
Values for these parameters can be specified by creating an entry with the
|
||||
augmentation's name, and specifying parameter values under it.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
augmentations: [trace-cmd]
|
||||
trace-cmd:
|
||||
events: ['sched*', 'power*', irq]
|
||||
buffer_size: 100000
|
||||
|
||||
The code above specifies values for ``events`` and ``buffer_size`` parameters
|
||||
for the ``trace-cmd`` instrument, as well as enabling it.
|
||||
|
||||
You may specify configuration for the same augmentation in multiple locations
|
||||
(e.g. your config file and the config section of the agenda). These entries will
|
||||
be combined to form the final configuration for the augmentation used during the
|
||||
run. If different values for the same parameter are present in multiple entries,
|
||||
the ones "more specific" to a particular run will be used (e.g. values in the
|
||||
agenda will override those in the config file).
|
||||
|
||||
.. note:: Creating an entry for an augmentation alone does not enable it! You
|
||||
          **must** list it under ``augmentations`` in order for it to be enabled
|
||||
          for a run. This makes it easier to quickly enable and disable
|
||||
augmentations with complex configurations, and also allows defining
|
||||
          "static" configuration in top-level config, without actually enabling
|
||||
the augmentation for all runs.
|
||||
|
||||
|
||||
Disabling augmentations
|
||||
^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Sometimes, you may wish to disable an augmentation for a particular run, but you
|
||||
want to keep it enabled in general. You *could* modify your config file to
|
||||
temporarily disable it. However, you must then remember to re-enable it
|
||||
afterwards. This could be inconvenient and error prone, especially if you're
|
||||
running multiple experiments in parallel and only want to disable the
|
||||
augmentation for one of them.
|
||||
|
||||
Instead, you can explicitly disable augmentation by specifying its name prefixed
|
||||
with a tilde (``~``) inside ``augmentations``.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
augmentations: [trace-cmd, ~cpufreq]
|
||||
|
||||
The code above enables ``trace-cmd`` instrument and disables ``cpufreq``
|
||||
instrument (which is enabled in the default config).
|
||||
|
||||
If you want to start configuration for an experiment from a "blank slate" and
|
||||
want to disable all previously-enabled augmentations, without necessarily
|
||||
knowing what they are, you can use the special ``~~`` entry.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
augmentations: [~~, trace-cmd, csv]
|
||||
|
||||
The code above disables all augmentations enabled up to that point, and enables
|
||||
``trace-cmd`` and ``csv`` for this run.
|
||||
|
||||
.. note:: The ``~~`` only disables augmentations from previously-processed
|
||||
sources. Its ordering in the list does not matter. For example,
|
||||
specifying ``augmentations: [trace-cmd, ~~, csv]`` will have exactly
|
||||
the same effect as above -- i.e. both trace-cmd *and* csv will be
|
||||
enabled.
|
||||
|
||||
Workload-specific augmentation
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
It is possible to enable or disable (but not configure) augmentations at
|
||||
workload or section level, as well as in the global config, in which case, the
|
||||
augmentations would only be enabled/disabled for that workload/section. If the
|
||||
same augmentation is enabled at one level and disabled at another, as with all
|
||||
WA configuration, the more specific settings will take precedence over the less
|
||||
specific ones (i.e. workloads override sections that, in turn, override global
|
||||
config).
|
||||
|
||||
|
||||
Augmentations Example
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
augmentations: [~~, fps]
|
||||
trace-cmd:
|
||||
events: ['sched*', 'power*', irq]
|
||||
buffer_size: 100000
|
||||
file_poller:
|
||||
files:
|
||||
- /sys/class/thermal/thermal_zone0/temp
|
||||
sections:
|
||||
        - classifers:  # typo in original example -- the actual config key is ``classifiers``
|
||||
type: energy
|
||||
augmentations: [energy_measurement]
|
||||
        - classifers:  # typo in original example -- the actual config key is ``classifiers``
|
||||
type: trace
|
||||
augmentations: [trace-cmd, file_poller]
|
||||
workloads:
|
||||
- gmail
|
||||
- geekbench
|
||||
- googleplaybooks
|
||||
- name: dhrystone
|
||||
augmentations: [~fps]
|
||||
|
||||
The example above shows an experiment that runs a number of workloads in order
|
||||
to evaluate their thermal impact and energy usage. All previously-configured
|
||||
augmentations are disabled with ``~~``, so that only configuration specified in
|
||||
this agenda is enabled. Since most of the workloads are "productivity" use cases
|
||||
that do not generate their own metrics, ``fps`` instrument is enabled to get
|
||||
some meaningful performance metrics for them; the only exception is
|
||||
``dhrystone`` which is a benchmark that reports its own metrics and has no GUI,
|
||||
so the instrument is disabled for it using ``~fps``.
|
||||
|
||||
Each workload will be run in two configurations: once, to collect energy
|
||||
measurements, and once to collect thermal data and kernel trace. Trace can give
|
||||
insight into why a workload is using more or less energy than expected, but it
|
||||
can be relatively intrusive and might impact absolute energy and performance
|
||||
metrics, which is why it is collected separately. Classifiers_ are used to
|
||||
separate metrics from the two configurations in the results.
|
||||
|
||||
.. _other-agenda-configuration:
|
||||
|
||||
Other Configuration
|
||||
-------------------
|
||||
|
||||
.. _configuration_in_agenda:
|
||||
|
||||
As mentioned previously, ``config`` section in an agenda can contain anything
|
||||
that can be defined in ``config.yaml``. Certain configuration (e.g. ``run_name``)
|
||||
makes more sense to define in an agenda than a config file. Refer to the
|
||||
:ref:`configuration-specification` section for details.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
project: governor_comparison
|
||||
run_name: performance_vs_interactive
|
||||
|
||||
device: generic_android
|
||||
reboot_policy: never
|
||||
|
||||
iterations: 5
|
||||
augmentations:
|
||||
- ~cpufreq
|
||||
- csv
|
||||
sysfs_extractor:
|
||||
paths: [/proc/meminfo]
|
||||
csv:
|
||||
use_all_classifiers: True
|
||||
sections:
|
||||
- id: perf
|
||||
runtime_params:
|
||||
sysfile_values:
|
||||
cpu0_governor: performance
|
||||
- id: inter
|
||||
runtime_params:
|
||||
cpu0_governor: interactive
|
||||
workloads:
|
||||
- id: 01_dhry
|
||||
name: dhrystone
|
||||
label: dhrystone_15over6
|
||||
workload_params:
|
||||
threads: 6
|
||||
mloops: 15
|
||||
- id: 02_memc
|
||||
name: memcpy
|
||||
augmentations: [sysfs_extractor]
|
||||
- id: 03_cycl
|
||||
name: cyclictest
|
||||
iterations: 10
|
@ -1,308 +0,0 @@
|
||||
.. _setting-up-a-device:
|
||||
|
||||
Setting Up A Device
|
||||
===================
|
||||
|
||||
WA should work with most Android devices out-of-the box, as long as the device
|
||||
is discoverable by ``adb`` (i.e. gets listed when you run ``adb devices``). For
|
||||
USB-attached devices, that should be the case; for network devices, ``adb connect``
|
||||
would need to be invoked with the IP address of the device. If there is only one
|
||||
device connected to the host running WA, then no further configuration should be
|
||||
necessary (though you may want to :ref:`tweak some Android settings <configuring-android>`\ ).
|
||||
|
||||
If you have multiple devices connected, have a non-standard Android build (e.g.
|
||||
on a development board), or want to use of the more advanced WA functionality,
|
||||
further configuration will be required.
|
||||
|
||||
Android
|
||||
-------
|
||||
|
||||
.. _android-general-device-setup:
|
||||
|
||||
General Device Setup
|
||||
^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
You can specify the device interface by setting ``device`` setting in a
|
||||
``config`` file or section. Available interfaces can be viewed by running ``wa
|
||||
list targets`` command. If you don't see your specific platform listed (which is
|
||||
likely unless you're using one of the Arm-supplied platforms), then you should
|
||||
use ``generic_android`` interface (this is what is used by the default config).
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
device: generic_android
|
||||
|
||||
The device interface may be configured through ``device_config`` setting, whose
|
||||
value is a ``dict`` mapping setting names to their values. Some of the most
|
||||
common parameters you might want to change are outlined below.
|
||||
|
||||
:device: If you have multiple Android devices connected to the host machine, you will
|
||||
   need to set this to indicate to WA which device you want it to use. This will
|
||||
   be the adb name that is displayed when running ``adb devices``
|
||||
|
||||
:working_directory: WA needs a "working" directory on the device which it will use for collecting
|
||||
traces, caching assets it pushes to the device, etc. By default, it will
|
||||
create one under ``/sdcard`` which should be mapped and writable on standard
|
||||
Android builds. If this is not the case for your device, you will need to
|
||||
specify an alternative working directory (e.g. under ``/data/local``).
|
||||
|
||||
:load_default_modules: A number of "default" modules (e.g. for cpufreq
|
||||
subsystem) are loaded automatically, unless explicitly disabled. If you
|
||||
encounter an issue with one of the modules then this setting can be set to
|
||||
   ``False`` and any specific modules that you require can be requested via the
|
||||
``modules`` entry.
|
||||
|
||||
:modules: A list of additional modules to be installed for the target. Devlib
|
||||
implements functionality for particular subsystems as modules. If additional
|
||||
modules need to be loaded, they may be specified using this parameter.
|
||||
|
||||
Please see the `devlib documentation <http://devlib.readthedocs.io/en/latest/modules.html>`_
|
||||
for information on the available modules.
|
||||
|
||||
.. _core-names:
|
||||
|
||||
:core_names: ``core_names`` should be a list of core names matching the order in which
|
||||
they are exposed in sysfs. For example, Arm TC2 SoC is a 2x3 big.LITTLE
|
||||
system; its core_names would be ``['a7', 'a7', 'a7', 'a15', 'a15']``,
|
||||
indicating that cpu0-cpu2 in cpufreq sysfs structure are A7's and cpu3 and
|
||||
cpu4 are A15's.
|
||||
|
||||
.. note:: This should not usually need to be provided as it will be
|
||||
automatically extracted from the target.
|
||||
|
||||
|
||||
A typical ``device_config`` inside ``config.yaml`` may look something like
|
||||
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
device_config:
|
||||
device: 0123456789ABCDEF
|
||||
# ...
|
||||
|
||||
|
||||
or a more specific config could be:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
device_config:
|
||||
device: 0123456789ABCDEF
|
||||
        working_directory: '/sdcard/wa-working'
|
||||
load_default_modules: True
|
||||
modules: ['hotplug', 'cpufreq']
|
||||
core_names : ['a7', 'a7', 'a7', 'a15', 'a15']
|
||||
# ...
|
||||
|
||||
.. _configuring-android:
|
||||
|
||||
Configuring Android
|
||||
^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
There are a few additional tasks you may need to perform once you have a device
|
||||
booted into Android (especially if this is an initial boot of a fresh OS
|
||||
deployment):
|
||||
|
||||
- You have gone through FTU (first time usage) on the home screen and
|
||||
in the apps menu.
|
||||
- You have disabled the screen lock.
|
||||
- You have set sleep timeout to the highest possible value (30 mins on
|
||||
most devices).
|
||||
- You have set the locale language to "English" (this is important for
|
||||
some workloads in which UI automation looks for specific text in UI
|
||||
elements).
|
||||
|
||||
|
||||
Juno Setup
|
||||
----------
|
||||
|
||||
.. note:: At the time of writing, the Android software stack on Juno was still
          very immature. Some workloads may not run, and there may be stability
|
||||
very immature. Some workloads may not run, and there maybe stability
|
||||
issues with the device.
|
||||
|
||||
|
||||
The full software stack can be obtained from Linaro:
|
||||
|
||||
https://releases.linaro.org/android/images/lcr-reference-juno/latest/
|
||||
|
||||
Please follow the instructions on the "Binary Image Installation" tab on that
|
||||
page. More up-to-date firmware and kernel may also be obtained by registered
|
||||
members from ARM Connected Community: http://www.arm.com/community/ (though this
|
||||
is not guaranteed to work with the Linaro file system).
|
||||
|
||||
UEFI
|
||||
^^^^
|
||||
|
||||
Juno uses UEFI_ to boot the kernel image. UEFI supports multiple boot
|
||||
configurations, and presents a menu on boot to select (in default configuration
|
||||
it will automatically boot the first entry in the menu if not interrupted before
|
||||
a timeout). WA will look for a specific entry in the UEFI menu
|
||||
(``'WA'`` by default, but that may be changed by setting ``uefi_entry`` in the
|
||||
``device_config``). When following the UEFI instructions on the above Linaro
|
||||
page, please make sure to name the entry appropriately (or to correctly set the
|
||||
``uefi_entry``).
|
||||
|
||||
.. _UEFI: http://en.wikipedia.org/wiki/UEFI
|
||||
|
||||
There are two supported ways for Juno to discover kernel images through UEFI. It
|
||||
can either load them from NOR flash on the board, or from the boot partition on
|
||||
the file system. The setup described on the Linaro page uses the boot partition
|
||||
method.
|
||||
|
||||
If WA does not find the UEFI entry it expects, it will create one. However, it
|
||||
will assume that the kernel image resides in NOR flash, which means it will not
|
||||
work with Linaro file system. So if you're replicating the Linaro setup exactly,
|
||||
you will need to create the entry manually, as outlined on the above-linked page.
|
||||
|
||||
Rebooting
|
||||
^^^^^^^^^
|
||||
|
||||
At the time of writing, normal Android reboot did not work properly on Juno
|
||||
Android, causing the device to crash into an irrecoverable state. Therefore, WA
|
||||
will perform a hard reset to reboot the device. It will attempt to do this by
|
||||
toggling the DTR line on the serial connection to the device. In order for this
|
||||
to work, you need to make sure that SW1 configuration switch on the back panel of
|
||||
the board (the right-most DIP switch) is toggled *down*.
|
||||
|
||||
|
||||
Linux
|
||||
-----
|
||||
|
||||
General Device Setup
|
||||
^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
You can specify the device interface by setting ``device`` setting in a
|
||||
``config`` file or section. Available interfaces can be viewed by running
|
||||
``wa list targets`` command. If you don't see your specific platform listed
|
||||
(which is likely unless you're using one of the Arm-supplied platforms), then
|
||||
you should use ``generic_linux`` interface.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
device: generic_linux
|
||||
|
||||
The device interface may be configured through ``device_config`` setting, whose
|
||||
value is a ``dict`` mapping setting names to their values. Some of the most
|
||||
common parameters you might want to change are outlined below.
|
||||
|
||||
|
||||
:host: This should be either the DNS name or IP address of the device.
|
||||
|
||||
:username: The login name of the user on the device that WA will use. This user should
|
||||
have a home directory (unless an alternative working directory is specified
|
||||
using ``working_directory`` config -- see below), and, for full
|
||||
functionality, the user should have sudo rights (WA will be able to use
|
||||
   sudo-less accounts but some instruments or workloads may not work).
|
||||
|
||||
:password: Password for the account on the device. Either this of a ``keyfile`` (see
|
||||
below) must be specified.
|
||||
|
||||
:keyfile: If key-based authentication is used, this may be used to specify the SSH identity
|
||||
file instead of the password.
|
||||
|
||||
:property_files: This is a list of paths that will be pulled for each WA run into the __meta
|
||||
subdirectory in the results. The intention is to collect meta-data about the
|
||||
   device that may aid in reproducing the results later. The paths specified do
|
||||
not have to exist on the device (they will be ignored if they do not). The
|
||||
default list is ``['/proc/version', '/etc/debian_version', '/etc/lsb-release', '/etc/arch-release']``
|
||||
|
||||
|
||||
In addition, ``working_directory``, ``core_names``, ``modules`` etc. can also
|
||||
be specified and have the same meaning as for Android devices (see above).
|
||||
|
||||
A typical ``device_config`` inside ``config.yaml`` may look something like
|
||||
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
device_config:
|
||||
host: 192.168.0.7
|
||||
username: guest
|
||||
password: guest
|
||||
# ...
|
||||
|
||||
Chrome OS
|
||||
---------
|
||||
|
||||
General Device Setup
|
||||
^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
You can specify the device interface by setting ``device`` setting in a
|
||||
``config`` file or section. Available interfaces can be viewed by
|
||||
running ``wa list targets`` command. If you don't see your specific platform
|
||||
listed (which is likely unless you're using one of the Arm-supplied platforms), then
|
||||
you should use ``generic_chromeos`` interface.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
device: generic_chromeos
|
||||
|
||||
The device interface may be configured through ``device_config`` setting, whose
|
||||
value is a ``dict`` mapping setting names to their values. The ChromeOS target
|
||||
is essentially the same as a linux device and requires a similar setup, however
|
||||
it also optionally supports connecting to an android container running on the
|
||||
device which will be automatically detected if present. If the device supports
|
||||
android applications then the android configuration is also supported. In order
|
||||
to support this WA will open 2 connections to the device, one via SSH to
|
||||
the main OS and another via ADB to the android container where a limited
|
||||
subset of functionality can be performed.
|
||||
|
||||
In order to distinguish between the two connections some of the android specific
|
||||
configuration has been renamed to reflect the destination.
|
||||
|
||||
:android_working_directory: WA needs a "working" directory on the device which it will use for collecting
|
||||
traces, caching assets it pushes to the device, etc. By default, it will
|
||||
create one under ``/sdcard`` which should be mapped and writable on standard
|
||||
Android builds. If this is not the case for your device, you will need to
|
||||
specify an alternative working directory (e.g. under ``/data/local``).
|
||||
|
||||
|
||||
A typical ``device_config`` inside ``config.yaml`` for a ChromeOS device may
|
||||
look something like
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
device_config:
|
||||
host: 192.168.0.7
|
||||
username: root
|
||||
        android_working_directory: '/sdcard/wa-working'
|
||||
# ...
|
||||
|
||||
.. note:: This assumes that your Chromebook is in developer mode and is
|
||||
configured to run an SSH server with the appropriate ssh keys added to the
|
||||
authorized_keys file on the device.
|
||||
|
||||
|
||||
Related Settings
|
||||
----------------
|
||||
|
||||
Reboot Policy
|
||||
^^^^^^^^^^^^^
|
||||
|
||||
This indicates when during WA execution the device will be rebooted. By default
|
||||
this is set to ``as_needed``, indicating that WA will only reboot the device if
|
||||
it becomes unresponsive. Please see ``reboot_policy`` documentation in
|
||||
:ref:`configuration-specification` for more details.
|
||||
|
||||
Execution Order
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
``execution_order`` defines the order in which WA will execute workloads.
|
||||
``by_iteration`` (set by default) will execute the first iteration of each spec
|
||||
first, followed by the second iteration of each spec (that defines more than one
|
||||
iteration) and so forth. The alternative will loop through all iterations for
|
||||
the first spec first, then move on to the second spec, etc. Again, please see
|
||||
:ref:`configuration-specification` for more details.
|
||||
|
||||
|
||||
Adding a new target interface
|
||||
-----------------------------
|
||||
|
||||
If you are working with a particularly unusual device (e.g. an early-stage
|
||||
development board) or need to be able to handle some quirk of your Android
|
||||
build, configuration available in ``generic_android`` interface may not be
|
||||
enough for you. In that case, you may need to write a custom interface for your
|
||||
device. A device interface is an ``Extension`` (a plug-in) type in WA and is
|
||||
implemented similar to other extensions (such as workloads or instruments).
|
||||
Please refer to the
|
||||
:ref:`adding a custom target <adding-custom-target-example>` section for
|
||||
information on how this may be done.
|
@ -1,159 +0,0 @@
|
||||
.. _revent_files_creation:
|
||||
|
||||
Automating GUI Interactions With Revent
|
||||
=======================================
|
||||
|
||||
Overview and Usage
|
||||
------------------
|
||||
|
||||
The revent utility can be used to record and later play back a sequence of user
|
||||
input events, such as key presses and touch screen taps. This is an alternative
|
||||
to Android UI Automator for providing automation for workloads.
|
||||
|
||||
Using revent with workloads
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Some workloads (pretty much all games) rely on recorded revents for their
|
||||
execution. ReventWorkloads require between 1 and 4 revent files to be run.
|
||||
There is one mandatory recording, ``run``, for performing the actual execution of
|
||||
the workload and the remaining stages are optional. ``setup`` can be used to perform
|
||||
the initial setup (navigating menus, selecting game modes, etc).
|
||||
``extract_results`` can be used to perform any actions after the main stage of
|
||||
the workload for example to navigate a results or summary screen of the app. And
|
||||
finally ``teardown`` can be used to perform any final actions for example
|
||||
exiting the app.
|
||||
|
||||
Because revents are very device-specific\ [*]_, these files would need to
|
||||
be recorded for each device.
|
||||
|
||||
The files must be called ``<device name>.(setup|run|extract_results|teardown).revent``,
|
||||
where ``<device name>`` is the name of your device (as defined by the model
|
||||
name of your device which can be retrieved with
|
||||
``adb shell getprop ro.product.model`` or by the ``name`` attribute of your
|
||||
customized device class).
|
||||
|
||||
WA will look for these files in two places:
|
||||
``<installdir>/wa/workloads/<workload name>/revent_files`` and
|
||||
``$WA_USER_DIRECTORY/dependencies/<workload name>``. The
|
||||
first location is primarily intended for revent files that come with WA (and if
|
||||
you did a system-wide install, you'll need sudo to add files there), so it's
|
||||
probably easier to use the second location for the files you record. Also, if
|
||||
revent files for a workload exist in both locations, the files under
|
||||
``$WA_USER_DIRECTORY/dependencies`` will be used in favour
|
||||
of those installed with WA.
|
||||
|
||||
.. [*] It's not just about screen resolution -- the event codes may be different
|
||||
even if devices use the same screen.
|
||||
|
||||
.. _revent-recording:
|
||||
|
||||
Recording
|
||||
^^^^^^^^^
|
||||
|
||||
WA features a ``record`` command that will automatically deploy and start revent
|
||||
on the target device.
|
||||
|
||||
If you want to simply record a single recording on the device then the following
|
||||
command can be used which will save the recording in the current directory::
|
||||
|
||||
wa record
|
||||
|
||||
There is one mandatory stage called 'run' and 3 optional stages: 'setup',
|
||||
'extract_results' and 'teardown' which are used for playback of a workload.
|
||||
The different stages are distinguished by the suffix in the recording file path.
|
||||
In order to facilitate in creating these recordings you can specify ``--setup``,
|
||||
``--extract-results``, ``--teardown`` or ``--all`` to indicate which stages you
|
||||
would like to create recordings for and the appropriate file name will be generated.
|
||||
|
||||
You can also directly specify a workload to create recordings for and WA will
|
||||
walk you through the relevant steps. For example if we wanted to create
|
||||
recordings for the Angrybirds Rio workload we can specify the ``workload`` flag
|
||||
with ``-w``. And in this case WA can be used to automatically deploy and launch
|
||||
the workload and record ``setup`` (``-s``) , ``run`` (``-r``) and ``teardown``
|
||||
(``-t``) stages for the workload. In order to do this we would use the following
|
||||
command with an example output shown below::
|
||||
|
||||
wa record -srt -w angrybirds_rio
|
||||
|
||||
::
|
||||
|
||||
INFO Setting up target
|
||||
INFO Deploying angrybirds_rio
|
||||
INFO Press Enter when you are ready to record SETUP...
|
||||
[Pressed Enter]
|
||||
INFO Press Enter when you have finished recording SETUP...
|
||||
[Pressed Enter]
|
||||
        INFO     Pulling '<device_model>.setup.revent' from device
|
||||
INFO Press Enter when you are ready to record RUN...
|
||||
[Pressed Enter]
|
||||
INFO Press Enter when you have finished recording RUN...
|
||||
[Pressed Enter]
|
||||
INFO Pulling '<device_model>.run.revent' from device
|
||||
INFO Press Enter when you are ready to record TEARDOWN...
|
||||
[Pressed Enter]
|
||||
INFO Press Enter when you have finished recording TEARDOWN...
|
||||
[Pressed Enter]
|
||||
INFO Pulling '<device_model>.teardown.revent' from device
|
||||
INFO Tearing down angrybirds_rio
|
||||
INFO Recording(s) are available at: '$WA_USER_DIRECTORY/dependencies/angrybirds_rio/revent_files'
|
||||
|
||||
Once you have made your desired recordings, you can either manually playback
|
||||
individual recordings using the :ref:`replay <replay-command>` command or, with
|
||||
the recordings in the appropriate dependencies location, simply run the workload
|
||||
using the :ref:`run <run-command>` command and then all the available recordings will be
|
||||
played back automatically.
|
||||
|
||||
For more information on available arguments please see the :ref:`Record <record_command>`
|
||||
command.
|
||||
|
||||
.. note:: By default revent recordings are not portable across devices and
|
||||
therefore will require recording for each new device you wish to use the
|
||||
workload on. Alternatively a "gamepad" recording mode is also supported.
|
||||
This mode requires a gamepad to be connected to the device when recording
|
||||
but the recordings produced in this mode should be portable across devices.
|
||||
|
||||
.. _revent_replaying:
|
||||
|
||||
Replaying
|
||||
^^^^^^^^^
|
||||
|
||||
If you want to replay a single recorded file, you can use ``wa replay``
|
||||
providing it with the file you want to replay. An example of the command output
|
||||
is shown below::
|
||||
|
||||
wa replay my_recording.revent
|
||||
INFO Setting up target
|
||||
INFO Pushing file to target
|
||||
INFO Starting replay
|
||||
INFO Finished replay
|
||||
|
||||
If you are using a device that supports android you can optionally specify a
|
||||
package name to launch before replaying the recording.
|
||||
|
||||
If you have recorded the required files for your workload and have placed them in
|
||||
the appropriate location (or specified the workload during recording) then you
|
||||
can simply run the relevant workload and your recordings will be replayed at the
|
||||
appropriate times automatically.
|
||||
|
||||
For more information run please read :ref:`replay-command`
|
||||
|
||||
Revent vs UiAutomator
|
||||
----------------------
|
||||
|
||||
In general, Android UI Automator is the preferred way of automating user input
|
||||
for Android workloads because, unlike revent, UI Automator does not depend on a
|
||||
particular screen resolution, and so is more portable across different devices.
|
||||
It also gives better control and can potentially be faster for doing UI
|
||||
manipulations, as input events are scripted based on the available UI elements,
|
||||
rather than generated by human input.
|
||||
|
||||
On the other hand, revent can be used to manipulate pretty much any workload,
|
||||
where as UI Automator only works for Android UI elements (such as text boxes or
|
||||
radio buttons), which makes the latter useless for things like games. Recording
|
||||
revent sequence is also faster than writing automation code (on the other hand,
|
||||
one would need to maintain a different revent log for each screen resolution).
|
||||
|
||||
.. note:: For ChromeOS targets, UI Automator can only be used with android
|
||||
   applications and not the ChromeOS host applications themselves.
|
||||
|
||||
|
@ -1,330 +0,0 @@
|
||||
.. _installation:
|
||||
|
||||
************
|
||||
Installation
|
||||
************
|
||||
|
||||
.. contents:: Contents
|
||||
:depth: 2
|
||||
:local:
|
||||
|
||||
|
||||
.. module:: wa
|
||||
|
||||
This page describes the 3 methods of installing Workload Automation 3. The first
|
||||
option is to use :ref:`pip` which will install the latest release of WA, the
|
||||
latest development version from :ref:`github <github>` or via a
|
||||
:ref:`dockerfile`.
|
||||
|
||||
|
||||
Prerequisites
|
||||
=============
|
||||
|
||||
Operating System
|
||||
----------------
|
||||
|
||||
WA runs on a native Linux install. It has been tested on recent Ubuntu releases,
|
||||
but other recent Linux distributions should work as well. It should run on
|
||||
either 32-bit or 64-bit OS, provided the correct version of dependencies (see
|
||||
below) are installed. Officially, **other environments are not supported**.
|
||||
WA has been known to run on Linux Virtual machines and in Cygwin environments,
|
||||
though additional configuration may be required in both cases (known issues
|
||||
include making sure USB/serial connections are passed to the VM, and wrong
|
||||
python/pip binaries being picked up in Cygwin). WA *should* work on other
|
||||
Unix-based systems such as BSD or Mac OS X, but it has not been tested
|
||||
in those environments. WA *does not* run on Windows (though it should be
|
||||
possible to get limited functionality with minimal porting effort).
|
||||
|
||||
.. Note:: If you plan to run Workload Automation on Linux devices only,
|
||||
SSH is required, and Android SDK is optional if you wish
|
||||
to run WA on Android devices at a later time. Then follow the
|
||||
steps to install the necessary python packages to set up WA.
|
||||
|
||||
However, you would be starting off with a limited number of
|
||||
workloads that will run on Linux devices.
|
||||
|
||||
Android SDK
|
||||
-----------
|
||||
|
||||
To interact with Android devices you will need to have the Android SDK
|
||||
with at least one platform installed.
|
||||
To install it, download the ADT Bundle from here_. Extract it
|
||||
and add ``<path_to_android_sdk>/sdk/platform-tools`` and ``<path_to_android_sdk>/sdk/tools``
|
||||
to your ``PATH``. To test that you've installed it properly, run ``adb
|
||||
version``. The output should be similar to this::
|
||||
|
||||
adb version
|
||||
Android Debug Bridge version 1.0.39
|
||||
|
||||
.. _here: https://developer.android.com/sdk/index.html
|
||||
|
||||
Once that is working, run ::
|
||||
|
||||
android update sdk
|
||||
|
||||
This will open up a dialog box listing available android platforms and
|
||||
corresponding API levels, e.g. ``Android 4.3 (API 18)``. For WA, you will need
|
||||
at least API level 18 (i.e. Android 4.3), though installing the latest is
|
||||
usually the best bet.
|
||||
|
||||
Optionally (but recommended), you should also set ``ANDROID_HOME`` to point to
|
||||
the install location of the SDK (i.e. ``<path_to_android_sdk>/sdk``).
|
||||
|
||||
|
||||
Python
|
||||
------
|
||||
|
||||
Workload Automation 3 currently supports Python 3.5+
|
||||
|
||||
.. note:: If your system's default python version is still Python 2, please
|
||||
replace the commands listed here with their Python3 equivalent
|
||||
(e.g. python3, pip3 etc.)
|
||||
|
||||
.. _pip:
|
||||
|
||||
pip
|
||||
---
|
||||
|
||||
pip is the recommended package manager for Python. It is not part of standard
|
||||
Python distribution and would need to be installed separately. On Ubuntu and
|
||||
similar distributions, this may be done with APT::
|
||||
|
||||
sudo apt-get install python-pip
|
||||
|
||||
.. note:: Some versions of pip (in particular v1.5.4 which comes with Ubuntu
|
||||
14.04) are known to set the wrong permissions when installing
|
||||
packages, resulting in WA failing to import them. To avoid this it
|
||||
is recommended that you update pip and setuptools before proceeding
|
||||
with installation::
|
||||
|
||||
sudo -H pip install --upgrade pip
|
||||
sudo -H pip install --upgrade setuptools
|
||||
|
||||
If you do run into this issue after already installing some packages,
|
||||
you can resolve it by running ::
|
||||
|
||||
sudo chmod -R a+r /usr/local/lib/python3.X/dist-packages
|
||||
sudo find /usr/local/lib/python3.X/dist-packages -type d -exec chmod a+x {} \;
|
||||
|
||||
(The paths above will work for Ubuntu; they may need to be adjusted
|
||||
for other distros).
|
||||
|
||||
|
||||
Python Packages
|
||||
---------------
|
||||
|
||||
.. note:: pip should automatically download and install missing dependencies,
|
||||
so if you're using pip, you can skip this section. However some
|
||||
packages that will be installed have C plugins and will require Python
|
||||
development headers to install. You can get those by installing
|
||||
``python-dev`` package in apt on Ubuntu (or the equivalent for your
|
||||
distribution).
|
||||
|
||||
Workload Automation 3 depends on the following additional libraries:
|
||||
|
||||
* pexpect
|
||||
* docutils
|
||||
* pySerial
|
||||
* pyYAML
|
||||
* python-dateutil
|
||||
* louie
|
||||
* pandas
|
||||
* devlib
|
||||
* wrapt
|
||||
* requests
|
||||
* colorama
|
||||
* future
|
||||
|
||||
You can install these with pip::
|
||||
|
||||
sudo -H pip install pexpect
|
||||
sudo -H pip install pyserial
|
||||
sudo -H pip install pyyaml
|
||||
sudo -H pip install docutils
|
||||
sudo -H pip install python-dateutil
|
||||
sudo -H pip install devlib
|
||||
sudo -H pip install pandas
|
||||
sudo -H pip install louie
|
||||
sudo -H pip install wrapt
|
||||
sudo -H pip install requests
|
||||
sudo -H pip install colorama
|
||||
sudo -H pip install future
|
||||
|
||||
Some of these may also be available in your distro's repositories, e.g. ::
|
||||
|
||||
sudo apt-get install python-serial
|
||||
|
||||
Distro package versions tend to be older, so pip installation is recommended.
|
||||
However, pip will always download and try to build the source, so in some
|
||||
situations distro binaries may provide an easier fall back. Please also note that
|
||||
distro package names may differ from pip packages.
|
||||
|
||||
|
||||
Optional Python Packages
|
||||
------------------------
|
||||
|
||||
.. note:: Unlike the mandatory dependencies in the previous section,
|
||||
pip will *not* install these automatically, so you will have
|
||||
to explicitly install them if/when you need them.
|
||||
|
||||
In addition to the mandatory packages listed in the previous sections, some WA
|
||||
functionality (e.g. certain plugins) may have additional dependencies. Since
|
||||
they are not necessary to be able to use most of WA, they are not made mandatory
|
||||
to simplify initial WA installation. If you try to use a plugin that has
|
||||
additional, unmet dependencies, WA will tell you before starting the run, and
|
||||
you can install it then. They are listed here for those that would rather
|
||||
install them upfront (e.g. if you're planning to use WA in an environment that
|
||||
may not always have Internet access).
|
||||
|
||||
* nose
|
||||
* mock
|
||||
* daqpower
|
||||
* sphinx
|
||||
* sphinx_rtd_theme
|
||||
* psycopg2-binary
|
||||
|
||||
|
||||
|
||||
.. _github:
|
||||
|
||||
Installing
|
||||
==========
|
||||
|
||||
Installing the latest released version from PyPI (Python Package Index)::
|
||||
|
||||
sudo -H pip install wlauto
|
||||
|
||||
This will install WA along with its mandatory dependencies. If you would like to
|
||||
install all optional dependencies at the same time, do the following instead::
|
||||
|
||||
sudo -H pip install wlauto[all]
|
||||
|
||||
|
||||
Alternatively, you can also install the latest development version from GitHub
|
||||
(you will need git installed for this to work)::
|
||||
|
||||
git clone git@github.com:ARM-software/workload-automation.git workload-automation
|
||||
cd workload-automation
|
||||
sudo -H python setup.py install
|
||||
|
||||
.. note:: Please note that if using pip to install from github this will most
|
||||
likely result in an older and incompatible version of devlib being
|
||||
installed alongside WA. If you wish to use pip please also manually
|
||||
install the latest version of
|
||||
`devlib <https://github.com/ARM-software/devlib>`_.
|
||||
|
||||
.. note:: Please note that while a `requirements.txt` is included, this is
|
||||
designed to be a reference of known working packages rather than to
|
||||
be used as part of a standard installation. The version restrictions
|
||||
in place as part of `setup.py` should automatically ensure the correct
|
||||
packages are installed, however if encountering issues please try
|
||||
updating/downgrading to the package versions list within.
|
||||
|
||||
|
||||
If the above succeeds, try ::
|
||||
|
||||
wa --version
|
||||
|
||||
Hopefully, this should output something along the lines of ::
|
||||
|
||||
"Workload Automation version $version".
|
||||
|
||||
.. _dockerfile:
|
||||
|
||||
Dockerfile
|
||||
============
|
||||
|
||||
As an alternative we also provide a Dockerfile that will create an image called
|
||||
wadocker, and is preconfigured to run WA and devlib. Please note that the build
|
||||
process automatically accepts the licenses for the Android SDK, so please be
|
||||
sure that you are willing to accept these prior to building and running the
|
||||
image in a container.
|
||||
|
||||
The Dockerfile can be found in the "extras" directory or online at
|
||||
`<https://github.com/ARM-software/workload-automation/blob/next/extras/Dockerfile>`_
|
||||
which contains additional information about how to build and to use the file.
|
||||
|
||||
|
||||
(Optional) Post Installation
|
||||
============================
|
||||
|
||||
Some WA plugins have additional dependencies that need to be
|
||||
satisfied before they can be used. Not all of these can be provided with WA and
|
||||
so will need to be supplied by the user. They should be placed into
|
||||
``~/.workload_automation/dependencies/<extension name>`` so that WA can find
|
||||
them (you may need to create the directory if it doesn't already exist). You
|
||||
only need to provide the dependencies for workloads you want to use.
|
||||
|
||||
.. _apk_files:
|
||||
|
||||
APK Files
|
||||
---------
|
||||
|
||||
APKs are application packages used by Android. These are necessary to install on
|
||||
a device when running an :ref:`ApkWorkload <apk-workload>` or derivative. Please
|
||||
see the workload description using the :ref:`show <show-command>` command to see
|
||||
which version of the apk the UI automation has been tested with and place the
|
||||
apk in the corresponding workloads dependency directory. Automation may also work
|
||||
with other versions (especially if it's only a minor or revision difference --
|
||||
major version differences are more likely to contain incompatible UI changes)
|
||||
but this has not been tested. As a general rule we do not guarantee support for
|
||||
the latest version of an app and they are updated on an as needed basis. We do
|
||||
however attempt to support backwards compatibility with previous major releases
|
||||
however beyond this support will likely be dropped.
|
||||
|
||||
|
||||
Gaming Workloads
|
||||
----------------
|
||||
|
||||
Some workloads (games, demos, etc) cannot be automated using Android's
|
||||
UIAutomator framework because they render the entire UI inside a single OpenGL
|
||||
surface. For these, an interaction session needs to be recorded so that it can
|
||||
be played back by WA. These recordings are device-specific, so they would need
|
||||
to be done for each device you're planning to use. The tool for doing this is
|
||||
``revent`` and it is packaged with WA. You can find instructions on how to use
|
||||
it in the :ref:`How To <revent_files_creation>` section.
|
||||
|
||||
This is the list of workloads that rely on such recordings:
|
||||
|
||||
+------------------+
|
||||
| angrybirds_rio |
|
||||
+------------------+
|
||||
| templerun2 |
|
||||
+------------------+
|
||||
|
||||
|
||||
+------------------+
|
||||
|
||||
.. _assets_repository:
|
||||
|
||||
Maintaining Centralized Assets Repository
|
||||
-----------------------------------------
|
||||
|
||||
If there are multiple users within an organization that may need to deploy
|
||||
assets for WA plugins, that organization may wish to maintain a centralized
|
||||
repository of assets that individual WA installs will be able to automatically
|
||||
retrieve asset files from as they are needed. This repository can be any
|
||||
directory on a network filer that mirrors the structure of
|
||||
``~/.workload_automation/dependencies``, i.e. has subdirectories named after
|
||||
the plugins whose assets they contain. Individual WA installs can then set
|
||||
``remote_assets_path`` setting in their config to point to the local mount of
|
||||
that location.
|
||||
|
||||
|
||||
(Optional) Uninstalling
|
||||
=======================
|
||||
|
||||
If you have installed Workload Automation via ``pip`` and wish to remove it, run this command to
|
||||
uninstall it::
|
||||
|
||||
sudo -H pip uninstall wa
|
||||
|
||||
.. Note:: This will *not* remove any user configuration (e.g. the ~/.workload_automation directory)
|
||||
|
||||
|
||||
(Optional) Upgrading
|
||||
====================
|
||||
|
||||
To upgrade Workload Automation to the latest version via ``pip``, run::
|
||||
|
||||
sudo -H pip install --upgrade --no-deps wa
|
@ -1,531 +0,0 @@
|
||||
.. _user-guide:
|
||||
|
||||
**********
|
||||
User Guide
|
||||
**********
|
||||
|
||||
This guide will show you how to quickly start running workloads using
|
||||
Workload Automation 3.
|
||||
|
||||
.. contents:: Contents
|
||||
:depth: 2
|
||||
:local:
|
||||
|
||||
---------------------------------------------------------------
|
||||
|
||||
|
||||
Install
|
||||
=======
|
||||
|
||||
.. note:: This is a quick summary. For more detailed instructions, please see
|
||||
the :ref:`installation` section.
|
||||
|
||||
Make sure you have Python 3.5+ and a recent Android SDK with API
|
||||
level 18 or above installed on your system. A complete install of the Android
|
||||
SDK is required, as WA uses a number of its utilities, not just adb. For the
|
||||
SDK, make sure that either ``ANDROID_HOME`` environment variable is set, or that
|
||||
``adb`` is in your ``PATH``.
|
||||
|
||||
.. Note:: If you plan to run Workload Automation on Linux devices only, SSH is required,
|
||||
and Android SDK is optional if you wish to run WA on Android devices at a
|
||||
later time.
|
||||
|
||||
However, you would be starting off with a limited number of workloads that
|
||||
will run on Linux devices.
|
||||
|
||||
In addition to the base Python install, you will also need to have ``pip``
|
||||
(Python's package manager) installed as well. This is usually a separate package.
|
||||
|
||||
Once you have those, you can install WA with::
|
||||
|
||||
sudo -H pip install wlauto
|
||||
|
||||
This will install Workload Automation on your system, along with its mandatory
|
||||
dependencies.
|
||||
|
||||
Alternatively we provide a Dockerfile which can be used to create a Docker
|
||||
image for running WA along with its dependencies. More information can be found
|
||||
in the :ref:`Installation <dockerfile>` section.
|
||||
|
||||
(Optional) Verify installation
|
||||
-------------------------------
|
||||
|
||||
Once the tarball has been installed, try executing ::
|
||||
|
||||
wa -h
|
||||
|
||||
You should see a help message outlining available subcommands.
|
||||
|
||||
|
||||
(Optional) APK files
|
||||
--------------------
|
||||
|
||||
A large number of WA workloads are installed as APK files. These cannot be
|
||||
distributed with WA and so you will need to obtain those separately.
|
||||
|
||||
For more details, please see the :ref:`installation <apk_files>` section.
|
||||
|
||||
|
||||
List Command
|
||||
============
|
||||
|
||||
In order to get started with using WA we first need to find
|
||||
out what is available to use. In order to do this we can use the :ref:`list <list-command>`
|
||||
command followed by the type of plugin that you wish to see.
|
||||
|
||||
For example to see what workloads are available along with a short description
|
||||
of each you run::
|
||||
|
||||
wa list workloads
|
||||
|
||||
Which will give an output in the format of:
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
adobereader: The Adobe Reader workflow carries out the following typical
|
||||
productivity tasks.
|
||||
androbench: Executes storage performance benchmarks
|
||||
angrybirds_rio: Angry Birds Rio game.
|
||||
antutu: Executes Antutu 3D, UX, CPU and Memory tests
|
||||
applaunch: This workload launches and measures the launch time of applications
|
||||
for supporting workloads.
|
||||
benchmarkpi: Measures the time the target device takes to run and complete the
|
||||
Pi calculation algorithm.
|
||||
dhrystone: Runs the Dhrystone benchmark.
|
||||
exoplayer: Android ExoPlayer
|
||||
geekbench: Geekbench provides a comprehensive set of benchmarks engineered to
|
||||
quickly and accurately measure
|
||||
processor and memory performance.
|
||||
#..
|
||||
|
||||
The same syntax can be used to display ``commands``,
|
||||
``energy_instrument_backends``, ``instruments``, ``output_processors``,
|
||||
``resource_getters``, ``targets``. Once you have found the plugin you are
|
||||
looking for you can use the :ref:`show <show-command>` command to display more
|
||||
detailed information. Alternatively please see the
|
||||
:ref:`Plugin Reference <plugin-reference>` for an online version.
|
||||
|
||||
Show Command
|
||||
============
|
||||
|
||||
If you want to learn more information about a particular plugin, such as the
|
||||
parameters it supports, you can use the "show" command::
|
||||
|
||||
wa show dhrystone
|
||||
|
||||
If you have ``pandoc`` installed on your system, this will display man
|
||||
page-like description of the plugin, and the parameters it supports. If you do
|
||||
not have ``pandoc``, you will instead see the same information as raw
|
||||
restructured text.
|
||||
|
||||
Configure Your Device
|
||||
=====================
|
||||
|
||||
There are multiple options for configuring your device depending on your
|
||||
particular use case.
|
||||
|
||||
You can either add your configuration to the default configuration file
|
||||
``config.yaml``, under the ``$WA_USER_DIRECTORY/`` directory or you can specify it in
|
||||
the ``config`` section of your agenda directly.
|
||||
|
||||
Alternatively if you are using multiple devices, you may want to create separate
|
||||
config files for each of your devices you will be using. This allows you to
|
||||
specify which device you would like to use for a particular run and pass it as
|
||||
an argument when invoking with the ``-c`` flag.
|
||||
::
|
||||
|
||||
wa run dhrystone -c my_device.yaml
|
||||
|
||||
By default WA will use the “most specific” configuration available for example
|
||||
any configuration specified inside an agenda will override a passed
|
||||
configuration file which will in turn overwrite the default configuration file.
|
||||
|
||||
.. note:: For more information about configuring your
|
||||
device please see :ref:`Setting Up A Device <setting-up-a-device>`.
|
||||
|
||||
Android
|
||||
-------
|
||||
|
||||
By default, the device WA will use is set to 'generic_android'. WA is configured
|
||||
to work with a generic Android device through ``adb``. If you only have one
|
||||
device listed when you execute ``adb devices``, and your device has a standard
|
||||
Android configuration, then no extra configuration is required.
|
||||
|
||||
However, if your device is connected via network, you will have to manually
|
||||
execute ``adb connect <device ip>`` (or specify this in your
|
||||
:ref:`agenda <agenda>`) so that it appears in the device listing.
|
||||
|
||||
If you have multiple devices connected, you will need to tell WA which one you
|
||||
want it to use. You can do that by setting ``device`` in the device_config section.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# ...
|
||||
|
||||
device_config:
|
||||
device: 'abcdef0123456789'
|
||||
# ...
|
||||
# ...
|
||||
|
||||
Linux
|
||||
-----
|
||||
|
||||
First, set the device to 'generic_linux'
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# ...
|
||||
device: 'generic_linux'
|
||||
# ...
|
||||
|
||||
Find the device_config section and add these parameters
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# ...
|
||||
|
||||
device_config:
|
||||
host: '192.168.0.100'
|
||||
username: 'root'
|
||||
password: 'password'
|
||||
# ...
|
||||
# ...
|
||||
|
||||
Parameters:
|
||||
|
||||
- Host is the IP of your target Linux device
|
||||
- Username is the user for the device
|
||||
- Password is the password for the device
|
||||
|
||||
Enabling and Disabling Augmentations
|
||||
---------------------------------------
|
||||
|
||||
Augmentations are the collective name for "instruments" and "output
|
||||
processors" in WA3.
|
||||
|
||||
Some augmentations are enabled by default after your initial install of WA,
|
||||
which are specified in the ``config.yaml`` file located in your
|
||||
``WA_USER_DIRECTORY``, typically ``~/.workload_automation``.
|
||||
|
||||
.. note:: Some Linux devices may not be able to run certain augmentations
|
||||
provided by WA (e.g. cpufreq is disabled or unsupported by the
|
||||
device).
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# ...
|
||||
|
||||
augmentations:
|
||||
# Records the time it took to run the workload
|
||||
- execution_time
|
||||
|
||||
# Collects /proc/interrupts before and after execution and does a diff.
|
||||
- interrupts
|
||||
|
||||
# Collects the contents of /sys/devices/system/cpu before and after
|
||||
# execution and does a diff.
|
||||
- cpufreq
|
||||
|
||||
# Generate a txt file containing general status information about
|
||||
# which runs failed and which were successful.
|
||||
- status
|
||||
|
||||
# ...
|
||||
|
||||
If you only wanted to keep the 'execution_time' instrument enabled, you can comment out
|
||||
the rest of the listed augmentations to disable them.
|
||||
|
||||
This should give you basic functionality. If you are working with a development
|
||||
board or you need some advanced functionality additional configuration may be required.
|
||||
Please see the :ref:`device setup <setting-up-a-device>` section for more details.
|
||||
|
||||
.. note:: In WA2 'Instrumentation' and 'Result Processors' were divided up into their
|
||||
own sections in the agenda. In WA3 they now fall under the same category of
|
||||
'augmentations'. For compatibility the old naming structure is still valid
|
||||
however using the new entry names is recommended.
|
||||
|
||||
|
||||
|
||||
Running Your First Workload
|
||||
===========================
|
||||
|
||||
The simplest way to run a workload is to specify it as a parameter to WA ``run``
|
||||
:ref:`run <run-command>` sub-command::
|
||||
|
||||
wa run dhrystone
|
||||
|
||||
You will see INFO output from WA as it executes each stage of the run. A
|
||||
completed run output should look something like this::
|
||||
|
||||
INFO Creating output directory.
|
||||
INFO Initializing run
|
||||
INFO Connecting to target
|
||||
INFO Setting up target
|
||||
INFO Initializing execution context
|
||||
INFO Generating jobs
|
||||
INFO Loading job wk1 (dhrystone) [1]
|
||||
INFO Installing instruments
|
||||
INFO Installing output processors
|
||||
INFO Starting run
|
||||
INFO Initializing run
|
||||
INFO Initializing job wk1 (dhrystone) [1]
|
||||
INFO Running job wk1
|
||||
INFO Configuring augmentations
|
||||
INFO Configuring target for job wk1 (dhrystone) [1]
|
||||
INFO Setting up job wk1 (dhrystone) [1]
|
||||
INFO Running job wk1 (dhrystone) [1]
|
||||
INFO Tearing down job wk1 (dhrystone) [1]
|
||||
INFO Completing job wk1
|
||||
INFO Job completed with status OK
|
||||
INFO Finalizing run
|
||||
INFO Finalizing job wk1 (dhrystone) [1]
|
||||
INFO Done.
|
||||
INFO Run duration: 9 seconds
|
||||
INFO Ran a total of 1 iterations: 1 OK
|
||||
INFO Results can be found in wa_output
|
||||
|
||||
|
||||
Once the run has completed, you will find a directory called ``wa_output``
|
||||
in the location where you have invoked ``wa run``. Within this directory,
|
||||
you will find a "results.csv" file which will contain results obtained for
|
||||
dhrystone, as well as a "run.log" file containing detailed log output for
|
||||
the run. You will also find a sub-directory called 'wk1-dhrystone-1' that
|
||||
contains the results for that iteration. Finally, you will find various additional
|
||||
information in the ``wa_output/__meta`` subdirectory for example information
|
||||
extracted from the target and a copy of the agenda file. The contents of
|
||||
iteration-specific subdirectories will vary from workload to workload, and,
|
||||
along with the contents of the main output directory, will depend on the
|
||||
augmentations that were enabled for that run.
|
||||
|
||||
The ``run`` sub-command takes a number of options that control its behaviour,
|
||||
you can view those by executing ``wa run -h``. Please see the :ref:`invocation`
|
||||
section for details.
|
||||
|
||||
|
||||
Create an Agenda
|
||||
================
|
||||
|
||||
Simply running a single workload is normally of little use. Typically, you would
|
||||
want to specify several workloads, setup the device state and, possibly, enable
|
||||
additional augmentations. To do this, you would need to create an "agenda" for
|
||||
the run that outlines everything you want WA to do.
|
||||
|
||||
Agendas are written using YAML_ markup language. A simple agenda might look
|
||||
like this:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
augmentations:
|
||||
- ~execution_time
|
||||
- targz
|
||||
iterations: 2
|
||||
workloads:
|
||||
- memcpy
|
||||
- name: dhrystone
|
||||
params:
|
||||
mloops: 5
|
||||
threads: 1
|
||||
|
||||
This agenda:
|
||||
|
||||
- Specifies two workloads: memcpy and dhrystone.
|
||||
- Specifies that dhrystone should run in one thread and execute five million loops.
|
||||
- Specifies that each of the two workloads should be run twice.
|
||||
- Enables the targz output processor, in addition to the output processors enabled in
|
||||
the config.yaml.
|
||||
- Disables execution_time instrument, if it is enabled in the config.yaml
|
||||
|
||||
An agenda can be created using WA's ``create`` :ref:`command <using-the-create-command>`
|
||||
or in a text editor and saved as a YAML file.
|
||||
|
||||
For more options please see the :ref:`agenda` documentation.
|
||||
|
||||
.. _YAML: http://en.wikipedia.org/wiki/YAML
|
||||
|
||||
.. _using-the-create-command:
|
||||
|
||||
Using the Create Command
|
||||
-------------------------
|
||||
The easiest way to create an agenda is to use the 'create' command. For more
|
||||
in-depth information please see the :ref:`Create Command <create-command>` documentation.
|
||||
|
||||
In order to populate the agenda with relevant information you can supply all of
|
||||
the plugins you wish to use as arguments to the command, for example if we want
|
||||
to create an agenda file for running ``dhrystone`` on a `generic_android` device and we
|
||||
want to enable the ``execution_time`` and ``trace-cmd`` instruments and display the
|
||||
metrics using the ``csv`` output processor. We would use the following command::
|
||||
|
||||
wa create agenda generic_android dhrystone execution_time trace-cmd csv -o my_agenda.yaml
|
||||
|
||||
This will produce a ``my_agenda.yaml`` file containing all the relevant
|
||||
configuration for the specified plugins along with their default values as shown
|
||||
below:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
augmentations:
|
||||
- execution_time
|
||||
- trace-cmd
|
||||
- csv
|
||||
iterations: 1
|
||||
device: generic_android
|
||||
device_config:
|
||||
adb_server: null
|
||||
adb_port: null
|
||||
big_core: null
|
||||
core_clusters: null
|
||||
core_names: null
|
||||
device: null
|
||||
disable_selinux: true
|
||||
executables_directory: null
|
||||
load_default_modules: true
|
||||
logcat_poll_period: null
|
||||
model: null
|
||||
modules: null
|
||||
package_data_directory: /data/data
|
||||
shell_prompt: !<tag:wa:regex> '8:^.*(shell|root)@.*:/\S* [#$] '
|
||||
working_directory: null
|
||||
execution_time: {}
|
||||
trace-cmd:
|
||||
buffer_size: null
|
||||
buffer_size_step: 1000
|
||||
events:
|
||||
- sched*
|
||||
- irq*
|
||||
- power*
|
||||
- thermal*
|
||||
functions: null
|
||||
no_install: false
|
||||
report: true
|
||||
report_on_target: false
|
||||
mode: write-to-memory
|
||||
csv:
|
||||
extra_columns: null
|
||||
use_all_classifiers: false
|
||||
workloads:
|
||||
- name: dhrystone
|
||||
params:
|
||||
cleanup_assets: true
|
||||
delay: 0
|
||||
duration: 0
|
||||
mloops: 0
|
||||
taskset_mask: 0
|
||||
threads: 4
|
||||
|
||||
|
||||
Run Command
|
||||
============
|
||||
These examples show some useful options that can be used with WA's ``run`` command.
|
||||
|
||||
Once we have created an agenda, to use it with WA we can pass it as an argument to
|
||||
the run command e.g.::
|
||||
|
||||
wa run <path/to/agenda> (e.g. wa run ~/myagenda.yaml)
|
||||
|
||||
By default WA will use the "wa_output" directory to store its output, however to
|
||||
redirect the output to a different directory we can use::
|
||||
|
||||
wa run dhrystone -d my_output_directory
|
||||
|
||||
We can also tell WA to use additional config files by supplying it with
|
||||
the ``-c`` argument. One use case for passing additional config files is if you
|
||||
have multiple devices you wish to test with WA, you can store the relevant device
|
||||
configuration in individual config files and then pass the file corresponding to
|
||||
the device you wish to use for that particular test.
|
||||
|
||||
.. note:: As previously mentioned, any more specific configuration present in
|
||||
the agenda file will overwrite the corresponding config parameters
|
||||
specified in the config file(s).
|
||||
|
||||
|
||||
::
|
||||
|
||||
wa run -c myconfig.yaml ~/myagenda.yaml
|
||||
|
||||
To use the same output directory but override the existing contents to
|
||||
store new dhrystone results we can specify the ``-f`` argument::
|
||||
|
||||
wa run -f dhrystone
|
||||
|
||||
To display verbose output while running memcpy::
|
||||
|
||||
wa run --verbose memcpy
|
||||
|
||||
|
||||
.. _output_directory:
|
||||
|
||||
Output
|
||||
======
|
||||
|
||||
The output directory will contain subdirectories for each job that was run,
|
||||
which will in turn contain the generated metrics and artifacts for each job.
|
||||
The directory will also contain a ``run.log`` file containing the complete log
|
||||
output for the run, and a ``__meta`` directory with the configuration and
|
||||
metadata for the run. Metrics are serialized inside ``result.json`` files inside
|
||||
each job's subdirectory. There may also be a ``__failed`` directory containing
|
||||
failed attempts for jobs that have been re-run.
|
||||
|
||||
Augmentations may add additional files at the run or job directory level. The
|
||||
default configuration has ``status`` and ``csv`` augmentations enabled which
|
||||
generate a ``status.txt`` containing status summary for the run and individual
|
||||
jobs, and a ``results.csv`` containing metrics from all jobs in a CSV table,
|
||||
respectively.
|
||||
|
||||
See :ref:`output_directory_structure` for more information.
|
||||
|
||||
In order to make it easier to access WA results from scripts, WA provides an API
|
||||
that parses the contents of the output directory:
|
||||
|
||||
|
||||
.. code-block:: pycon
|
||||
|
||||
>>> from wa import RunOutput
|
||||
>>> ro = RunOutput('./wa_output')
|
||||
>>> for job in ro.jobs:
|
||||
... if job.status != 'OK':
|
||||
... print('Job "{}" did not complete successfully: {}'.format(job, job.status))
|
||||
... continue
|
||||
... print('Job "{}":'.format(job))
|
||||
... for metric in job.metrics:
|
||||
... if metric.units:
|
||||
... print('\t{}: {} {}'.format(metric.name, metric.value, metric.units))
|
||||
... else:
|
||||
... print('\t{}: {}'.format(metric.name, metric.value))
|
||||
...
|
||||
Job "wk1-dhrystone-1":
|
||||
thread 0 score: 20833333
|
||||
thread 0 DMIPS: 11857
|
||||
thread 1 score: 24509804
|
||||
thread 1 DMIPS: 13950
|
||||
thread 2 score: 18011527
|
||||
thread 2 DMIPS: 10251
|
||||
thread 3 score: 26371308
|
||||
thread 3 DMIPS: 15009
|
||||
time: 1.001251 seconds
|
||||
total DMIPS: 51067
|
||||
total score: 89725972
|
||||
execution_time: 1.4834280014 seconds
|
||||
|
||||
See :ref:`output_processing_api` for details.
|
||||
|
||||
Uninstall
|
||||
=========
|
||||
|
||||
If you have installed Workload Automation via ``pip``, then run this command to
|
||||
uninstall it::
|
||||
|
||||
sudo pip uninstall wa
|
||||
|
||||
|
||||
.. Note:: It will *not* remove any user configuration (e.g. the ~/.workload_automation
|
||||
directory).
|
||||
|
||||
Upgrade
|
||||
=======
|
||||
|
||||
To upgrade Workload Automation to the latest version via ``pip``, run::
|
||||
|
||||
sudo pip install --upgrade --no-deps wa
|
||||
|
@ -1,20 +0,0 @@
|
||||
.. _user_reference:
|
||||
|
||||
***************
|
||||
User Reference
|
||||
***************
|
||||
|
||||
|
||||
.. contents:: Contents
|
||||
:depth: 2
|
||||
:local:
|
||||
|
||||
.. include:: user_information/user_reference/configuration.rst
|
||||
|
||||
-------------------
|
||||
|
||||
.. include:: user_information/user_reference/invocation.rst
|
||||
|
||||
-------------------
|
||||
|
||||
.. include:: user_information/user_reference/output_directory.rst
|
@ -1,227 +0,0 @@
|
||||
.. _agenda-reference:
|
||||
|
||||
Agenda
|
||||
------
|
||||
|
||||
|
||||
An agenda can be thought of as a way to define an experiment as it specifies
|
||||
what is to be done during a Workload Automation run. This includes which
|
||||
workloads will be run, with what configuration and which augmentations will be
|
||||
enabled, etc. Agenda syntax is designed to be both succinct and expressive and
|
||||
is written using YAML notation.
|
||||
|
||||
There are three valid top level entries which are:
|
||||
:ref:`config <config-agenda-entry>`, :ref:`workloads <workloads-agenda-entry>`,
|
||||
:ref:`sections <sections-agenda-entry>`.
|
||||
|
||||
An example agenda can be seen here:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config: # General configuration for the run
|
||||
user_directory: ~/.workload_automation/
|
||||
default_output_directory: 'wa_output'
|
||||
augmentations: # A list of all augmentations to be enabled and disabled.
|
||||
- trace-cmd
|
||||
- csv
|
||||
- ~dmesg # Disable the dmesg augmentation
|
||||
|
||||
iterations: 1 # How many iterations to run each workload by default
|
||||
|
||||
device: generic_android
|
||||
device_config:
|
||||
device: R32C801B8XY # The adb name of our device we want to run on
|
||||
disable_selinux: true
|
||||
load_default_modules: true
|
||||
package_data_directory: /data/data
|
||||
|
||||
trace-cmd: # Provide config for the trace-cmd augmentation.
|
||||
buffer_size_step: 1000
|
||||
events:
|
||||
- sched*
|
||||
- irq*
|
||||
- power*
|
||||
- thermal*
|
||||
no_install: false
|
||||
report: true
|
||||
report_on_target: false
|
||||
mode: write-to-disk
|
||||
csv: # Provide config for the csv augmentation
|
||||
use_all_classifiers: true
|
||||
|
||||
sections: # Configure what sections we want and their settings
|
||||
- id: LITTLES # Run workloads just on the LITTLE cores
|
||||
runtime_parameters: # Supply RT parameters to be used for this section
|
||||
num_little_cores: 4
|
||||
num_big_cores: 0
|
||||
|
||||
- id: BIGS # Run workloads just on the big cores
|
||||
runtime_parameters: # Supply RT parameters to be used for this section
|
||||
num_big_cores: 4
|
||||
num_little_cores: 0
|
||||
|
||||
workloads: # List which workloads should be run
|
||||
- name: benchmarkpi
|
||||
augmentations:
|
||||
- ~trace-cmd # Disable the trace-cmd instrument for this workload
|
||||
iterations: 2 # Override the global number of iteration for this workload
|
||||
params: # Specify workload parameters for this workload
|
||||
cleanup_assets: true
|
||||
exact_abi: false
|
||||
force_install: false
|
||||
install_timeout: 300
|
||||
markers_enabled: false
|
||||
prefer_host_package: true
|
||||
strict: false
|
||||
uninstall: false
|
||||
- dhrystone # Run the dhrystone workload with all default config
|
||||
|
||||
This agenda will result in a total of 6 jobs being executed on our Android
|
||||
device, 4 of which running the BenchmarkPi workload with its customized workload
|
||||
parameters and 2 running dhrystone with its default configuration. The first 3
|
||||
will be running on only the little cores and the latter 3 running on the big
|
||||
cores. For all of the jobs executed the output will be processed by the ``csv``
|
||||
processor (plus any additional processors enabled in the default config file),
|
||||
however trace data will only be collected for the dhrystone jobs.
|
||||
|
||||
.. _config-agenda-entry:
|
||||
|
||||
config
|
||||
^^^^^^^
|
||||
|
||||
This section is used to provide overall configuration for WA and its run. The
|
||||
``config`` section of an agenda will be merged with any other configuration
|
||||
files provided (including the default config file) and merged with the most
|
||||
specific configuration taking precedence (see
|
||||
:ref:`Config Merging <config-merging>` for more information). The only
|
||||
restriction is that ``run_name`` can only be specified in the config section
|
||||
of an agenda as this would not make sense to set as a default.
|
||||
|
||||
Within this section there are multiple distinct types of configuration that can
|
||||
be provided. However in addition to the options listed here all configuration
|
||||
that is available for :ref:`sections <sections-agenda-entry>` can also be entered
|
||||
here and will be globally applied.
|
||||
|
||||
Configuration
|
||||
"""""""""""""
|
||||
|
||||
The first is to configure the behaviour of WA and how a run as a
|
||||
whole will behave. The most common options that you may want to specify are:
|
||||
|
||||
:device: The name of the 'device' that you wish to perform the run
|
||||
on. This name is a combination of a devlib
|
||||
`Platform <http://devlib.readthedocs.io/en/latest/platform.html>`_ and
|
||||
`Target <http://devlib.readthedocs.io/en/latest/target.html>`_. To
|
||||
see the available options please use ``wa list targets``.
|
||||
:device_config: This is a dict mapping allowing you to configure which target
|
||||
to connect to (e.g. ``host`` for an SSH connection or
|
||||
``device`` to specify an ADB name) as well as configure other
|
||||
options for the device for example the ``working_directory``
|
||||
or the list of ``modules`` to be loaded onto the device. (For
|
||||
more information please see
|
||||
:ref:`here <android-general-device-setup>`)
|
||||
:execution_order: Defines the order in which the agenda spec will be executed.
|
||||
:reboot_policy: Defines when during execution of a run a Device will be rebooted.
|
||||
:max_retries: The maximum number of times failed jobs will be retried before giving up.
|
||||
:allow_phone_home: Prevent running any workloads that are marked with ‘phones_home’.
|
||||
|
||||
For more information and a full list of these configuration options please see
|
||||
:ref:`Run Configuration <run-configuration>` and
|
||||
:ref:`Meta Configuration <meta-configuration>`.
|
||||
|
||||
|
||||
Plugins
|
||||
"""""""
|
||||
:augmentations: Specify a list of which augmentations should be enabled (or if
|
||||
prefixed with a ``~``, disabled).
|
||||
|
||||
.. note:: While augmentations can be enabled and disabled on a per workload
|
||||
basis, they cannot yet be re-configured part way through a run and the
|
||||
configuration provided as part of an agenda config section or separate
|
||||
config file will be used for all jobs in a WA run.
|
||||
|
||||
:<plugin_name>: You can also use this section to supply configuration for
|
||||
specific plugins, such as augmentations, workloads, resource getters etc.
|
||||
To do this the plugin name you wish to configure should be provided as an
|
||||
entry in this section and should contain a mapping of configuration
|
||||
options to their desired settings. If configuration is supplied for a
|
||||
plugin that is not currently enabled then it will simply be ignored. This
|
||||
allows for plugins to be temporarily removed without also having to remove
|
||||
their configuration, or to provide a set of defaults for a plugin which
|
||||
can then be overridden.
|
||||
|
||||
:<global_alias>: Some plugins provide global aliases which can set one or more
|
||||
configuration options at once, and these can also be specified here. For
|
||||
example if you specify a value for the entry ``remote_assets_url`` this
|
||||
will set the URL the http resource getter will use when searching for any
|
||||
missing assets.
|
||||
|
||||
---------------------------
|
||||
|
||||
.. _workloads-agenda-entry:
|
||||
|
||||
workloads
|
||||
^^^^^^^^^
|
||||
|
||||
Here you can specify a list of workloads to be run. If you wish to run a
|
||||
workload with all default values then you can specify the workload name directly
|
||||
as an entry, otherwise a dict mapping should be provided. Any settings provided
|
||||
here will be the most specific and therefore override any other more generalised
|
||||
configuration for that particular workload spec. The valid entries are as
|
||||
follows:
|
||||
|
||||
:workload_name: **(Mandatory)** The name of the workload to be run
|
||||
:iterations: Specify how many iterations the workload should be run
|
||||
:label: Similar to IDs but do not have the uniqueness restriction.
|
||||
If specified, labels will be used by some output processors instead of (or in
|
||||
addition to) the workload name. For example, the csv output processor will put
|
||||
the label in the "workload" column of the CSV file.
|
||||
:augmentations: The instruments and output processors to enable (or
|
||||
disabled using a ~) during this workload.
|
||||
:classifiers: Classifiers allow you to tag metrics from this workload
|
||||
spec which are often used to help identify what runtime parameters were used
|
||||
when post processing results.
|
||||
:workload_parameters: Any parameters to
|
||||
configure that particular workload in a dict form.
|
||||
|
||||
Alias: ``workload_params``
|
||||
|
||||
.. note:: You can see available parameters for a given workload with the
|
||||
:ref:`show command <show-command>` or look it up in the
|
||||
:ref:`Plugin Reference <plugin-reference>`.
|
||||
|
||||
:runtime_parameters: A dict mapping of any runtime parameters that should be set
|
||||
for the device for that particular workload. For available
|
||||
parameters please see
|
||||
:ref:`runtime parameters <runtime-parameters>`.
|
||||
|
||||
Alias: ``runtime_parms``
|
||||
|
||||
.. note:: Unless specified elsewhere these configurations will not be
|
||||
undone once the workload has finished. I.e. if the frequency of a
|
||||
core is changed it will remain at that frequency until otherwise
|
||||
changed.
|
||||
|
||||
.. note:: There is also a shorter ``params`` alias available, however this alias will be
|
||||
interpreted differently depending on whether it is used in workload
|
||||
entry, in which case it will be interpreted as ``workload_params``, or
|
||||
at global config or section (see below) level, in which case it will
|
||||
be interpreted as ``runtime_params``.
|
||||
|
||||
|
||||
---------------------------
|
||||
|
||||
.. _sections-agenda-entry:
|
||||
|
||||
sections
|
||||
^^^^^^^^
|
||||
|
||||
Sections are used for grouping sets of configuration together in order to
|
||||
reduce the need for duplicated configuration (for more information please see
|
||||
:ref:`Sections <sections>`). Each section specified will be applied for each
|
||||
entry in the ``workloads`` section. The valid configuration entries are the
|
||||
same as the ``"workloads"`` section as mentioned above, except you can
|
||||
additionally specify:
|
||||
|
||||
:workloads: An entry which can be provided with the same configuration entries
|
||||
as the :ref:`workloads <workloads-agenda-entry>` top level entry.
|
@ -1,192 +0,0 @@
|
||||
.. _configuration-specification:
|
||||
|
||||
|
||||
Configuration
|
||||
=============
|
||||
|
||||
.. include:: user_information/user_reference/agenda.rst
|
||||
|
||||
---------------------
|
||||
|
||||
.. _run-configuration:
|
||||
|
||||
Run Configuration
|
||||
------------------
|
||||
In addition to specifying run execution parameters through an agenda, the
|
||||
behaviour of WA can be modified through configuration file(s). The default
|
||||
configuration file is ``~/.workload_automation/config.yaml`` (the location can
|
||||
be changed by setting ``WA_USER_DIRECTORY`` environment variable, see
|
||||
:ref:`envvars` section below). This file will be created when you first run WA
|
||||
if it does not already exist. This file must always exist and will always be
|
||||
loaded. You can add to or override the contents of that file on invocation of
|
||||
Workload Automation by specifying an additional configuration file using
|
||||
``--config`` option. Variables with specific names will be picked up by the
|
||||
framework and used to modify the behaviour of Workload automation e.g.
|
||||
the ``iterations`` variable might be specified to tell WA how many times to run
|
||||
each workload.
|
||||
|
||||
---------------------
|
||||
|
||||
.. _available_settings:
|
||||
|
||||
.. include:: run_config/Run_Configuration.rst
|
||||
|
||||
---------------------
|
||||
|
||||
.. _meta-configuration:
|
||||
|
||||
Meta Configuration
|
||||
------------------
|
||||
|
||||
There are also a couple of settings are used to provide additional metadata
|
||||
for a run. These may get picked up by instruments or output processors to
|
||||
attach context to results.
|
||||
|
||||
.. include:: run_config/Meta_Configuration.rst
|
||||
|
||||
---------------------
|
||||
|
||||
.. _envvars:
|
||||
|
||||
Environment Variables
|
||||
---------------------
|
||||
|
||||
In addition to standard configuration described above, WA behaviour can be
|
||||
altered through environment variables. These can determine where WA looks for
|
||||
various assets when it starts.
|
||||
|
||||
:WA_USER_DIRECTORY: This is the location WA will look for config.yaml, plugins,
|
||||
dependencies, and it will also be used for local caches, etc. If this
|
||||
variable is not set, the default location is ``~/.workload_automation`` (this
|
||||
is created when WA is installed).
|
||||
|
||||
.. note:: This location **must** be writable by the user who runs WA.
|
||||
|
||||
|
||||
:WA_LOG_BUFFER_CAPACITY: Specifies the capacity (in log records) for the early
|
||||
log handler which is used to buffer log records until a log file becomes
|
||||
available. If this is not set, the default value of ``1000`` will be used.
|
||||
This should be sufficient for most scenarios, however this may need to be
|
||||
increased, e.g. if plugin loader scans a very large number of locations;
|
||||
this may also be set to a lower value to reduce WA's memory footprint on
|
||||
memory-constrained hosts.
|
||||
|
||||
---------------------
|
||||
|
||||
.. include:: user_information/user_reference/runtime_parameters.rst
|
||||
|
||||
---------------------
|
||||
|
||||
.. _config-merging:
|
||||
|
||||
Configuration Merging
|
||||
---------------------
|
||||
WA configuration can come from various sources of increasing priority, as well
|
||||
as being specified in a generic and specific manner. For example WA's global
|
||||
config file would be considered the least specific vs the parameters of a
|
||||
workload in an agenda which would be the most specific. WA has two rules for the
|
||||
priority of configuration:
|
||||
|
||||
- Configuration from higher priority sources overrides configuration from
|
||||
lower priority sources.
|
||||
- More specific configuration overrides less specific configuration.
|
||||
|
||||
There is a situation where these two rules come into conflict. When a generic
|
||||
configuration is given in config source of high priority and a specific
|
||||
configuration is given in a config source of lower priority. In this situation
|
||||
it is not possible to know the end user's intention and WA will error.
|
||||
|
||||
This functionality allows for defaults for plugins, targets etc. to be
|
||||
configured at a global level and then seamlessly overridden without the need to
|
||||
remove the high level configuration.
|
||||
|
||||
Dependent on specificity, configuration parameters from different sources will
|
||||
have different inherent priorities. Within an agenda, the configuration in
|
||||
"workload" entries will be more specific than "sections" entries, which in turn
|
||||
are more specific than parameters in the "config" entry.
|
||||
|
||||
.. _config-include:
|
||||
|
||||
Configuration Includes
|
||||
----------------------
|
||||
|
||||
It is possible to include other files in your config files and agendas. This is
|
||||
done by specifying ``include#`` (note the trailing hash) as a key in one of the
|
||||
mappings, with the value being the path to the file to be included. The path
|
||||
must be either absolute, or relative to the location of the file it is being
|
||||
included from (*not* to the current working directory). The path may also
|
||||
include ``~`` to indicate current user's home directory.
|
||||
|
||||
The include is performed by removing the ``include#`` key and loading the contents of
|
||||
the specified file into the mapping that contained it. In cases where the mapping
|
||||
already contains the key to be loaded, values will be merged using the usual
|
||||
merge method (for overwrites, values in the mapping take precedence over those
|
||||
from the included files).
|
||||
|
||||
Below is an example of an agenda that includes other files. The assumption is
|
||||
that all of those files are in one directory
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# agenda.yaml
|
||||
config:
|
||||
augmentations: [trace-cmd]
|
||||
include#: ./my-config.yaml
|
||||
sections:
|
||||
- include#: ./section1.yaml
|
||||
- include#: ./section2.yaml
|
||||
include#: ./workloads.yaml
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# my-config.yaml
|
||||
augmentations: [cpufreq]
|
||||
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# section1.yaml
|
||||
runtime_parameters:
|
||||
frequency: max
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# section2.yaml
|
||||
runtime_parameters:
|
||||
frequency: min
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# workloads.yaml
|
||||
workloads:
|
||||
- dhrystone
|
||||
- memcpy
|
||||
|
||||
The above is equivalent to having a single file like this:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# agenda.yaml
|
||||
config:
|
||||
augmentations: [cpufreq, trace-cmd]
|
||||
sections:
|
||||
- runtime_parameters:
|
||||
frequency: max
|
||||
- runtime_parameters:
|
||||
frequency: min
|
||||
workloads:
|
||||
- dhrystone
|
||||
- memcpy
|
||||
|
||||
Some additional details about the implementation and its limitations:
|
||||
|
||||
- The ``include#`` *must* be a key in a mapping, and the contents of the
|
||||
included file *must* be a mapping as well; it is not possible to include a
|
||||
list (e.g. in the examples above ``workload:`` part *must* be in the included
|
||||
file).
|
||||
- Being a key in a mapping, there can only be one ``include#`` entry per block.
|
||||
- The included file *must* have a ``.yaml`` extension.
|
||||
- Nested inclusions *are* allowed. I.e. included files may themselves include
|
||||
files; in such cases the included paths must be relative to *that* file, and
|
||||
not the "main" file.
|
||||
|
@ -1,376 +0,0 @@
|
||||
.. _invocation:
|
||||
|
||||
Commands
|
||||
========
|
||||
|
||||
Installing the wa package will add ``wa`` command to your system,
|
||||
which you can run from anywhere. This has a number of sub-commands, which can
|
||||
be viewed by executing ::
|
||||
|
||||
wa -h
|
||||
|
||||
Individual sub-commands are discussed in detail below.
|
||||
|
||||
.. _run-command:
|
||||
|
||||
Run
|
||||
---
|
||||
|
||||
The most common sub-command you will use is ``run``. This will run the specified
|
||||
workload(s) and process its resulting output. This takes a single mandatory
|
||||
argument which specifies what you want WA to run. This could be either a workload
|
||||
name, or a path to an agenda file that allows you to specify multiple workloads as
|
||||
well as a lot of additional configuration (see :ref:`agenda` section for details).
|
||||
Executing ::
|
||||
|
||||
wa run -h
|
||||
|
||||
Will display help for this subcommand that will look something like this:
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
usage: wa run [-h] [-c CONFIG] [-v] [--version] [-d DIR] [-f] [-i ID]
|
||||
[--disable INSTRUMENT]
|
||||
AGENDA
|
||||
|
||||
Execute automated workloads on a remote device and process the resulting
|
||||
output.
|
||||
|
||||
positional arguments:
|
||||
AGENDA Agenda for this workload automation run. This defines
|
||||
which workloads will be executed, how many times, with
|
||||
which tunables, etc. See example agendas in
|
||||
/usr/local/lib/python3.X/dist-packages/wa for an
|
||||
example of how this file should be structured.
|
||||
|
||||
optional arguments:
|
||||
-h, --help show this help message and exit
|
||||
-c CONFIG, --config CONFIG
|
||||
specify an additional config.yaml
|
||||
-v, --verbose The scripts will produce verbose output.
|
||||
--version show program's version number and exit
|
||||
-d DIR, --output-directory DIR
|
||||
Specify a directory where the output will be
|
||||
generated. If the directory already exists, the script
|
||||
will abort unless -f option (see below) is used, in
|
||||
which case the contents of the directory will be
|
||||
overwritten. If this option is not specified, then
|
||||
wa_output will be used instead.
|
||||
-f, --force Overwrite output directory if it exists. By default,
|
||||
the script will abort in this situation to prevent
|
||||
accidental data loss.
|
||||
-i ID, --id ID Specify a workload spec ID from an agenda to run. If
|
||||
this is specified, only that particular spec will be
|
||||
run, and other workloads in the agenda will be
|
||||
ignored. This option may be used to specify multiple
|
||||
IDs.
|
||||
--disable INSTRUMENT Specify an instrument or output processor to disable
|
||||
from the command line. This equivalent to adding
|
||||
"~{metavar}" to the instruments list in the
|
||||
agenda. This can be used to temporarily disable a
|
||||
troublesome instrument for a particular run without
|
||||
introducing permanent change to the config (which one
|
||||
might then forget to revert). This option may be
|
||||
specified multiple times.
|
||||
|
||||
.. _list-command:
|
||||
|
||||
List
|
||||
----
|
||||
|
||||
This lists all plugins of a particular type. For example ::
|
||||
|
||||
wa list instruments
|
||||
|
||||
will list all instruments currently included in WA. The list will consist of
|
||||
plugin names and short descriptions of the functionality they offer e.g.
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
#..
|
||||
cpufreq: Collects dynamic frequency (DVFS) settings before and after
|
||||
workload execution.
|
||||
dmesg: Collected dmesg output before and during the run.
|
||||
energy_measurement: This instrument is designed to be used as an interface to
|
||||
the various energy measurement instruments located
|
||||
in devlib.
|
||||
execution_time: Measure how long it took to execute the run() methods of
|
||||
a Workload.
|
||||
file_poller: Polls the given files at a set sample interval. The values
|
||||
are output in CSV format.
|
||||
fps: Measures Frames Per Second (FPS) and associated metrics for
|
||||
a workload.
|
||||
#..
|
||||
|
||||
|
||||
You can use the same syntax to quickly display information about ``commands``,
|
||||
``energy_instrument_backends``, ``instruments``, ``output_processors``, ``resource_getters``,
|
||||
``targets`` and ``workloads``
|
||||
|
||||
.. _show-command:
|
||||
|
||||
Show
|
||||
----
|
||||
|
||||
This will show detailed information about a plugin (workloads, targets,
|
||||
instruments etc.), including a full description and any relevant
|
||||
parameters/configuration that are available. For example executing ::
|
||||
|
||||
wa show benchmarkpi
|
||||
|
||||
will produce something like: ::
|
||||
|
||||
|
||||
benchmarkpi
|
||||
-----------
|
||||
|
||||
Measures the time the target device takes to run and complete the Pi
|
||||
calculation algorithm.
|
||||
|
||||
http://androidbenchmark.com/howitworks.php
|
||||
|
||||
from the website:
|
||||
|
||||
The whole idea behind this application is to use the same Pi calculation
|
||||
algorithm on every Android Device and check how fast that process is.
|
||||
Better calculation times, conclude to faster Android devices. This way you
|
||||
can also check how lightweight your custom made Android build is. Or not.
|
||||
|
||||
As Pi is an irrational number, Benchmark Pi does not calculate the actual Pi
|
||||
number, but an approximation near the first digits of Pi over the same
|
||||
calculation circles the algorithms needs.
|
||||
|
||||
So, the number you are getting in milliseconds is the time your mobile device
|
||||
takes to run and complete the Pi calculation algorithm resulting in a
|
||||
approximation of the first Pi digits.
|
||||
|
||||
parameters
|
||||
~~~~~~~~~~
|
||||
|
||||
cleanup_assets : boolean
|
||||
If ``True``, if assets are deployed as part of the workload they
|
||||
will be removed again from the device as part of finalize.
|
||||
|
||||
default: ``True``
|
||||
|
||||
package_name : str
|
||||
The package name that can be used to specify
|
||||
the workload apk to use.
|
||||
|
||||
install_timeout : integer
|
||||
Timeout for the installation of the apk.
|
||||
|
||||
constraint: ``value > 0``
|
||||
|
||||
default: ``300``
|
||||
|
||||
version : str
|
||||
The version of the package to be used.
|
||||
|
||||
variant : str
|
||||
The variant of the package to be used.
|
||||
|
||||
strict : boolean
|
||||
Whether to throw an error if the specified package cannot be found
|
||||
on host.
|
||||
|
||||
force_install : boolean
|
||||
Always re-install the APK, even if matching version is found already installed
|
||||
on the device.
|
||||
|
||||
uninstall : boolean
|
||||
If ``True``, will uninstall workload's APK as part of teardown.'
|
||||
|
||||
exact_abi : boolean
|
||||
If ``True``, workload will check that the APK matches the target
|
||||
device ABI, otherwise any suitable APK found will be used.
|
||||
|
||||
markers_enabled : boolean
|
||||
If set to ``True``, workloads will insert markers into logs
|
||||
at various points during execution. These markers may be used
|
||||
by other plugins or post-processing scripts to provide
|
||||
measurements or statistics for specific parts of the workload
|
||||
execution.
|
||||
|
||||
.. note:: You can also use this command to view global settings by using ``wa show settings``
|
||||
|
||||
|
||||
.. _create-command:
|
||||
|
||||
Create
|
||||
------
|
||||
|
||||
This aids in the creation of new WA-related objects for example agendas and workloads.
|
||||
For more detailed information on creating workloads please see the
|
||||
:ref:`adding a workload <adding-a-workload-example>` section for more details.
|
||||
|
||||
As an example to create an agenda that will run the dhrystone and memcpy workloads
|
||||
that will use the status and hwmon augmentations, run each test 3 times and save
|
||||
into the file ``my_agenda.yaml`` the following command can be used::
|
||||
|
||||
wa create agenda dhrystone memcpy status hwmon -i 3 -o my_agenda.yaml
|
||||
|
||||
Which will produce something like::
|
||||
|
||||
config:
|
||||
augmentations:
|
||||
- status
|
||||
- hwmon
|
||||
status: {}
|
||||
hwmon: {}
|
||||
iterations: 3
|
||||
workloads:
|
||||
- name: dhrystone
|
||||
params:
|
||||
cleanup_assets: true
|
||||
delay: 0
|
||||
duration: 0
|
||||
mloops: 0
|
||||
taskset_mask: 0
|
||||
threads: 4
|
||||
- name: memcpy
|
||||
params:
|
||||
buffer_size: 5242880
|
||||
cleanup_assets: true
|
||||
cpus: null
|
||||
iterations: 1000
|
||||
|
||||
This will be populated with default values which can then be customised for the
|
||||
particular use case.
|
||||
|
||||
Additionally the create command can be used to initialize (and update) a
|
||||
Postgres database which can be used by the ``postgres`` output processor.
|
||||
|
||||
Most of the database connection parameters have a default value however they can
|
||||
be overridden via command line arguments. When initializing the database WA will
|
||||
also save the supplied parameters into the default user config file so that they
|
||||
do not need to be specified each time the output processor is used.
|
||||
|
||||
As an example if we had a database server running at 10.0.0.2 using the
|
||||
standard port we could use the following command to initialize a database for
|
||||
use with WA::
|
||||
|
||||
wa create database -a 10.0.0.2 -u my_username -p Pa55w0rd
|
||||
|
||||
This will log into the database server with the supplied credentials and create
|
||||
a database (defaulting to 'wa') and will save the configuration to the
|
||||
``~/.workload_automation/config.yaml`` file.
|
||||
|
||||
With updates to WA there may be changes to the database schema used. In this
|
||||
case the create command can also be used with the ``-U`` flag to update the
|
||||
database to use the new schema as follows::
|
||||
|
||||
wa create database -a 10.0.0.2 -u my_username -p Pa55w0rd -U
|
||||
|
||||
This will upgrade the database sequentially until the database schema is using
|
||||
the latest version.
|
||||
|
||||
.. _process-command:
|
||||
|
||||
Process
|
||||
--------
|
||||
|
||||
This command allows for output processors to be run on data that was produced by
|
||||
a previous run.
|
||||
|
||||
There are 2 ways of specifying which processors you wish to use, either passing
|
||||
them directly as arguments to the process command with the ``--processor``
|
||||
argument or by providing an additional config file with the ``--config``
|
||||
argument. Please note that by default the process command will not rerun
|
||||
processors that have already been run during the run, in order to force a rerun
|
||||
of the processors you can specify the ``--force`` argument.
|
||||
|
||||
Additionally if you have a directory containing multiple run directories you can
|
||||
specify the ``--recursive`` argument which will cause WA to walk the specified
|
||||
directory processing all the WA output sub-directories individually.
|
||||
|
||||
|
||||
As an example if we had performed multiple experiments and have the various WA
|
||||
output directories in our ``my_experiments`` directory, and we now want to process
|
||||
the outputs with a tool that only supports CSV files. We can easily generate CSV
|
||||
files for all the runs contained in our directory using the CSV processor by
|
||||
using the following command::
|
||||
|
||||
wa process -r -p csv my_experiments
|
||||
|
||||
|
||||
.. _record_command:
|
||||
|
||||
Record
|
||||
------
|
||||
|
||||
This command simplifies the process of recording revent files. It will
|
||||
automatically deploy revent and has options to automatically open apps and
|
||||
record specified stages of a workload. Revent allows you to record raw inputs
|
||||
such as screen swipes or button presses. This can be useful for recording inputs
|
||||
for workloads such as games that don't have XML UI layouts that can be used with
|
||||
UIAutomator. As a drawback from this, revent recordings are specific to the
|
||||
device type they were recorded on. WA uses two parts to the names of revent
|
||||
recordings in the format, ``{device_name}.{suffix}.revent``. - device_name can
|
||||
either be specified manually with the ``-d`` argument or it can be automatically
|
||||
determined. On Android device it will be obtained from ``build.prop``, on Linux
|
||||
devices it is obtained from ``/proc/device-tree/model``. - suffix is used by WA
|
||||
to determine which part of the app execution the recording is for, currently
|
||||
these are either ``setup``, ``run``, ``extract_results`` or ``teardown``. All
|
||||
stages except ``run`` are optional for playback and to specify which stages
|
||||
should be recorded the ``-s``, ``-r``, ``-e`` or ``-t`` arguments respectively,
|
||||
or optionally ``-a`` to indicate all stages should be recorded.
|
||||
|
||||
|
||||
The full set of options for this command are::
|
||||
|
||||
usage: wa record [-h] [-c CONFIG] [-v] [--version] [-d DEVICE] [-o FILE] [-s]
|
||||
[-r] [-e] [-t] [-a] [-C] [-p PACKAGE | -w WORKLOAD]
|
||||
|
||||
optional arguments:
|
||||
-h, --help show this help message and exit
|
||||
-c CONFIG, --config CONFIG
|
||||
specify an additional config.yaml
|
||||
-v, --verbose The scripts will produce verbose output.
|
||||
--version show program's version number and exit
|
||||
-d DEVICE, --device DEVICE
|
||||
Specify the device on which to run. This will take
|
||||
precedence over the device (if any) specified in
|
||||
configuration.
|
||||
-o FILE, --output FILE
|
||||
Specify the output file
|
||||
-s, --setup Record a recording for setup stage
|
||||
-r, --run Record a recording for run stage
|
||||
-e, --extract_results Record a recording for extract_results stage
|
||||
-t, --teardown Record a recording for teardown stage
|
||||
-a, --all Record recordings for available stages
|
||||
-C, --clear Clear app cache before launching it
|
||||
-p PACKAGE, --package PACKAGE
|
||||
Android package to launch before recording
|
||||
-w WORKLOAD, --workload WORKLOAD
|
||||
Name of a revent workload (mostly games)
|
||||
|
||||
For more information please see :ref:`Revent Recording <revent-recording>`.
|
||||
|
||||
.. _replay-command:
|
||||
|
||||
Replay
|
||||
------
|
||||
|
||||
Alongside ``record`` wa also has a command to playback a single recorded revent
|
||||
file. It behaves similar to the ``record`` command taking a subset of the same
|
||||
options allowing you to automatically launch a package on the device ::
|
||||
|
||||
usage: wa replay [-h] [-c CONFIG] [-v] [--debug] [--version] [-p PACKAGE] [-C]
|
||||
revent
|
||||
|
||||
positional arguments:
|
||||
revent The name of the file to replay
|
||||
|
||||
optional arguments:
|
||||
-h, --help show this help message and exit
|
||||
-c CONFIG, --config CONFIG
|
||||
specify an additional config.py
|
||||
-v, --verbose The scripts will produce verbose output.
|
||||
--debug Enable debug mode. Note: this implies --verbose.
|
||||
--version show program's version number and exit
|
||||
-p PACKAGE, --package PACKAGE
|
||||
Package to launch before recording
|
||||
-C, --clear Clear app cache before launching it
|
||||
|
||||
For more information please see :ref:`Revent Replaying <revent_replaying>`.
|
@ -1,139 +0,0 @@
|
||||
.. _output_directory_structure:
|
||||
|
||||
Output Directory Structure
|
||||
==========================
|
||||
|
||||
This is an overview of WA output directory structure.
|
||||
|
||||
.. note:: In addition to files and subdirectories described here,
|
||||
other content may present in the output directory for
|
||||
a run, depending on the enabled augmentations.
|
||||
|
||||
Overview
|
||||
--------
|
||||
|
||||
The output directory will contain a subdirectory for every job iteration that
|
||||
was run, as well as some additional entries. The following diagram illustrates
|
||||
the typical structure of WA output directory::
|
||||
|
||||
wa_output/
|
||||
├── __meta/
|
||||
│ ├── config.json
|
||||
│ ├── jobs.json
|
||||
│ ├── raw_config
|
||||
│ │ ├── cfg0-config.yaml
|
||||
│ │ └── agenda.yaml
|
||||
│ ├── run_info.json
|
||||
│ └── target_info.json
|
||||
├── __failed/
|
||||
│ └── wk1-dhrystone-1-attempt1
|
||||
├── wk1-dhrystone-1/
|
||||
│ └── result.json
|
||||
├── wk1-dhrystone-2/
|
||||
│ └── result.json
|
||||
├── wk2-memcpy-1/
|
||||
│ └── result.json
|
||||
├── wk2-memcpy-2/
|
||||
│ └── result.json
|
||||
├── result.json
|
||||
└── run.log
|
||||
|
||||
This is the directory structure that would be generated after running two
|
||||
iterations each of ``dhrystone`` and ``memcpy`` workloads with no augmentations
|
||||
enabled, and with the first attempt at the first iteration of dhrystone having
|
||||
failed.
|
||||
|
||||
You may notice that a number of directories named ``wk*-x-x`` were generated in the
|
||||
output directory structure. Each of these directories represents a
|
||||
:term:`job`. The name of the output directory is as stated :ref:`here <job_execution_subd>`.
|
||||
|
||||
|
||||
Output Directory Entries
|
||||
------------------------
|
||||
|
||||
result.json
|
||||
Contains a JSON structure describing the result of the execution,
|
||||
including collected metrics and artifacts. There will be one for each
|
||||
job execution, and one for the overall run. The run ``result.json`` will
|
||||
only contain metrics/artifacts for the run as a whole, and will not
|
||||
contain results for individual jobs.
|
||||
|
||||
You typically would not access ``result.json`` files directly. Instead
|
||||
you would either enable augmentations to format the results in easier to
|
||||
manage form (such as CSV table), or use :ref:`output_processing_api` to
|
||||
access the results from scripts.
|
||||
|
||||
|
||||
run.log
|
||||
This is a log of everything that happened during the run, including all
|
||||
interactions with the target, and all the decisions made by the
|
||||
framework. The output is equivalent to what you would see on the console
|
||||
when running with ``--verbose`` option.
|
||||
|
||||
.. note:: WA source contains a syntax file for Vim that will color the
|
||||
initial part of each log line, in a similar way to what you
|
||||
see on the console. This may be useful for quickly spotting
|
||||
error and warning messages when scrolling through the log.
|
||||
|
||||
https://github.com/ARM-software/workload-automation/blob/next/extras/walog.vim
|
||||
|
||||
__meta
|
||||
This directory contains configuration and run metadata. See
|
||||
:ref:`config_and_meta` below for details.
|
||||
|
||||
__failed
|
||||
This directory will only be present if one or more job executions has
|
||||
failed and were re-run. This directory contains output directories for
|
||||
the failed attempts.
|
||||
|
||||
.. _job_execution_subd:
|
||||
|
||||
job execution output subdirectory
|
||||
Each subdirectory will be named ``<job id>_<workload label>_<iteration
|
||||
number>``, and will, at minimum, contain a ``result.json`` (see above).
|
||||
Additionally, it may contain raw output from the workload, and any
|
||||
additional artifacts (e.g. traces) generated by augmentations. Finally,
|
||||
if workload execution has failed, WA may gather some additional logging
|
||||
(such as the UI state at the time of failure) and place it here.
|
||||
|
||||
|
||||
.. _config_and_meta:
|
||||
|
||||
Configuration and Metadata
|
||||
--------------------------
|
||||
|
||||
As stated above, the ``__meta`` directory contains run configuration and
|
||||
metadata. Typically, you would not access these files directly, but would use
|
||||
the :ref:`output_processing_api` to query the metadata.
|
||||
|
||||
For more details about WA configuration see :ref:`configuration-specification`.
|
||||
|
||||
config.json
|
||||
Contains the overall run configuration, such as target interface
|
||||
configuration, and job execution order, and various "meta-configuration"
|
||||
settings, such as default output path, verbosity level, and logging
|
||||
formatting.
|
||||
|
||||
jobs.json
|
||||
Final configuration for all jobs, including enabled augmentations,
|
||||
workload and runtime parameters, etc.
|
||||
|
||||
raw_config
|
||||
This directory contains copies of config file(s) and the agenda that
|
||||
were parsed in order to generate configuration for this run. Each config
|
||||
file is prefixed with ``cfg<N>-``, where ``<N>`` is the number
|
||||
indicating the order (with respect to the other config files) in
|
||||
which it was parsed, e.g. ``cfg0-config.yaml`` is always a copy of
|
||||
``$WA_USER_DIRECTORY/config.yaml``. The one file without a prefix is the
|
||||
agenda.
|
||||
|
||||
run_info.json
|
||||
Run metadata, e.g. start/end timestamps and duration.
|
||||
|
||||
target_info.json
|
||||
Extensive information about the target. This includes information about
|
||||
the target's CPUS configuration, kernel and userspace versions, etc. The
|
||||
exact content will vary depending on the target type (Android vs Linux)
|
||||
and what could be accessed on a particular device (e.g. if
|
||||
``/proc/config.gz`` exists on the target, the kernel config will be
|
||||
included).
|
@ -1,245 +0,0 @@
|
||||
.. _runtime-parameters:
|
||||
|
||||
Runtime Parameters
|
||||
------------------
|
||||
|
||||
.. contents:: Contents
|
||||
:local:
|
||||
|
||||
Runtime parameters are options that can be specified to automatically configure
|
||||
device at runtime. They can be specified at the global level in the agenda or
|
||||
for individual workloads.
|
||||
|
||||
Example
|
||||
^^^^^^^
|
||||
Say we want to perform an experiment on an Android big.LITTLE device to compare
|
||||
the power consumption between the big and LITTLE clusters running the dhrystone
|
||||
and benchmarkpi workloads. Assuming we have additional instrumentation active
|
||||
for this device that can measure the power the device is consuming, to reduce
|
||||
external factors we want to ensure that the device has airplane mode turned on
|
||||
for all our tests and the screen is off only for our dhrystone run. We will then
|
||||
run 2 :ref:`sections <sections>` which will each enable a single cluster on the
|
||||
device, set the cores to their maximum frequency and disable all available idle
|
||||
states.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
runtime_parameters:
|
||||
airplane_mode: true
|
||||
#..
|
||||
workloads:
|
||||
- name: dhrystone
|
||||
iterations: 1
|
||||
runtime_parameters:
|
||||
screen_on: false
|
||||
unlock_screen: 'vertical'
|
||||
- name: benchmarkpi
|
||||
iterations: 1
|
||||
sections:
|
||||
- id: LITTLES
|
||||
runtime_parameters:
|
||||
num_little_cores: 4
|
||||
little_governor: userspace
|
||||
little_frequency: max
|
||||
little_idle_states: none
|
||||
num_big_cores: 0
|
||||
|
||||
- id: BIGS
|
||||
runtime_parameters:
|
||||
num_big_cores: 4
|
||||
big_governor: userspace
|
||||
big_frequency: max
|
||||
big_idle_states: none
|
||||
num_little_cores: 0
|
||||
|
||||
|
||||
HotPlug
|
||||
^^^^^^^
|
||||
|
||||
Parameters:
|
||||
|
||||
:num_cores: An ``int`` that specifies the total number of cpu cores to be online.
|
||||
|
||||
:num_<core_name>_cores: An ``int`` that specifies the total number of that particular core
|
||||
to be online, the target will be queried and if the core_names can
|
||||
be determined, a parameter for each of the unique core names will be
|
||||
available.
|
||||
|
||||
:cpu<core_no>_online: A ``boolean`` that specifies whether that particular cpu, e.g. cpu0 will
|
||||
be online.
|
||||
|
||||
If big.LITTLE is detected for the device an additional 2 parameters are available:
|
||||
|
||||
:num_big_cores: An ``int`` that specifies the total number of `big` cpu cores to be online.
|
||||
|
||||
:num_little_cores: An ``int`` that specifies the total number of `little` cpu cores to be online.
|
||||
|
||||
|
||||
|
||||
.. Note:: Please note that if the device in question is operating its own dynamic
|
||||
hotplugging then WA may be unable to set the CPU state or will be overridden.
|
||||
Unfortunately the method of disabling dynamic hot plugging will vary from
|
||||
device to device.
|
||||
|
||||
|
||||
CPUFreq
|
||||
^^^^^^^
|
||||
|
||||
:frequency: An ``int`` that can be used to specify a frequency for all cores if there are common frequencies available.
|
||||
|
||||
.. Note:: When setting the frequency, if the governor is not set to userspace then WA will attempt to set the maximum
|
||||
and minimum frequencies to mimic the desired behaviour.
|
||||
|
||||
:max_frequency: An ``int`` that can be used to specify a maximum frequency for all cores if there are common frequencies available.
|
||||
|
||||
:min_frequency: An ``int`` that can be used to specify a minimum frequency for all cores if there are common frequencies available.
|
||||
|
||||
:governor: A ``string`` that can be used to specify the governor for all cores if there are common governors available.
|
||||
|
||||
:governor: A ``string`` that can be used to specify the governor for all cores if there are common governors available.
|
||||
|
||||
:gov_tunables: A ``dict`` that can be used to specify governor
|
||||
tunables for all cores, unlike the other common parameters these are not
|
||||
validated at the beginning of the run therefore incorrect values will cause
|
||||
an error during runtime.
|
||||
|
||||
:<core_name>_frequency: An ``int`` that can be used to specify a frequency for cores of a particular type e.g. 'A72'.
|
||||
|
||||
:<core_name>_max_frequency: An ``int`` that can be used to specify a maximum frequency for cores of a particular type e.g. 'A72'.
|
||||
|
||||
:<core_name>_min_frequency: An ``int`` that can be used to specify a minimum frequency for cores of a particular type e.g. 'A72'.
|
||||
|
||||
:<core_name>_governor: A ``string`` that can be used to specify the governor for cores of a particular type e.g. 'A72'.
|
||||
|
||||
:<core_name>_governor: A ``string`` that can be used to specify the governor for cores of a particular type e.g. 'A72'.
|
||||
|
||||
:<core_name>_gov_tunables: A ``dict`` that can be used to specify governor
|
||||
tunables for cores of a particular type e.g. 'A72', these are not
|
||||
validated at the beginning of the run therefore incorrect values will cause
|
||||
an error during runtime.
|
||||
|
||||
|
||||
:cpu<no>_frequency: An ``int`` that can be used to specify a frequency for a particular core e.g. 'cpu0'.
|
||||
|
||||
:cpu<no>_max_frequency: An ``int`` that can be used to specify a maximum frequency for a particular core e.g. 'cpu0'.
|
||||
|
||||
:cpu<no>_min_frequency: An ``int`` that can be used to specify a minimum frequency for a particular core e.g. 'cpu0'.
|
||||
|
||||
:cpu<no>_governor: A ``string`` that can be used to specify the governor for a particular core e.g. 'cpu0'.
|
||||
|
||||
:cpu<no>_governor: A ``string`` that can be used to specify the governor for a particular core e.g. 'cpu0'.
|
||||
|
||||
:cpu<no>_gov_tunables: A ``dict`` that can be used to specify governor
|
||||
tunables for a particular core e.g. 'cpu0', these are not
|
||||
validated at the beginning of the run therefore incorrect values will cause
|
||||
an error during runtime.
|
||||
|
||||
|
||||
If big.LITTLE is detected for the device an additional set of parameters are available:
|
||||
|
||||
:big_frequency: An ``int`` that can be used to specify a frequency for the big cores.
|
||||
|
||||
:big_max_frequency: An ``int`` that can be used to specify a maximum frequency for the big cores.
|
||||
|
||||
:big_min_frequency: An ``int`` that can be used to specify a minimum frequency for the big cores.
|
||||
|
||||
:big_governor: A ``string`` that can be used to specify the governor for the big cores.
|
||||
|
||||
:big_governor: A ``string`` that can be used to specify the governor for the big cores.
|
||||
|
||||
:big_gov_tunables: A ``dict`` that can be used to specify governor
|
||||
tunables for the big cores, these are not
|
||||
validated at the beginning of the run therefore incorrect values will cause
|
||||
an error during runtime.
|
||||
|
||||
:little_frequency: An ``int`` that can be used to specify a frequency for the little cores.
|
||||
|
||||
:little_max_frequency: An ``int`` that can be used to specify a maximum frequency for the little cores.
|
||||
|
||||
:little_min_frequency: An ``int`` that can be used to specify a minimum frequency for the little cores.
|
||||
|
||||
:little_governor: A ``string`` that can be used to specify the governor for the little cores.
|
||||
|
||||
:little_governor: A ``string`` that can be used to specify the governor for the little cores.
|
||||
|
||||
:little_gov_tunables: A ``dict`` that can be used to specify governor
|
||||
tunables for the little cores, these are not
|
||||
validated at the beginning of the run therefore incorrect values will cause
|
||||
an error during runtime.
|
||||
|
||||
|
||||
CPUIdle
|
||||
^^^^^^^
|
||||
|
||||
:idle_states: A ``string`` or list of strings which can be used to specify what
|
||||
idle states should be enabled for all cores if there are common
|
||||
idle states available. 'all' and 'none' are also valid entries as a
|
||||
shorthand
|
||||
|
||||
:<core_name>_idle_states: A ``string`` or list of strings which can be used to
|
||||
specify what idle states should be enabled for cores of a particular type
|
||||
e.g. 'A72'. 'all' and 'none' are also valid entries as a shorthand
|
||||
:cpu<no>_idle_states: A ``string`` or list of strings which can be used to
|
||||
specify what idle states should be enabled for a particular core e.g.
|
||||
'cpu0'. 'all' and 'none' are also valid entries as a shorthand
|
||||
|
||||
If big.LITTLE is detected for the device an additional set of parameters is available:
|
||||
|
||||
:big_idle_states: A ``string`` or list of strings which can be used to specify
|
||||
what idle states should be enabled for the big cores. 'all' and 'none' are
|
||||
also valid entries as a shorthand
|
||||
:little_idle_states: A ``string`` or list of strings which can be used to
|
||||
specify what idle states should be enabled for the little cores. 'all' and
|
||||
'none' are also valid entries as a shorthand.
|
||||
|
||||
|
||||
Android Specific Runtime Parameters
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
:brightness: An ``int`` between 0 and 255 (inclusive) to specify the brightness
|
||||
the screen should be set to. Defaults to ``127``.
|
||||
|
||||
:airplane_mode: A ``boolean`` to specify whether airplane mode should be
|
||||
enabled for the device.
|
||||
|
||||
:rotation: A ``String`` to specify the screen orientation for the device. Valid
|
||||
entries are ``NATURAL``, ``LEFT``, ``INVERTED``, ``RIGHT``.
|
||||
|
||||
:screen_on: A ``boolean`` to specify whether the devices screen should be
|
||||
turned on. Defaults to ``True``.
|
||||
|
||||
:unlock_screen: A ``String`` to specify how the devices screen should be
|
||||
unlocked. Unlocking screen is disabled by default. ``vertical``, ``diagonal``
|
||||
and ``horizontal`` are the supported values (see :meth:`devlib.AndroidTarget.swipe_to_unlock`).
|
||||
Note that unlocking succeeds when no passcode is set. Since unlocking screen
|
||||
requires turning on the screen, this option overrides value of ``screen_on``
|
||||
option.
|
||||
|
||||
.. _setting-sysfiles:
|
||||
|
||||
Setting Sysfiles
|
||||
^^^^^^^^^^^^^^^^
|
||||
In order to perform additional configuration of a target the ``sysfile_values``
|
||||
runtime parameter can be used. The value for this parameter is a mapping (an
|
||||
associative array, in YAML) of file paths onto values that should be written
|
||||
into those files. ``sysfile_values`` is the only runtime parameter that is
|
||||
available for any (Linux) device. Other runtime parameters will depend on the
|
||||
specifics of the device used (e.g. its CPU cores configuration) as detailed
|
||||
above.
|
||||
|
||||
.. note:: By default WA will attempt to verify that any sysfile values were
|
||||
written correctly by reading the node back and comparing the two values. If
|
||||
you do not wish this check to happen, for example the node you are writing to
|
||||
is write only, you can append an ``!`` to the file path to disable this
|
||||
verification.
|
||||
|
||||
For example the following configuration could be used to enable and verify that cpu0
|
||||
is online, however will not attempt to check that its governor has been set to
|
||||
userspace::
|
||||
|
||||
- name: dhrystone
|
||||
runtime_params:
|
||||
sysfile_values:
|
||||
/sys/devices/system/cpu/cpu0/online: 1
|
||||
/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor!: userspace
|
BIN
doc/source/wa-execution.png
Normal file
BIN
doc/source/wa-execution.png
Normal file
Binary file not shown.
After ![]() (image error) Size: 102 KiB |
1031
doc/source/writing_extensions.rst
Normal file
1031
doc/source/writing_extensions.rst
Normal file
File diff suppressed because it is too large
Load Diff
@ -1,148 +0,0 @@
|
||||
# This Dockerfile creates an image for use with Workload Automation
|
||||
# and/or devlib.
|
||||
#
|
||||
# To build this Docker image, please run the following command from
|
||||
# this directory:
|
||||
#
|
||||
# docker build -t wa .
|
||||
#
|
||||
# This will create an image called wa, which is preconfigured to
|
||||
# run WA and devlib. Please note that the build process automatically
|
||||
# accepts the licenses for the Android SDK, so please be sure that you
|
||||
# are willing to accept these prior to building and running the image
|
||||
# in a container.
|
||||
#
|
||||
# To run the container, please run the following command from the
|
||||
# directory you wish to work from:
|
||||
#
|
||||
# docker run -it --privileged -v /dev/bus/usb:/dev/bus/usb --volume ${PWD}:/workspace --workdir /workspace wa
|
||||
#
|
||||
# If using selinux you may need to add the `z` option when mounting
|
||||
# volumes e.g.:
|
||||
# --volume ${PWD}:/workspace:z
|
||||
# Warning: Please ensure you do not use this option when mounting
|
||||
# system directories. For more information please see:
|
||||
# https://docs.docker.com/storage/bind-mounts/#configure-the-selinux-label
|
||||
#
|
||||
# The above command starts the container in privileged mode, with
|
||||
# access to USB devices. The current directory is mounted into the
|
||||
# image, allowing you to work from there. Any files written to this
|
||||
# directory are directly written to the host. Additional "volumes",
|
||||
# such as required assets, can be mounted into the container using a
|
||||
# second --volume command.
|
||||
#
|
||||
# If you require access to a TTY from the Docker container, please
|
||||
# also mount this into the container in the same style as is used to
|
||||
# mount USB devices. For example:
|
||||
#
|
||||
# docker run -it --privileged -v /dev/ttyUSB0:/dev/ttyUSB0 -v /dev/bus/usb:/dev/bus/usb --volume ${PWD}:/workspace --workdir /workspace wa
|
||||
#
|
||||
# When you are finished, please run `exit` to leave the container.
|
||||
#
|
||||
# The relevant environment variables are stored in a separate
|
||||
# file which is automatically sourced in an interactive shell.
|
||||
# If running from a non-interactive environment this can
|
||||
# be manually sourced with `source /home/wa/.wa_environment`
|
||||
#
|
||||
# NOTE: Please make sure that the ADB server is NOT running on the
|
||||
# host. If in doubt, run `adb kill-server` before running the docker
|
||||
# container.
|
||||
#
|
||||
|
||||
# We want to make sure to base this on a recent ubuntu release
|
||||
FROM ubuntu:20.04
|
||||
|
||||
# Please update the references below to use different versions of
|
||||
# devlib, WA or the Android SDK
|
||||
ARG DEVLIB_REF=v1.3.4
|
||||
ARG WA_REF=v3.3.1
|
||||
ARG ANDROID_SDK_URL=https://dl.google.com/android/repository/sdk-tools-linux-3859397.zip
|
||||
|
||||
# Set a default timezone to use
|
||||
ENV TZ=Europe/London
|
||||
|
||||
ARG DEBIAN_FRONTEND=noninteractive
|
||||
RUN apt-get update && apt-get install -y \
|
||||
apache2-utils \
|
||||
bison \
|
||||
cmake \
|
||||
curl \
|
||||
emacs \
|
||||
flex \
|
||||
git \
|
||||
libcdk5-dev \
|
||||
libiio-dev \
|
||||
libxml2 \
|
||||
libxml2-dev \
|
||||
locales \
|
||||
nano \
|
||||
openjdk-8-jre-headless \
|
||||
python3 \
|
||||
python3-pip \
|
||||
ssh \
|
||||
sshpass \
|
||||
sudo \
|
||||
trace-cmd \
|
||||
usbutils \
|
||||
vim \
|
||||
wget \
|
||||
zip
|
||||
|
||||
# Clone and download iio-capture
|
||||
RUN git clone -v https://github.com/BayLibre/iio-capture.git /tmp/iio-capture && \
|
||||
cd /tmp/iio-capture && \
|
||||
make && \
|
||||
make install
|
||||
|
||||
RUN pip3 install pandas
|
||||
|
||||
# Ensure we're using utf-8 as our default encoding
|
||||
RUN locale-gen en_US.UTF-8
|
||||
ENV LANG en_US.UTF-8
|
||||
ENV LANGUAGE en_US:en
|
||||
ENV LC_ALL en_US.UTF-8
|
||||
|
||||
# Let's get the two repos we need, and install them
|
||||
RUN git clone -v https://github.com/ARM-software/devlib.git /tmp/devlib && \
|
||||
cd /tmp/devlib && \
|
||||
git checkout $DEVLIB_REF && \
|
||||
python3 setup.py install && \
|
||||
pip3 install .[full]
|
||||
RUN git clone -v https://github.com/ARM-software/workload-automation.git /tmp/wa && \
|
||||
cd /tmp/wa && \
|
||||
git checkout $WA_REF && \
|
||||
python3 setup.py install && \
|
||||
pip3 install .[all]
|
||||
|
||||
# Clean-up
|
||||
RUN rm -R /tmp/devlib /tmp/wa
|
||||
|
||||
# Create and switch to the wa user
|
||||
RUN useradd -m -G plugdev,dialout wa
|
||||
USER wa
|
||||
|
||||
# Let's set up the Android SDK for the user
|
||||
RUN mkdir -p /home/wa/.android
|
||||
RUN mkdir -p /home/wa/AndroidSDK && cd /home/wa/AndroidSDK && wget $ANDROID_SDK_URL -O sdk.zip && unzip sdk.zip
|
||||
RUN cd /home/wa/AndroidSDK/tools/bin && yes | ./sdkmanager --licenses && ./sdkmanager platform-tools && ./sdkmanager 'build-tools;27.0.3'
|
||||
|
||||
# Download Monsoon
|
||||
RUN mkdir -p /home/wa/monsoon
|
||||
RUN curl https://android.googlesource.com/platform/cts/+/master/tools/utils/monsoon.py\?format\=TEXT | base64 --decode > /home/wa/monsoon/monsoon.py
|
||||
RUN chmod +x /home/wa/monsoon/monsoon.py
|
||||
|
||||
# Update WA's required environment variables.
|
||||
RUN echo 'export PATH=/home/wa/monsoon:${PATH}' >> /home/wa/.wa_environment
|
||||
RUN echo 'export PATH=/home/wa/AndroidSDK/platform-tools:${PATH}' >> /home/wa/.wa_environment
|
||||
RUN echo 'export PATH=/home/wa/AndroidSDK/build-tools:${PATH}' >> /home/wa/.wa_environment
|
||||
RUN echo 'export ANDROID_HOME=/home/wa/AndroidSDK' >> /home/wa/.wa_environment
|
||||
|
||||
# Source WA environment variables in an interactive environment
|
||||
RUN echo 'source /home/wa/.wa_environment' >> /home/wa/.bashrc
|
||||
|
||||
# Generate some ADB keys. These will change each time the image is built but will otherwise persist.
|
||||
RUN /home/wa/AndroidSDK/platform-tools/adb keygen /home/wa/.android/adbkey
|
||||
|
||||
# We need to make sure to add the remote assets too
|
||||
RUN wa --version && echo 'remote_assets_url: https://raw.githubusercontent.com/ARM-software/workload-automation-assets/master/dependencies' >> /home/wa/.workload_automation/config.yaml
|
||||
|
@ -1,20 +1,12 @@
|
||||
This directory is intended for miscellaneous extra stuff that may be
|
||||
useful while developing Workload Automation. It should *NOT* contain
|
||||
anything necessary for *using* workload automation. Whenever you add
|
||||
something to this directory, please also add a short description of
|
||||
what it is in this file.
|
||||
|
||||
Dockerfile
|
||||
Docker file for generating a Docker image containing WA,
|
||||
devlib, and the required parts of the Android SDK. This can be
|
||||
run in a container to avoid configuring WA on the host. Should
|
||||
work "out of the box".
|
||||
This directory is intended for miscellaneous extra stuff that may be useful while developing
|
||||
Workload Automation. It should *NOT* contain anything necessary for *using* workload automation.
|
||||
Whenever you add something to this directory, please also add a short description of what it is in
|
||||
this file.
|
||||
|
||||
pylintrc
|
||||
pylint configuration file set up for WA development (see
|
||||
comment at the top of the file for how to use).
|
||||
pylint configuration file set up for WA development (see comment at the top of the file
|
||||
for how to use).
|
||||
|
||||
walog.vim
|
||||
Vim syntax file for WA logs; adds highlighting similar to what
|
||||
comes out in the console. See comment in the file for how to
|
||||
enable it.
|
||||
Vim syntax file for WA logs; adds highlighting similar to what comes out
|
||||
in the console. See comment in the file for how to enable it.
|
||||
|
@ -43,7 +43,7 @@ ignore=external
|
||||
# https://bitbucket.org/logilab/pylint/issue/232/wrong-hanging-indentation-false-positive
|
||||
# TODO: disabling no-value-for-parameter and logging-format-interpolation, as they appear to be broken
|
||||
# in version 1.4.1 and return a lot of false postives; should be re-enabled once fixed.
|
||||
disable=C0301,C0103,C0111,W0142,R0903,R0904,R0922,W0511,W0141,I0011,R0921,W1401,C0330,no-value-for-parameter,logging-format-interpolation,no-else-return,inconsistent-return-statements,keyword-arg-before-vararg,consider-using-enumerate,no-member,super-with-arguments,useless-object-inheritance,raise-missing-from,no-else-raise,no-else-break,no-else-continue
|
||||
disable=C0301,C0103,C0111,W0142,R0903,R0904,R0922,W0511,W0141,I0011,R0921,W1401,C0330,no-value-for-parameter,logging-format-interpolation
|
||||
|
||||
[FORMAT]
|
||||
max-module-lines=4000
|
||||
|
@ -1,3 +0,0 @@
|
||||
[pytest]
|
||||
filterwarnings=
|
||||
ignore::DeprecationWarning:past[.*]
|
@ -1,30 +0,0 @@
|
||||
bcrypt==4.0.1
|
||||
certifi==2024.7.4
|
||||
cffi==1.15.1
|
||||
charset-normalizer==3.1.0
|
||||
colorama==0.4.6
|
||||
cryptography==43.0.1
|
||||
devlib==1.3.4
|
||||
future==0.18.3
|
||||
idna==3.7
|
||||
Louie-latest==1.3.1
|
||||
lxml==4.9.2
|
||||
nose==1.3.7
|
||||
numpy==1.24.3
|
||||
pandas==2.0.1
|
||||
paramiko==3.4.0
|
||||
pexpect==4.8.0
|
||||
ptyprocess==0.7.0
|
||||
pycparser==2.21
|
||||
PyNaCl==1.5.0
|
||||
pyserial==3.5
|
||||
python-dateutil==2.8.2
|
||||
pytz==2023.3
|
||||
PyYAML==6.0
|
||||
requests==2.32.0
|
||||
scp==0.14.5
|
||||
six==1.16.0
|
||||
tzdata==2023.3
|
||||
urllib3==1.26.19
|
||||
wlauto==3.3.1
|
||||
wrapt==1.15.0
|
@ -1,3 +1,4 @@
|
||||
#!/bin/bash
|
||||
# Copyright 2013-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@ -12,6 +13,5 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
wa create workload $@
|
||||
|
||||
dhrystone: dhrystone.c
|
||||
$(CROSS_COMPILE)gcc -O3 -static dhrystone.c -o dhrystone
|
16
scripts/list_extensions
Normal file
16
scripts/list_extensions
Normal file
@ -0,0 +1,16 @@
|
||||
#!/bin/bash
|
||||
# Copyright 2013-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
wa list $@
|
5
wa/workloads/memcpy/src/build.sh → scripts/run_workloads
Executable file → Normal file
5
wa/workloads/memcpy/src/build.sh → scripts/run_workloads
Executable file → Normal file
@ -1,4 +1,5 @@
|
||||
# Copyright 2013-2017 ARM Limited
|
||||
#!/bin/bash
|
||||
# Copyright 2013-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@ -12,5 +13,5 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
wa run $@
|
||||
|
||||
${CROSS_COMPILE}gcc -static memcopy.c -o memcopy
|
@ -13,5 +13,5 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
from wa.framework.entrypoint import main
|
||||
from wlauto.core.entry_point import main
|
||||
main()
|
||||
|
81
setup.py
Executable file → Normal file
81
setup.py
Executable file → Normal file
@ -20,19 +20,16 @@ from itertools import chain
|
||||
|
||||
try:
|
||||
from setuptools import setup
|
||||
from setuptools.command.sdist import sdist as orig_sdist
|
||||
except ImportError:
|
||||
from distutils.core import setup
|
||||
from distutils.command.sdist import sdist as orig_sdist
|
||||
|
||||
|
||||
wa_dir = os.path.join(os.path.dirname(__file__), 'wa')
|
||||
wlauto_dir = os.path.join(os.path.dirname(__file__), 'wlauto')
|
||||
|
||||
sys.path.insert(0, os.path.join(wa_dir, 'framework'))
|
||||
from version import (get_wa_version, get_wa_version_with_commit,
|
||||
format_version, required_devlib_version)
|
||||
sys.path.insert(0, os.path.join(wlauto_dir, 'core'))
|
||||
from version import get_wa_version
|
||||
|
||||
# happens if falling back to distutils
|
||||
# happends if falling back to distutils
|
||||
warnings.filterwarnings('ignore', "Unknown distribution option: 'install_requires'")
|
||||
warnings.filterwarnings('ignore', "Unknown distribution option: 'extras_require'")
|
||||
|
||||
@ -42,9 +39,9 @@ except OSError:
|
||||
pass
|
||||
|
||||
packages = []
|
||||
data_files = {'': [os.path.join(wa_dir, 'commands', 'postgres_schema.sql')]}
|
||||
data_files = {}
|
||||
source_dir = os.path.dirname(__file__)
|
||||
for root, dirs, files in os.walk(wa_dir):
|
||||
for root, dirs, files in os.walk(wlauto_dir):
|
||||
rel_dir = os.path.relpath(root, source_dir)
|
||||
data = []
|
||||
if '__init__.py' in files:
|
||||
@ -62,81 +59,43 @@ for root, dirs, files in os.walk(wa_dir):
|
||||
|
||||
scripts = [os.path.join('scripts', s) for s in os.listdir('scripts')]
|
||||
|
||||
with open("README.rst", "r") as fh:
|
||||
long_description = fh.read()
|
||||
|
||||
devlib_version = format_version(required_devlib_version)
|
||||
params = dict(
|
||||
name='wlauto',
|
||||
description='A framework for automating workload execution and measurement collection on ARM devices.',
|
||||
long_description=long_description,
|
||||
version=get_wa_version_with_commit(),
|
||||
description='A framework for automating workload execution and measurment collection on ARM devices.',
|
||||
version=get_wa_version(),
|
||||
packages=packages,
|
||||
package_data=data_files,
|
||||
include_package_data=True,
|
||||
scripts=scripts,
|
||||
url='https://github.com/ARM-software/workload-automation',
|
||||
url='N/A',
|
||||
license='Apache v2',
|
||||
maintainer='ARM Architecture & Technology Device Lab',
|
||||
maintainer_email='workload-automation@arm.com',
|
||||
python_requires='>= 3.7',
|
||||
setup_requires=[
|
||||
'numpy<=1.16.4; python_version<"3"',
|
||||
'numpy; python_version>="3"',
|
||||
],
|
||||
install_requires=[
|
||||
'python-dateutil', # converting between UTC and local time.
|
||||
'pexpect>=3.3', # Send/receive to/from device
|
||||
'pexpect>=3.3', # Send/recieve to/from device
|
||||
'pyserial', # Serial port interface
|
||||
'colorama', # Printing with colors
|
||||
'pyYAML>=5.1b3', # YAML-formatted agenda parsing
|
||||
'requests', # Fetch assets over HTTP
|
||||
'devlib>={}'.format(devlib_version), # Interacting with devices
|
||||
'louie-latest', # callbacks dispatch
|
||||
'wrapt', # better decorators
|
||||
'pandas>=0.23.0,<=0.24.2; python_version<"3.5.3"', # Data analysis and manipulation
|
||||
'pandas>=0.23.0; python_version>="3.5.3"', # Data analysis and manipulation
|
||||
'future', # Python 2-3 compatiblity
|
||||
'pyYAML', # YAML-formatted agenda parsing
|
||||
'requests', # Fetch assets over HTTP
|
||||
],
|
||||
dependency_links=['https://github.com/ARM-software/devlib/tarball/master#egg=devlib-{}'.format(devlib_version)],
|
||||
extras_require={
|
||||
'test': ['nose', 'mock'],
|
||||
'other': ['jinja2', 'pandas>=0.13.1'],
|
||||
'test': ['nose'],
|
||||
'mongodb': ['pymongo'],
|
||||
'notify': ['notify2'],
|
||||
'doc': ['sphinx', 'sphinx_rtd_theme'],
|
||||
'postgres': ['psycopg2-binary'],
|
||||
'daq': ['daqpower'],
|
||||
'doc': ['sphinx'],
|
||||
},
|
||||
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
|
||||
classifiers=[
|
||||
'Development Status :: 5 - Production/Stable',
|
||||
'Development Status :: 4 - Beta',
|
||||
'Environment :: Console',
|
||||
'License :: OSI Approved :: Apache Software License',
|
||||
'Operating System :: POSIX :: Linux',
|
||||
'Programming Language :: Python :: 3',
|
||||
'Programming Language :: Python :: 2.7',
|
||||
],
|
||||
)
|
||||
|
||||
all_extras = list(chain(iter(params['extras_require'].values())))
|
||||
params['extras_require']['all'] = all_extras
|
||||
|
||||
|
||||
class sdist(orig_sdist):
|
||||
|
||||
user_options = orig_sdist.user_options + [
|
||||
('strip-commit', 's',
|
||||
"Strip git commit hash from package version ")
|
||||
]
|
||||
|
||||
def initialize_options(self):
|
||||
orig_sdist.initialize_options(self)
|
||||
self.strip_commit = False
|
||||
|
||||
def run(self):
|
||||
if self.strip_commit:
|
||||
self.distribution.get_version = get_wa_version
|
||||
orig_sdist.run(self)
|
||||
|
||||
|
||||
params['cmdclass'] = {'sdist': sdist}
|
||||
all_extras = list(chain(params['extras_require'].itervalues()))
|
||||
params['extras_require']['everything'] = all_extras
|
||||
|
||||
setup(**params)
|
||||
|
@ -1,23 +0,0 @@
|
||||
config:
|
||||
iterations: 1
|
||||
augmentations:
|
||||
- ~~
|
||||
- status
|
||||
device: generic_local
|
||||
device_config:
|
||||
big_core: null
|
||||
core_clusters: null
|
||||
core_names: null
|
||||
executables_directory: null
|
||||
keep_password: true
|
||||
load_default_modules: false
|
||||
model: null
|
||||
modules: null
|
||||
password: null
|
||||
shell_prompt: !<tag:wa:regex> '40:^.*(shell|root|juno)@?.*:[/~]\S* *[#$] '
|
||||
unrooted: True
|
||||
working_directory: null
|
||||
workloads:
|
||||
- name: idle
|
||||
params:
|
||||
duration: 1
|
@ -1,6 +0,0 @@
|
||||
config:
|
||||
# tab on the following line
|
||||
reboot_policy: never
|
||||
workloads:
|
||||
- antutu
|
||||
|
@ -1,7 +0,0 @@
|
||||
config:
|
||||
augmentations: [~execution_time]
|
||||
include#: configs/test.yaml
|
||||
sections:
|
||||
- include#: sections/section1.yaml
|
||||
- include#: sections/section2.yaml
|
||||
include#: workloads.yaml
|
@ -1 +0,0 @@
|
||||
augmentations: [cpufreq, trace-cmd]
|
@ -1,2 +0,0 @@
|
||||
classifiers:
|
||||
included: true
|
@ -1 +0,0 @@
|
||||
classifiers: {'section': 'one'}
|
@ -1,2 +0,0 @@
|
||||
classifiers: {'section': 'two'}
|
||||
include#: ../section-include.yaml
|
@ -1,2 +0,0 @@
|
||||
augmentations: [execution_time]
|
||||
|
@ -1,5 +0,0 @@
|
||||
workloads:
|
||||
- dhrystone
|
||||
- name: memcpy
|
||||
classifiers:
|
||||
memcpy: True
|
@ -1,242 +0,0 @@
|
||||
# Copyright 2013-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
# pylint: disable=E0611
|
||||
# pylint: disable=R0201
|
||||
import os
|
||||
import sys
|
||||
from collections import defaultdict
|
||||
from unittest import TestCase
|
||||
|
||||
from nose.tools import assert_equal, assert_in, raises, assert_true
|
||||
|
||||
|
||||
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
|
||||
os.environ['WA_USER_DIRECTORY'] = os.path.join(DATA_DIR, 'includes')
|
||||
|
||||
from wa.framework.configuration.execution import ConfigManager
|
||||
from wa.framework.configuration.parsers import AgendaParser
|
||||
from wa.framework.exception import ConfigError
|
||||
from wa.utils.serializer import yaml
|
||||
from wa.utils.types import reset_all_counters
|
||||
|
||||
|
||||
YAML_TEST_FILE = os.path.join(DATA_DIR, 'test-agenda.yaml')
|
||||
YAML_BAD_SYNTAX_FILE = os.path.join(DATA_DIR, 'bad-syntax-agenda.yaml')
|
||||
INCLUDES_TEST_FILE = os.path.join(DATA_DIR, 'includes', 'agenda.yaml')
|
||||
|
||||
invalid_agenda_text = """
|
||||
workloads:
|
||||
- id: 1
|
||||
workload_parameters:
|
||||
test: 1
|
||||
"""
|
||||
|
||||
duplicate_agenda_text = """
|
||||
global:
|
||||
iterations: 1
|
||||
workloads:
|
||||
- id: 1
|
||||
workload_name: antutu
|
||||
workload_parameters:
|
||||
test: 1
|
||||
- id: "1"
|
||||
workload_name: benchmarkpi
|
||||
"""
|
||||
|
||||
short_agenda_text = """
|
||||
workloads: [antutu, dhrystone, benchmarkpi]
|
||||
"""
|
||||
|
||||
default_ids_agenda_text = """
|
||||
workloads:
|
||||
- antutu
|
||||
- id: wk1
|
||||
name: benchmarkpi
|
||||
- id: test
|
||||
name: dhrystone
|
||||
params:
|
||||
cpus: 1
|
||||
- vellamo
|
||||
"""
|
||||
|
||||
sectioned_agenda_text = """
|
||||
sections:
|
||||
- id: sec1
|
||||
runtime_params:
|
||||
dp: one
|
||||
workloads:
|
||||
- name: antutu
|
||||
workload_parameters:
|
||||
markers_enabled: True
|
||||
- benchmarkpi
|
||||
- name: dhrystone
|
||||
runtime_params:
|
||||
dp: two
|
||||
- id: sec2
|
||||
runtime_params:
|
||||
dp: three
|
||||
workloads:
|
||||
- antutu
|
||||
workloads:
|
||||
- memcpy
|
||||
"""
|
||||
|
||||
dup_sectioned_agenda_text = """
|
||||
sections:
|
||||
- id: sec1
|
||||
workloads:
|
||||
- antutu
|
||||
- id: sec1
|
||||
workloads:
|
||||
- benchmarkpi
|
||||
workloads:
|
||||
- memcpy
|
||||
"""
|
||||
|
||||
yaml_anchors_agenda_text = """
|
||||
workloads:
|
||||
- name: dhrystone
|
||||
params: &dhrystone_single_params
|
||||
cleanup_assets: true
|
||||
cpus: 0
|
||||
delay: 3
|
||||
duration: 0
|
||||
mloops: 10
|
||||
threads: 1
|
||||
- name: dhrystone
|
||||
params:
|
||||
<<: *dhrystone_single_params
|
||||
threads: 4
|
||||
"""
|
||||
|
||||
|
||||
class AgendaTest(TestCase):
|
||||
|
||||
def setUp(self):
|
||||
reset_all_counters()
|
||||
self.config = ConfigManager()
|
||||
self.parser = AgendaParser()
|
||||
|
||||
def test_yaml_load(self):
|
||||
self.parser.load_from_path(self.config, YAML_TEST_FILE)
|
||||
assert_equal(len(self.config.jobs_config.root_node.workload_entries), 4)
|
||||
|
||||
def test_duplicate_id(self):
|
||||
duplicate_agenda = yaml.load(duplicate_agenda_text)
|
||||
|
||||
try:
|
||||
self.parser.load(self.config, duplicate_agenda, 'test')
|
||||
except ConfigError as e:
|
||||
assert_in('duplicate', e.message.lower()) # pylint: disable=E1101
|
||||
else:
|
||||
raise Exception('ConfigError was not raised for an agenda with duplicate ids.')
|
||||
|
||||
def test_yaml_missing_field(self):
|
||||
invalid_agenda = yaml.load(invalid_agenda_text)
|
||||
|
||||
try:
|
||||
self.parser.load(self.config, invalid_agenda, 'test')
|
||||
except ConfigError as e:
|
||||
assert_in('workload name', e.message)
|
||||
else:
|
||||
raise Exception('ConfigError was not raised for an invalid agenda.')
|
||||
|
||||
def test_defaults(self):
|
||||
short_agenda = yaml.load(short_agenda_text)
|
||||
self.parser.load(self.config, short_agenda, 'test')
|
||||
|
||||
workload_entries = self.config.jobs_config.root_node.workload_entries
|
||||
assert_equal(len(workload_entries), 3)
|
||||
assert_equal(workload_entries[0].config['workload_name'], 'antutu')
|
||||
assert_equal(workload_entries[0].id, 'wk1')
|
||||
|
||||
def test_default_id_assignment(self):
|
||||
default_ids_agenda = yaml.load(default_ids_agenda_text)
|
||||
|
||||
self.parser.load(self.config, default_ids_agenda, 'test2')
|
||||
workload_entries = self.config.jobs_config.root_node.workload_entries
|
||||
assert_equal(workload_entries[0].id, 'wk2')
|
||||
assert_equal(workload_entries[3].id, 'wk3')
|
||||
|
||||
def test_sections(self):
|
||||
sectioned_agenda = yaml.load(sectioned_agenda_text)
|
||||
self.parser.load(self.config, sectioned_agenda, 'test')
|
||||
|
||||
root_node_workload_entries = self.config.jobs_config.root_node.workload_entries
|
||||
leaves = list(self.config.jobs_config.root_node.leaves())
|
||||
section1_workload_entries = leaves[0].workload_entries
|
||||
section2_workload_entries = leaves[0].workload_entries
|
||||
|
||||
assert_equal(root_node_workload_entries[0].config['workload_name'], 'memcpy')
|
||||
assert_true(section1_workload_entries[0].config['workload_parameters']['markers_enabled'])
|
||||
assert_equal(section2_workload_entries[0].config['workload_name'], 'antutu')
|
||||
|
||||
def test_yaml_anchors(self):
|
||||
yaml_anchors_agenda = yaml.load(yaml_anchors_agenda_text)
|
||||
self.parser.load(self.config, yaml_anchors_agenda, 'test')
|
||||
|
||||
workload_entries = self.config.jobs_config.root_node.workload_entries
|
||||
assert_equal(len(workload_entries), 2)
|
||||
assert_equal(workload_entries[0].config['workload_name'], 'dhrystone')
|
||||
assert_equal(workload_entries[0].config['workload_parameters']['threads'], 1)
|
||||
assert_equal(workload_entries[0].config['workload_parameters']['delay'], 3)
|
||||
assert_equal(workload_entries[1].config['workload_name'], 'dhrystone')
|
||||
assert_equal(workload_entries[1].config['workload_parameters']['threads'], 4)
|
||||
assert_equal(workload_entries[1].config['workload_parameters']['delay'], 3)
|
||||
|
||||
@raises(ConfigError)
|
||||
def test_dup_sections(self):
|
||||
dup_sectioned_agenda = yaml.load(dup_sectioned_agenda_text)
|
||||
self.parser.load(self.config, dup_sectioned_agenda, 'test')
|
||||
|
||||
@raises(ConfigError)
|
||||
def test_bad_syntax(self):
|
||||
self.parser.load_from_path(self.config, YAML_BAD_SYNTAX_FILE)
|
||||
|
||||
|
||||
class FakeTargetManager:
|
||||
|
||||
def merge_runtime_parameters(self, params):
|
||||
return params
|
||||
|
||||
def validate_runtime_parameters(self, params):
|
||||
pass
|
||||
|
||||
|
||||
class IncludesTest(TestCase):
|
||||
|
||||
def test_includes(self):
|
||||
from pprint import pprint
|
||||
parser = AgendaParser()
|
||||
cm = ConfigManager()
|
||||
tm = FakeTargetManager()
|
||||
|
||||
includes = parser.load_from_path(cm, INCLUDES_TEST_FILE)
|
||||
include_set = set([os.path.basename(i) for i in includes])
|
||||
assert_equal(include_set,
|
||||
set(['test.yaml', 'section1.yaml', 'section2.yaml',
|
||||
'section-include.yaml', 'workloads.yaml']))
|
||||
|
||||
job_classifiers = {j.id: j.classifiers
|
||||
for j in cm.jobs_config.generate_job_specs(tm)}
|
||||
assert_equal(job_classifiers,
|
||||
{
|
||||
's1-wk1': {'section': 'one'},
|
||||
's2-wk1': {'section': 'two', 'included': True},
|
||||
's1-wk2': {'section': 'one', 'memcpy': True},
|
||||
's2-wk2': {'section': 'two', 'included': True, 'memcpy': True},
|
||||
})
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user