Mirror of https://github.com/ARM-software/workload-automation.git
Documentation: Update WA3 Documentation
Update the documentation and build system to produce documentation for WA3, with support for automatic building on ReadTheDocs. Note: this is currently a WIP.
This commit is contained in:
parent 98bed3822a, commit bc87eacde2
doc/Makefile (56 lines changed)
@ -11,14 +11,14 @@ SPHINXAPI = sphinx-apidoc
SPHINXAPIOPTS =

WAEXT = ./build_plugin_docs.py
WAEXTOPTS = source/plugins ../wlauto ../wlauto/external ../wlauto/tests
WAEXTOPTS = source/plugins ../wa ../wa/tests ../wa/framework

# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
ALLSPHINXAPIOPTS = -f $(SPHINXAPIOPTS) -o source/api ../wlauto
ALLSPHINXAPIOPTS = -f $(SPHINXAPIOPTS) -o source/api ../wa
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source

@ -58,52 +58,38 @@ coverage:
	@echo
	@echo "Build finished. The coverage reports are in $(BUILDDIR)/coverage."

api: ../wlauto
	rm -rf source/api/*
	$(SPHINXAPI) $(ALLSPHINXAPIOPTS)

waext: ../wlauto
	rm -rf source/plugins
	mkdir -p source/plugins
	$(WAEXT) $(WAEXTOPTS)

sigtab: ../wlauto/core/instrumentation.py source/instrumentation_method_map.template
	rm -rf source/instrumentation_method_map.rst
	./build_instrumentation_method_map.py source/instrumentation_method_map.rst

html: api waext sigtab
html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
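Note that the ``api``, ``waext`` and ``sigtab`` targets, and the prerequisites dropped from ``html`` and the other builders below, are not simply lost: the equivalent generation steps are now triggered from ``conf.py`` during Sphinx's ``builder-inited`` phase (see the ``run_apidoc``/``setup`` hunk later in this commit), so that ReadTheDocs, which only runs ``sphinx-build``, still regenerates these sources. A rough, illustrative mapping (not part of the commit):

.. code-block:: python

    # Illustrative mapping only -- see the conf.py changes further down.
    #   make api    ->  run_apidoc(_)    # sphinx-apidoc -f -o api ../../wa --force
    #   make waext  ->  generate_plugin_documentation(module_dir, 'plugins', excluded_extensions)
    #   make sigtab ->  no direct counterpart appears in the conf.py hook in this commit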
|
||||
dirhtml: api waext sigtab
|
||||
dirhtml:
|
||||
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
|
||||
@echo
|
||||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
|
||||
|
||||
singlehtml: api waext sigtab
|
||||
singlehtml:
|
||||
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
|
||||
@echo
|
||||
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
|
||||
|
||||
pickle: api waext sigtab
|
||||
pickle:
|
||||
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
|
||||
@echo
|
||||
@echo "Build finished; now you can process the pickle files."
|
||||
|
||||
json: api waext sigtab
|
||||
json:
|
||||
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
|
||||
@echo
|
||||
@echo "Build finished; now you can process the JSON files."
|
||||
|
||||
htmlhelp: api waext sigtab
|
||||
htmlhelp:
|
||||
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
|
||||
@echo
|
||||
@echo "Build finished; now you can run HTML Help Workshop with the" \
|
||||
".hhp project file in $(BUILDDIR)/htmlhelp."
|
||||
|
||||
qthelp: api waext sigtab
|
||||
qthelp:
|
||||
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
|
||||
@echo
|
||||
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
|
||||
@ -112,7 +98,7 @@ qthelp: api waext sigtab
|
||||
@echo "To view the help file:"
|
||||
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/WorkloadAutomation2.qhc"
|
||||
|
||||
devhelp: api
|
||||
devhelp:
|
||||
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
|
||||
@echo
|
||||
@echo "Build finished."
|
||||
@ -121,64 +107,64 @@ devhelp: api
|
||||
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/WorkloadAutomation2"
|
||||
@echo "# devhelp"
|
||||
|
||||
epub: api waext sigtab
|
||||
epub:
|
||||
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
|
||||
@echo
|
||||
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
|
||||
|
||||
latex: api waext sigtab
|
||||
latex:
|
||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
||||
@echo
|
||||
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
|
||||
@echo "Run \`make' in that directory to run these through (pdf)latex" \
|
||||
"(use \`make latexpdf' here to do that automatically)."
|
||||
|
||||
latexpdf: api waext sigtab
|
||||
latexpdf:
|
||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
||||
@echo "Running LaTeX files through pdflatex..."
|
||||
$(MAKE) -C $(BUILDDIR)/latex all-pdf
|
||||
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
|
||||
|
||||
text: api waext sigtab
|
||||
text:
|
||||
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
|
||||
@echo
|
||||
@echo "Build finished. The text files are in $(BUILDDIR)/text."
|
||||
|
||||
man: api waext sigtab
|
||||
man:
|
||||
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
|
||||
@echo
|
||||
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
|
||||
|
||||
texinfo: api waext sigtab
|
||||
texinfo:
|
||||
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
|
||||
@echo
|
||||
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
|
||||
@echo "Run \`make' in that directory to run these through makeinfo" \
|
||||
"(use \`make info' here to do that automatically)."
|
||||
|
||||
info: api waext sigtab
|
||||
info:
|
||||
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
|
||||
@echo "Running Texinfo files through makeinfo..."
|
||||
make -C $(BUILDDIR)/texinfo info
|
||||
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
|
||||
|
||||
gettext: api waext sigtab
|
||||
gettext:
|
||||
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
|
||||
@echo
|
||||
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
|
||||
|
||||
changes: api waext sigtab
|
||||
changes:
|
||||
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
|
||||
@echo
|
||||
@echo "The overview file is in $(BUILDDIR)/changes."
|
||||
|
||||
linkcheck: api waext sigtab
|
||||
linkcheck:
|
||||
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
|
||||
@echo
|
||||
@echo "Link check complete; look for any errors in the above output " \
|
||||
"or in $(BUILDDIR)/linkcheck/output.txt."
|
||||
|
||||
doctest: api waext sigtab
|
||||
doctest:
|
||||
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
|
||||
@echo "Testing of doctests in the sources finished, look at the " \
|
||||
"results in $(BUILDDIR)/doctest/output.txt."
|
||||
|
@ -18,8 +18,8 @@ import sys
import string
from copy import copy

from wlauto.core.instrumentation import SIGNAL_MAP, PRIORITY_MAP
from wlauto.utils.doc import format_simple_table
from wa.framework.instrumentation import SIGNAL_MAP, Priority
from wa.utils.doc import format_simple_table


CONVINIENCE_ALIASES = ['initialize', 'setup', 'start', 'stop', 'process_workload_result',

@ -36,7 +36,7 @@ def escape_trailing_underscore(value):
def generate_instrumentation_method_map(outfile):
    signal_table = format_simple_table([(k, v) for k, v in SIGNAL_MAP.iteritems()],
                                       headers=['method name', 'signal'], align='<<')
    priority_table = format_simple_table([(escape_trailing_underscore(k), v) for k, v in PRIORITY_MAP.iteritems()],
    priority_table = format_simple_table(zip(Priority.names, Priority.values),
                                         headers=['prefix', 'priority'], align='<>')
    with open(OUTPUT_TEMPLATE_FILE) as fh:
        template = string.Template(fh.read())

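The second hunk reflects WA3 replacing the WA2 ``PRIORITY_MAP`` dict with an enum-like ``Priority`` object in ``wa.framework.instrumentation``, so the priority table is now built from parallel ``names``/``values`` lists. A minimal sketch of the idea (the stand-in class and its numeric values below are assumptions for illustration only):

.. code-block:: python

    # Hypothetical stand-in for wa.framework.instrumentation.Priority,
    # showing the names/values pairing consumed by format_simple_table().
    class Priority(object):
        names = ['very_slow', 'slow', 'normal', 'fast', 'very_fast']
        values = [20, 10, 0, -10, -20]      # illustrative numbers

    rows = list(zip(Priority.names, Priority.values))
    # rows == [('very_slow', 20), ('slow', 10), ('normal', 0),
    #          ('fast', -10), ('very_fast', -20)]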
@ -26,9 +26,13 @@ from wa.utils.misc import capitalize

GENERATE_FOR_PLUGIN = ['workload', 'instrument', 'output_processor', 'target']


def generate_plugin_documentation(source_dir, outdir, ignore_paths):
    pluginloader.clear()
    pluginloader.update(paths=[source_dir], ignore_paths=ignore_paths)
    if not os.path.exists(outdir):
        os.mkdir(outdir)

    for ext_type in pluginloader.kinds:
        if not ext_type in GENERATE_FOR_PLUGIN:
            continue
@ -59,6 +63,5 @@ def generate_config_documentation(config, outdir):
        wfh.write(get_params_rst(config.config_points))


if __name__ == '__main__':
    generate_plugin_documentation(sys.argv[2], sys.argv[1], sys.argv[3:])

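For reference, the ``waext`` Makefile target shown earlier invokes this script as ``$(WAEXT) $(WAEXTOPTS)``; per the ``__main__`` block, argv[1] is the output directory, argv[2] the package to scan, and the remaining arguments are ignore paths. Calling the generator directly with the same arguments would look like this (a sketch, paths taken from ``WAEXTOPTS``):

.. code-block:: python

    # Equivalent to:
    #   ./build_plugin_docs.py source/plugins ../wa ../wa/tests ../wa/framework
    generate_plugin_documentation(source_dir='../wa',
                                  outdir='source/plugins',
                                  ignore_paths=['../wa/tests', '../wa/framework'])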
doc/make.bat (new file, 263 lines)
@ -0,0 +1,263 @@
|
||||
@ECHO OFF
|
||||
|
||||
REM Command file for Sphinx documentation
|
||||
|
||||
if "%SPHINXBUILD%" == "" (
|
||||
set SPHINXBUILD=sphinx-build
|
||||
)
|
||||
set BUILDDIR=_build
|
||||
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
|
||||
set I18NSPHINXOPTS=%SPHINXOPTS% .
|
||||
if NOT "%PAPER%" == "" (
|
||||
set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
|
||||
set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
|
||||
)
|
||||
|
||||
if "%1" == "" goto help
|
||||
|
||||
if "%1" == "help" (
|
||||
:help
|
||||
echo.Please use `make ^<target^>` where ^<target^> is one of
|
||||
echo. html to make standalone HTML files
|
||||
echo. dirhtml to make HTML files named index.html in directories
|
||||
echo. singlehtml to make a single large HTML file
|
||||
echo. pickle to make pickle files
|
||||
echo. json to make JSON files
|
||||
echo. htmlhelp to make HTML files and a HTML help project
|
||||
echo. qthelp to make HTML files and a qthelp project
|
||||
echo. devhelp to make HTML files and a Devhelp project
|
||||
echo. epub to make an epub
|
||||
echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
|
||||
echo. text to make text files
|
||||
echo. man to make manual pages
|
||||
echo. texinfo to make Texinfo files
|
||||
echo. gettext to make PO message catalogs
|
||||
echo. changes to make an overview over all changed/added/deprecated items
|
||||
echo. xml to make Docutils-native XML files
|
||||
echo. pseudoxml to make pseudoxml-XML files for display purposes
|
||||
echo. linkcheck to check all external links for integrity
|
||||
echo. doctest to run all doctests embedded in the documentation if enabled
|
||||
echo. coverage to run coverage check of the documentation if enabled
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "clean" (
|
||||
for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
|
||||
del /q /s %BUILDDIR%\*
|
||||
goto end
|
||||
)
|
||||
|
||||
|
||||
REM Check if sphinx-build is available and fallback to Python version if any
|
||||
%SPHINXBUILD% 2> nul
|
||||
if errorlevel 9009 goto sphinx_python
|
||||
goto sphinx_ok
|
||||
|
||||
:sphinx_python
|
||||
|
||||
set SPHINXBUILD=python -m sphinx.__init__
|
||||
%SPHINXBUILD% 2> nul
|
||||
if errorlevel 9009 (
|
||||
echo.
|
||||
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
|
||||
echo.installed, then set the SPHINXBUILD environment variable to point
|
||||
echo.to the full path of the 'sphinx-build' executable. Alternatively you
|
||||
echo.may add the Sphinx directory to PATH.
|
||||
echo.
|
||||
echo.If you don't have Sphinx installed, grab it from
|
||||
echo.http://sphinx-doc.org/
|
||||
exit /b 1
|
||||
)
|
||||
|
||||
:sphinx_ok
|
||||
|
||||
|
||||
if "%1" == "html" (
|
||||
%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The HTML pages are in %BUILDDIR%/html.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "dirhtml" (
|
||||
%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "singlehtml" (
|
||||
%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "pickle" (
|
||||
%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can process the pickle files.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "json" (
|
||||
%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can process the JSON files.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "htmlhelp" (
|
||||
%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can run HTML Help Workshop with the ^
|
||||
.hhp project file in %BUILDDIR%/htmlhelp.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "qthelp" (
|
||||
%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can run "qcollectiongenerator" with the ^
|
||||
.qhcp project file in %BUILDDIR%/qthelp, like this:
|
||||
echo.^> qcollectiongenerator %BUILDDIR%\qthelp\devlib.qhcp
|
||||
echo.To view the help file:
|
||||
echo.^> assistant -collectionFile %BUILDDIR%\qthelp\devlib.ghc
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "devhelp" (
|
||||
%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "epub" (
|
||||
%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The epub file is in %BUILDDIR%/epub.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "latex" (
|
||||
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "latexpdf" (
|
||||
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
|
||||
cd %BUILDDIR%/latex
|
||||
make all-pdf
|
||||
cd %~dp0
|
||||
echo.
|
||||
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "latexpdfja" (
|
||||
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
|
||||
cd %BUILDDIR%/latex
|
||||
make all-pdf-ja
|
||||
cd %~dp0
|
||||
echo.
|
||||
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "text" (
|
||||
%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The text files are in %BUILDDIR%/text.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "man" (
|
||||
%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The manual pages are in %BUILDDIR%/man.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "texinfo" (
|
||||
%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "gettext" (
|
||||
%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "changes" (
|
||||
%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.The overview file is in %BUILDDIR%/changes.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "linkcheck" (
|
||||
%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Link check complete; look for any errors in the above output ^
|
||||
or in %BUILDDIR%/linkcheck/output.txt.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "doctest" (
|
||||
%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Testing of doctests in the sources finished, look at the ^
|
||||
results in %BUILDDIR%/doctest/output.txt.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "coverage" (
|
||||
%SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Testing of coverage in the sources finished, look at the ^
|
||||
results in %BUILDDIR%/coverage/python.txt.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "xml" (
|
||||
%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The XML files are in %BUILDDIR%/xml.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "pseudoxml" (
|
||||
%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
|
||||
goto end
|
||||
)
|
||||
|
||||
:end
|
doc/requirements.txt (new file, 1 line)

@ -0,0 +1 @@
nose
doc/source/_static/.gitignore (vendored, 0 lines changed)
doc/source/_templates/.gitignore (vendored, 0 lines changed)
@ -1,101 +0,0 @@

Additional Topics
+++++++++++++++++

Modules
=======

Modules are essentially plug-ins for Plugins. They provide a way of defining
common and reusable functionality. A Plugin can load zero or more modules
during its creation. Loaded modules will then add their capabilities (see
Capabilities_) to those of the Plugin. When calling code tries to access an
attribute that the Plugin doesn't have, the Plugin will try to find the
attribute among its loaded modules and will return that instead.

.. note:: Modules are themselves plugins, and can therefore load their own
          modules. *Do not* abuse this.

For example, calling code may wish to reboot an unresponsive device by calling
``device.hard_reset()``, but the ``Device`` in question does not have a
``hard_reset`` method; however, the ``Device`` has loaded a ``netio_switch``
module which allows it to disable the power supply over the network (say this
device is in a rack and is powered through such a switch). The module has the
``reset_power`` capability (see Capabilities_ below) and so implements
``hard_reset``. This will get invoked when ``device.hard_reset()`` is called.

.. note:: Modules can only extend Plugins with new attributes; they cannot
          override existing functionality. In the example above, if the
          ``Device`` had implemented ``hard_reset()`` itself, then *that* would
          get invoked irrespective of which modules it has loaded.

If two loaded modules have the same capability or implement the same method,
then the last module to be loaded "wins" and its method will be invoked,
effectively overriding the module that was loaded previously.

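The attribute-fallback behaviour described above can be pictured with a small sketch (hypothetical class and attribute names; this is not the actual WA implementation):

.. code-block:: python

    class Plugin(object):

        def __init__(self):
            self._modules = []   # modules loaded during the Plugin's creation

        def __getattr__(self, name):
            # Only reached when normal lookup fails, i.e. the Plugin itself does
            # not define the attribute -- so modules extend, but never override.
            for module in reversed(self._modules):   # last loaded module "wins"
                if hasattr(module, name):
                    return getattr(module, name)
            raise AttributeError(name)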
Specifying Modules
------------------

Modules get loaded when a Plugin is instantiated by the plugin loader.
There are two ways to specify which modules should be loaded for a device.


Capabilities
============

Capabilities define the functionality that is implemented by a Plugin,
either within the Plugin itself or through loadable modules. A capability is
just a label, but there is an implied contract: when a Plugin claims to have
a particular capability, it promises to expose a particular set of
functionality through a predefined interface.

Currently used capabilities are described below.

.. note:: Since capabilities are essentially arbitrary strings, the user can
          always define their own; it is then up to the user to define, enforce
          and document the contract associated with their capability. Below are
          the "standard" capabilities used in WA.

.. note:: The method signatures in the descriptions below show the calling
          signature (i.e. they omit the initial ``self`` parameter).

active_cooling
--------------

Intended to be used by devices and device modules, this capability implies
that the device implements a controllable active cooling solution (e.g.
a programmable fan). The device/module must implement the following methods:

start_active_cooling()
    Active cooling is started (e.g. the fan is turned on).

stop_active_cooling()
    Active cooling is stopped (e.g. the fan is turned off).


reset_power
-----------

Intended to be used by devices and device modules, this capability implies
that the device is capable of performing a hard reset by toggling power. The
device/module must implement the following method:

hard_reset()
    The device is restarted. This method cannot rely on the device being
    responsive and must work even if the software on the device has crashed.


flash
-----

Intended to be used by devices and device modules, this capability implies
that the device can be flashed with new images. The device/module must
implement the following method:

flash(image_bundle=None, images=None)
    ``image_bundle`` is a path to a "bundle" (e.g. a tarball) that contains
    all the images to be flashed. Which images go where must also be defined
    within the bundle. ``images`` is a dict mapping image destination (e.g.
    partition name) to the path to that specific image. Both
    ``image_bundle`` and ``images`` may be specified at the same time. If
    there is overlap between the two, ``images`` wins and its contents will
    be flashed in preference to the ``image_bundle``.

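As an illustration, a device module claiming the ``reset_power`` capability might look roughly like this (a sketch only: the ``Module`` base class, ``capabilities`` attribute and ``self.port`` helper are assumptions made for the example, not the real WA interface):

.. code-block:: python

    class NetioSwitch(Module):

        name = 'netio_switch'
        capabilities = ['reset_power']

        def hard_reset(self):
            # Must not assume the device software is responsive: just toggle
            # the power port the device is plugged into.
            self.port.power_off()
            self.port.power_on()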
doc/source/api_reference.rst (new file, 7 lines)

@ -0,0 +1,7 @@
API Reference
~~~~~~~~~~~~~

.. toctree::
   :maxdepth: 5

   api/wa
@ -1,399 +1,27 @@

=================================
What's New in Workload Automation
=================================

-------------
Version 2.4.0
-------------

Additions:
|
||||
##########
|
||||
|
||||
Devices
|
||||
~~~~~~~~
|
||||
- ``gem5_linux`` and ``gem5_android``: Interfaces for Gem5 simulation
|
||||
environment running Linux and Android respectively.
|
||||
- ``XE503C1211``: Interface for Samsung XE503C12 Chromebooks.
|
||||
- ``chromeos_test_image``: Chrome OS test image device. An off the shelf
|
||||
device will not work with this device interface.
|
||||
|
||||
Instruments
|
||||
~~~~~~~~~~~~
|
||||
- ``freq_sweep``: Allows "sweeping" workloads across multiple CPU frequencies.
|
||||
- ``screenon``: Ensures screen is on, before each iteration, or periodically
|
||||
on Android devices.
|
||||
- ``energy_model``: This instrument can be used to generate an energy model
|
||||
for a device based on collected power and performance measurements.
|
||||
- ``netstats``: Allows monitoring data sent/received by applications on an
|
||||
Android device.
|
||||
|
||||
Modules
|
||||
~~~~~~~
|
||||
- ``cgroups``: Allows query and manipulation of cgroups controllers on a Linux
|
||||
device. Currently, only cpusets controller is implemented.
|
||||
- ``cpuidle``: Implements cpuidle state discovery, query and manipulation for
|
||||
a Linux device. This replaces the more primitive get_cpuidle_states method
|
||||
of LinuxDevice.
|
||||
- ``cpufreq`` has now been split out into a device module
|
||||
|
||||
Resource Getters
|
||||
~~~~~~~~~~~~~~~~~
|
||||
- ``http_assets``: Downloads resources from a web server.
|
||||
|
||||
Results Processors
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
- ``ipynb_exporter``: Generates an IPython notebook from a template with the
|
||||
results and runs it.
|
||||
- ``notify``: Displays a desktop notification when a run finishes
|
||||
(Linux only).
|
||||
- ``cpustates``: Processes power ftrace to produce CPU state and parallelism
|
||||
stats. There is also a script to invoke this outside of WA.
|
||||
|
||||
Workloads
|
||||
~~~~~~~~~
|
||||
- ``telemetry``: Executes Google's Telemetry benchmarking framework.
|
||||
- ``hackbench``: Hackbench runs tests on the Linux scheduler
|
||||
- ``ebizzy``: This workload resembles common web server application workloads.
|
||||
- ``power_loadtest``: Continuously cycles through a set of browser-based
|
||||
activities and monitors battery drain on a device (part of ChromeOS autotest
|
||||
suite).
|
||||
- ``rt-app``: Simulates configurable real-time periodic load.
|
||||
- ``linpack-cli``: Command line version of linpack benchmark.
|
||||
- ``lmbench``: A suite of portable ANSI/C microbenchmarks for UNIX/POSIX.
|
||||
- ``stream``: Measures memory bandwidth.
|
||||
- ``iozone``: Runs a series of disk I/O performance tests.
|
||||
- ``androbench``: Measures the storage performance of device.
|
||||
- ``autotest``: Executes tests from ChromeOS autotest suite.
|
||||
|
||||
Framework
|
||||
~~~~~~~~~
|
||||
- ``wlauto.utils``:
|
||||
- Added ``trace_cmd``, a generic trace-cmd parser.
|
||||
- Added ``UbootMenu``, allows navigating Das U-boot menu over serial.
|
||||
- ``wlauto.utils.types``:
|
||||
- ``caseless_string``: Behaves exactly like a string, except this ignores
|
||||
case in comparisons. It does, however, preserve case.
|
||||
- ``list_of``: allows dynamic generation of type-safe list types based on
|
||||
an existing type.
|
||||
- ``arguments``: represents arguments that are passed on a command line to
|
||||
an application.
|
||||
- ``list-or``: allows dynamic generation of types that accept either a base
|
||||
type or a list of base type. Using this ``list_or_integer``,
|
||||
``list_or_number`` and ``list_or_bool`` were also added.
|
||||
- ``wlauto.core.configuration.WorkloadRunSpec``:
|
||||
- ``copy``: Allows making duplicates of ``WorkloadRunSpec``'s
|
||||
- ``wlauto.utils.misc``:
|
||||
- ``list_to_ranges`` and ``ranges_to_list``: convert between lists of
|
||||
integers and corresponding range strings, e.g. between [0,1,2,4] and
|
||||
'0-2,4'
|
||||
- ``list_to_mask`` and ``mask_to_list``: convert between lists of integers
|
||||
and corresponding integer masks, e.g. between [0,1,2,4] and 0x17
|
||||
- ``wlauto.instrumentation``:
|
||||
- ``instrument_is_enabled``: Returns whether or not an instrument is
|
||||
enabled for the current job.
|
||||
- ``wlauto.core.result``:
|
||||
- Added "classifiers" field to Metric objects. This is a dict mapping
|
||||
classifier names (arbitrary strings) to corresponding values for that
|
||||
specific metrics. This is to allow plugins to add plugin-specific
|
||||
annotations to metric that could be handled in a generic way (e.g. by
|
||||
result processors). They can also be set in agendas.
|
||||
- Failed jobs will now be automatically retried
|
||||
- Implemented dynamic device modules that may be loaded automatically on
|
||||
device initialization if the device supports them.
|
||||
- Added support for YAML configs.
|
||||
- Added ``initialize`` and ``finalize`` methods to workloads.
|
||||
- ``wlauto.core.ExecutionContext``:
|
||||
- Added ``job_status`` property that returns the status of the currently
|
||||
running job.
|
||||
|
||||
Fixes/Improvements
|
||||
##################
|
||||
|
||||
Devices
|
||||
~~~~~~~~
|
||||
- ``tc2``: Workaround for buffer overrun when loading large initrd blob.
|
||||
- ``juno``:
|
||||
- UEFI config can now be specified as a parameter.
|
||||
- Adding support for U-Boot booting.
|
||||
- No longer auto-disconnects ADB at the end of a run.
|
||||
- Added ``actually_disconnect`` to restore old disconnect behaviour
|
||||
- Now passes ``video`` command line to Juno kernel to work around a known
|
||||
issue where HDMI loses sync with monitors.
|
||||
- Fixed flashing.
|
||||
|
||||
Instruments
|
||||
~~~~~~~~~~~
|
||||
- ``trace_cmd``:
|
||||
- Fixed ``buffer_size_file`` for non-Android devices
|
||||
- Reduce starting priority.
|
||||
- Now handles trace headers and thread names with spaces
|
||||
- ``energy_probe``: Added ``device_entry`` parameter.
|
||||
- ``hwmon``:
|
||||
- Sensor discovery is now done only at the start of a run.
|
||||
- Now prints both before/after and mean temperatures.
|
||||
- ``daq``:
|
||||
- Now reports energy
|
||||
- Fixed file descriptor leak
|
||||
- ``daq_power.csv`` now matches the order of labels (if specified).
|
||||
- Added ``gpio_sync``. When enabled, this will cause the instrument to
|
||||
insert a marker into ftrace, while at the same time setting a GPIO pin
|
||||
high.
|
||||
- Added ``negative_values`` parameter, which can be used to specify how
|
||||
negative values in the samples should be handled.
|
||||
- Added ``merge_channels`` parameter. When set DAQ channel will be summed
|
||||
together.
|
||||
- Workload labels, rather than names, are now used in the "workload"
|
||||
column.
|
||||
- ``cpufreq``:
|
||||
- Fixes missing directories problem.
|
||||
- Refined the availability check not to rely on the top-level cpu/cpufreq
|
||||
directory
|
||||
- Now handles non-integer output in ``get_available_frequencies``.
|
||||
- ``sysfs_extractor``:
|
||||
- No longer raises an error when both device and host paths are empty.
|
||||
- Fixed pulled files verification.
|
||||
- ``perf``:
|
||||
- Updated binaries.
|
||||
- Added option to force install.
|
||||
- ``killall`` is now run as root on rooted Android devices.
|
||||
- ``fps``:
|
||||
- now generates detailed FPS traces as well as report average FPS.
|
||||
- Updated jank calculation to only count "large" janks.
|
||||
- Now filters out bogus ``actual-present`` times and ignore janks above
|
||||
``PAUSE_LATENCY``
|
||||
- ``delay``:
|
||||
- Added ``fixed_before_start`` parameter.
|
||||
- Changed existing ``*_between_specs`` and ``*_between_iterations``
|
||||
callbacks to be ``very_slow``
|
||||
- ``streamline``:
|
||||
- Added Linux support
|
||||
- ``gatord`` is now only started once at the start of the run.
|
||||
|
||||
modules
|
||||
~~~~~~~
|
||||
- ``flashing``:
|
||||
- Fixed vexpress flashing
|
||||
- Added an option to keep UEFI entry
|
||||
|
||||
Result Processors
|
||||
~~~~~~~~~~~~~~~~~
|
||||
- ``cpustate``:
|
||||
- Now generates a timeline csv as well as stats.
|
||||
- Adding ID to overall cpustate reports.
|
||||
- ``csv``: (partial) ``results.csv`` will now be written after each iteration
|
||||
rather than at the end of the run.
|
||||
|
||||
Workloads
|
||||
~~~~~~~~~
|
||||
- ``glb_corporate``: clears logcat to prevent getting results from previous
|
||||
run.
|
||||
- ``sysbench``:
|
||||
- Updated sysbench binary to a statically linked version
|
||||
- Added ``file_test_mode`` parameter - this is a mandatory argument if
|
||||
``test`` is ``"fileio"``.
|
||||
- Added ``cmd_params`` parameter to pass options directly to sysbench
|
||||
invocation.
|
||||
- Removed Android browser launch and shutdown from workload (now runs on
|
||||
both Linux and Android).
|
||||
- Now works with unrooted devices.
|
||||
- Added the ability to run based on time.
|
||||
- Added a parameter to taskset to specific core(s).
|
||||
- Added ``threads`` parameter to be consistent with dhrystone.
|
||||
- Fixed case where default ``timeout`` < ``max_time``.
|
||||
- ``Dhrystone``:
|
||||
- added ``taskset_mask`` parameter to allow pinning to specific cores.
|
||||
- Now kills any running instances during setup (also handles CTRL-C).
|
||||
- ``sysfs_extractor``: Added parameter to explicitly enable/disable tempfs
|
||||
caching.
|
||||
- ``antutu``:
|
||||
- Fixed multi-``times`` playback for v5.
|
||||
- Updated result parsing to handle Android M logcat output.
|
||||
- ``geekbench``: Increased timeout to cater for slower devices.
|
||||
- ``idle``: Now works on Linux devices.
|
||||
- ``manhattan``: Added ``run_timemout`` parameter.
|
||||
- ``bbench``: Now works when binaries_directory is not in path.
|
||||
- ``nemamark``: Made duration configurable.
|
||||
|
||||
Framework
|
||||
~~~~~~~~~~
|
||||
- ``BaseLinuxDevice``:
|
||||
- Now checks that at least one core is enabled on another cluster before
|
||||
attempting to set number of cores on a cluster to ``0``.
|
||||
- No longer uses ``sudo`` if already logged in as ``root``.
|
||||
- Now saves ``dumpsys window`` output to the ``__meta`` directory.
|
||||
- Now takes ``password_prompt`` as a parameter for devices with a non
|
||||
standard ``sudo`` password prompt.
|
||||
- No longer raises an error if ``keyfile`` or ``password`` are not
|
||||
provided when they are not necessary.
|
||||
- Added new cpufreq APIs:
|
||||
- ``core`` APIs take a core name as the parameter (e.g. "a15")
|
||||
- ``cluster`` APIs take a numeric cluster ID (eg. 0)
|
||||
- ``cpu`` APIs take a cpufreq cpu ID as a parameter.
|
||||
- ``set_cpu_frequency`` now has a ``exact`` parameter. When true (the
|
||||
default) it will produce an error when the specified frequency is not
|
||||
supported by the cpu, otherwise cpufreq will decide what to do.
|
||||
- Added ``{core}_frequency`` runtime parameter to set cluster frequency.
|
||||
- Added ``abi`` property.
|
||||
- ``get_properties`` moved from ``LinuxDevice``, meaning ``AndroidDevice``
|
||||
will try to pull the same files. Added more paths to pull by default
|
||||
too.
|
||||
- fixed ``list_file_systems`` for Android M and Linux devices.
|
||||
- Now sets ``core_clusters`` from ``core_names`` if not explicitly
|
||||
specified.
|
||||
- Added ``invoke`` method that allows invoking an executable on the device
|
||||
under controlled conditions (e.g. within a particular directory, or
|
||||
taskset to specific CPUs).
|
||||
- No longer attempts to ``get_sysfile_value()`` as root on unrooted
|
||||
devices.
|
||||
- ``LinuxDevice``:
|
||||
- Now creates ``binaries_directory`` path if it doesn't exist.
|
||||
- Fixed device reset
|
||||
- Fixed ``file_exists``
|
||||
- implemented ``get_pid_of()`` and ``ps()``. Existing implementation
|
||||
relied on Android version of ps.
|
||||
- ``listdir`` will now return an empty list for an empty directory
|
||||
instead of a list containing a single empty string.
|
||||
- ``AndroidDevice``:
|
||||
- Executable (un)installation now works on unrooted devices.
|
||||
- Now takes into account ``binar_directory`` when setting up busybox path.
|
||||
- update ``android_prompt`` so that it works even if is not ``"/"``
|
||||
- ``adb_connect``: do not assume port 5555 anymore.
|
||||
- Now always deploys busybox on rooted devices.
|
||||
- Added ``swipe_to_unlock`` method.
|
||||
- Fixed initialization of ``~/.workload_automation.``.
|
||||
- Fixed replaying events using revent on 64 bit platforms.
|
||||
- Improved error reporting when loading plugins.
|
||||
- ``result`` objects now track their output directories.
|
||||
- ``context.result`` will not result in ``context.run_result`` when not
|
||||
executing a job.
|
||||
- ``wlauto.utils.ssh``:
|
||||
- Fixed key-based authentication.
|
||||
- Fixed carriage return stripping in ssh.
|
||||
- Now takes ``password_prompt`` as a parameter for non standard ``sudo``
|
||||
password prompts.
|
||||
- Now with 100% more thread safety!
|
||||
- If a timeout condition is hit, ^C is now sent to kill the current
|
||||
foreground process and make the shell available for subsequent commands.
|
||||
- More robust ``exit_code`` handling for ssh interface
|
||||
- Now attempts to deal with dropped connections
|
||||
- Fixed error reporting on failed exit code extraction.
|
||||
- Now handles backspaces in serial output
|
||||
- Added ``port`` argument for telnet connections.
|
||||
- Now allows telnet connections without a password.
|
||||
- Fixed config processing for plugins with non-identifier names.
|
||||
- Fixed ``get_meansd`` for numbers < 1
|
||||
- ``wlauto.utils.ipython``:
|
||||
- Now supports old versions of IPython
|
||||
- Updated version check to only initialize ipython utils if version is
|
||||
< 4.0.0. Version 4.0.0 changes API and breaks WA's usage of it.
|
||||
- Added ``ignore`` parameter to ``check_output``
|
||||
- Agendas:
|
||||
- Now raise an error if an agenda contains duplicate keys
|
||||
- Now raise an error if config section in an agenda is not dict-like
|
||||
- Now properly handles ``core_names`` and ``core_clusters``
|
||||
- When merging list parameters from different sources, duplicates are no
|
||||
longer removed.
|
||||
- The ``INITIAL_BOOT`` signal is now sent when performing a hard reset during
|
||||
initial boot
|
||||
- updated ``ExecutionContext`` to keep a reference to the ``runner``. This
|
||||
will enable Extensions to do things like modify the job queue.
|
||||
- Parameter now automatically converts int and bool kinds to integer and
|
||||
boolean respectively; this behavior can be suppressed by specifying
|
||||
``convert_types``=``False`` when defining the parameter.
|
||||
- Fixed resource resolution when dependency location does not exist.
|
||||
- All device ``push`` and ``pull`` commands now raise ``DeviceError`` if they
|
||||
didn't succeed.
|
||||
- Fixed showing Parameter default of ``False`` for boolean values.
|
||||
- Updated csv result processor with the option to use classifiers to
|
||||
add columns to ``results.csv``.
|
||||
- ``wlauto.utils.formatter``: Fix terminal size discovery.
|
||||
- The plugin loader will now follow symlinks.
|
||||
- Added arm64-v8a to ABI map
|
||||
- WA now reports syntax errors in a more informative way.
|
||||
- Resource resolver: now prints the path of the found resource to the log.
|
||||
- Resource getter: look for executable in the bin/ directory under resource
|
||||
owner's dependencies directory as well as general dependencies bin.
|
||||
- ``GamingWorkload``:
|
||||
- Added an option to prevent clearing of package data before execution.
|
||||
- Added the ability to override the timeout of deploying the assets
|
||||
tarball.
|
||||
- ``ApkWorkload``: Added an option to skip host-side APK check entirely.
|
||||
- ``utils.misc.normalize``: only normalize string keys.
|
||||
- Better error reporting for subprocess.CalledProcessError
|
||||
- ``boolean`` now interprets ``'off'`` as ``False``
|
||||
- ``wlauto.utils.uefi``: Added support for debug builds.
|
||||
- ``wlauto.utils.serial_port``: Now supports fdexpect versions > 4.0.0
|
||||
- Semantics for ``initialize``/``finalize`` for *all* Plugins are changed
|
||||
so that now they will always run at most once per run. They will not be
|
||||
executed twice even if invoked via instances of different subclasses (if
|
||||
those subclasses defined their own versions, then their versions will be
|
||||
invoked once each, but the base version will only get invoked once).
|
||||
- Pulling entries from procfs does not work on some platforms. WA now tries
|
||||
to cat the contents of a property_file and write it to an output file on the
|
||||
host.
|
||||
|
||||
Documentation
|
||||
~~~~~~~~~~~~~
|
||||
- ``installation``:
|
||||
- Added ``post install`` section which lists workloads that require
|
||||
additional external dependencies.
|
||||
- Added the ``uninstall`` and ``upgrade`` commands for users to remove or
|
||||
upgrade Workload Automation.
|
||||
- Added documentation explaining how to use ``remote_assets_path``
|
||||
setting.
|
||||
- Added warning about potential permission issues with pip.
|
||||
- ``quickstart``: Added steps for setting up WA to run on Linux devices.
|
||||
- ``device_setup``: fixed ``generic_linux`` ``device_config`` example.
|
||||
- ``contributing``: Clarified style guidelines
|
||||
- ``daq_device_setup``: Added an illustration for DAQ wiring.
|
||||
- ``writing_plugins``: Documented the Workload initialize and finalize
|
||||
methods.
|
||||
- Added descriptions to plugin that didn't have one.
|
||||
|
||||
Other
|
||||
~~~~~
|
||||
- ``daq_server``:
|
||||
- Fixed showing available devices.
|
||||
- Now works with earlier versions of the DAQmx driver, thus you can now run
|
||||
the server on Linux systems.
|
||||
- DAQ error messages are now properly propagated to the client.
|
||||
- Server will now periodically clean up uncollected files.
|
||||
- fixed not being able to resolve IP address for hostname
|
||||
(report "localhost" in that case).
|
||||
- Works with latest version of twisted.
|
||||
- ``setup.py``: Fixed paths to work with Mac OS X.
|
||||
- ``summary_csv`` is no longer enabled by default.
|
||||
- ``status`` result processor is now enabled by default.
|
||||
- Commands:
|
||||
- ``show``:
|
||||
- Now shows what platform plugins support.
|
||||
- Will no longer try to use a pager if ``PAGER=''`` in the environment.
|
||||
- ``list``:
|
||||
- Added ``"-p"`` option to filter results by supported platforms.
|
||||
- Added ``"--packaged-only"`` option to only list plugins packaged
|
||||
with WA.
|
||||
- ``run``: Added ``"--disable"`` option to disable instruments.
|
||||
- ``create``:
|
||||
- Added ``agenda`` sub-command to generate agendas for a set of
|
||||
plugins.
|
||||
- ``create workload`` now gives more informative errors if Android SDK
|
||||
installed but no platform has been downloaded.
|
||||
|
||||
Incompatible changes
|
||||
####################
|
||||
|
||||
Framework
|
||||
~~~~~~~~~
|
||||
- ``BaseLinuxDevice``:
|
||||
- Renamed ``active_cpus`` to ``online_cpus``
|
||||
- Renamed ``get_cluster_cpu`` to ``get_cluster_active_cpu``
|
||||
- Renamed ``get_core_cpu`` to ``get_core_online_cpu``
|
||||
- All plugin's ``initialize`` function now takes one (and only one)
|
||||
parameter, ``context``.
|
||||
- ``wlauto.core.device``: Removed ``init`` function. Replaced with
|
||||
``initialize``
|
||||
|
||||
-------------
Version 2.3.0
Version 3.0
-------------

- First publicly-released version.
WA3 is a re-write of WA2; please note that, while we have attempted to maintain
backwards compatibility where possible, there may be breaking changes when
moving from WA2 to WA3.

- Changes:
    - Configuration files (``config.py``) are now specified in YAML format in
      ``config.yaml``. WA3 supports automatic conversion of the default config
      file, which will be performed upon the first invocation of WA3.
    - The "config" and "global" sections in an agenda are now interchangeable,
      so everything can be specified in a "config" section.
    - "Results Processors" are now known as "Output Processors" and can now be
      run offline.
    - "Instrumentation" is now known as "Instruments" for more consistent
      naming.
    - Both "Output Processor" and "Instrument" configuration have been merged
      into "Augmentations" (support for the old naming schemes has been
      retained for backwards compatibility).


- New features:
    - There is a new Output API which can be used to aid in post-processing a
      run's output. For more information please see :ref:`output-api`.
    - All "augmentations" can now be enabled on a per-workload basis.

For more information on migrating from WA2 to WA3, please see the
:ref:`migration-guide`.

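As a taster of the new Output API referenced above (a sketch only; the entry point and attribute names are assumptions based on the Output API documentation, see :ref:`output-api` for the authoritative interface):

.. code-block:: python

    from wa import RunOutput    # assumed entry point

    output = RunOutput('./wa_output')   # parse an existing run directory offline
    for job in output.jobs:
        for metric in job.metrics:
            print(job.id, metric.name, metric.value, metric.units)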
@ -1,11 +1,9 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2015 ARM Limited
|
||||
#
|
||||
# Copyright 2018 ARM Limited
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
# WA3 documentation build configuration file.
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
@ -13,12 +11,8 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
#
|
||||
# Workload Automation 2 documentation build configuration file, created by
|
||||
# sphinx-quickstart on Mon Jul 15 09:00:46 2013.
|
||||
#
|
||||
# This file is execfile()d with the current directory set to its containing dir.
|
||||
# This file is execfile()d with the current directory set to its
|
||||
# containing dir.
|
||||
#
|
||||
# Note that not all possible configuration values are present in this
|
||||
# autogenerated file.
|
||||
@ -26,33 +20,43 @@
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys, os
import warnings

warnings.filterwarnings('ignore', "Module louie was already imported")
import sys
import os
import shlex
from sphinx.apidoc import main

this_dir = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(this_dir, '..'))
sys.path.insert(0, os.path.join(this_dir, '../..'))
import wlauto
import wa
from build_plugin_docs import (generate_plugin_documentation,
                               generate_run_config_documentation,
                               generate_meta_config_documentation)

# If extensions (or modules to document with autodoc) are in another directory,
|
||||
# add these directories to sys.path here. If the directory is relative to the
|
||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
||||
#sys.path.insert(0, os.path.abspath('.'))
|
||||
|
||||
# -- General configuration -----------------------------------------------------
|
||||
# -- General configuration ------------------------------------------------
|
||||
|
||||
# If your documentation needs a minimal Sphinx version, state it here.
|
||||
#needs_sphinx = '1.0'
|
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be extensions
|
||||
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
|
||||
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.mathjax', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
|
||||
# Add any Sphinx extension module names here, as strings. They can be
|
||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
|
||||
# ones.
|
||||
extensions = [
|
||||
'sphinx.ext.autodoc',
|
||||
'sphinx.ext.viewcode',
|
||||
]
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
templates_path = ['_templates']
|
||||
templates_path = ['static/templates']
|
||||
|
||||
# The suffix of source filenames.
|
||||
# The suffix(es) of source filenames.
|
||||
# You can specify multiple suffix as a list of string:
|
||||
# source_suffix = ['.rst', '.md']
|
||||
source_suffix = '.rst'
|
||||
|
||||
# The encoding of source files.
|
||||
@ -62,21 +66,25 @@ source_suffix = '.rst'
|
||||
master_doc = 'index'
|
||||
|
||||
# General information about the project.
|
||||
project = u'Workload Automation'
|
||||
copyright = u'2013, ARM Ltd'
|
||||
project = u'wa'
|
||||
copyright = u'2018, ARM Limited'
|
||||
author = u'ARM Limited'
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
# |version| and |release|, also used in various other places throughout the
|
||||
# built documents.
|
||||
#
|
||||
# The short X.Y version.
|
||||
version = wlauto.__version__
|
||||
version = wa.framework.version.get_wa_version()
|
||||
# The full version, including alpha/beta/rc tags.
|
||||
release = wlauto.__version__
|
||||
release = wa.framework.version.get_wa_version()
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
#language = None
|
||||
#
|
||||
# This is also used if you do content translation via gettext catalogs.
|
||||
# Usually you set "language" from the command line for these cases.
|
||||
language = None
|
||||
|
||||
# There are two options for replacing |today|: either, you set today to some
|
||||
# non-false value, then it is used:
|
||||
@ -86,9 +94,11 @@ release = wlauto.__version__
|
||||
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
exclude_patterns = ['**/*-example']
|
||||
exclude_patterns = ['../build', 'developer_reference', 'user_reference',
|
||||
'how_tos', 'run_config']
|
||||
|
||||
# The reST default role (used for this markup: `text`) to use for all documents.
|
||||
# The reST default role (used for this markup: `text`) to use for all
|
||||
# documents.
|
||||
#default_role = None
|
||||
|
||||
# If true, '()' will be appended to :func: etc. cross-reference text.
|
||||
@ -108,12 +118,18 @@ pygments_style = 'sphinx'
|
||||
# A list of ignored prefixes for module index sorting.
|
||||
#modindex_common_prefix = []
|
||||
|
||||
# If true, keep warnings as "system message" paragraphs in the built documents.
|
||||
#keep_warnings = False
|
||||
|
||||
# -- Options for HTML output ---------------------------------------------------
|
||||
# If true, `todo` and `todoList` produce output, else they produce nothing.
|
||||
todo_include_todos = False
|
||||
|
||||
|
||||
# -- Options for HTML output ----------------------------------------------
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
||||
# a list of builtin themes.
|
||||
html_theme = 'classic'
|
||||
html_theme = 'sphinx_rtd_theme'
|
||||
|
||||
# Theme options are theme-specific and customize the look and feel of a theme
|
||||
# further. For a list of options available for each theme, see the
|
||||
@ -142,7 +158,12 @@ html_theme = 'classic'
|
||||
# Add any paths that contain custom static files (such as style sheets) here,
|
||||
# relative to this directory. They are copied after the builtin static files,
|
||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||
html_static_path = ['_static']
|
||||
html_static_path = ['static']
|
||||
|
||||
# Add any extra paths that contain custom files (such as robots.txt or
|
||||
# .htaccess) here, relative to this directory. These files are copied
|
||||
# directly to the root of the documentation.
|
||||
#html_extra_path = []
|
||||
|
||||
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
|
||||
# using the given strftime format.
|
||||
@ -185,11 +206,24 @@ html_static_path = ['_static']
|
||||
# This is the file name suffix for HTML files (e.g. ".xhtml").
|
||||
#html_file_suffix = None
|
||||
|
||||
# Language to be used for generating the HTML full-text search index.
|
||||
# Sphinx supports the following languages:
|
||||
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
|
||||
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
|
||||
#html_search_language = 'en'
|
||||
|
||||
# A dictionary with options for the search language support, empty by default.
|
||||
# Now only 'ja' uses this config value
|
||||
#html_search_options = {'type': 'default'}
|
||||
|
||||
# The name of a javascript file (relative to the configuration directory) that
|
||||
# implements a search results scorer. If empty, the default will be used.
|
||||
#html_search_scorer = 'scorer.js'
|
||||
|
||||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = 'WorkloadAutomationdoc'
|
||||
htmlhelp_basename = 'wadoc'
|
||||
|
||||
|
||||
# -- Options for LaTeX output --------------------------------------------------
|
||||
# -- Options for LaTeX output ---------------------------------------------
|
||||
|
||||
latex_elements = {
|
||||
# The paper size ('letterpaper' or 'a4paper').
|
||||
@ -200,13 +234,17 @@ latex_elements = {
|
||||
|
||||
# Additional stuff for the LaTeX preamble.
|
||||
#'preamble': '',
|
||||
|
||||
# Latex figure (float) alignment
|
||||
#'figure_align': 'htbp',
|
||||
}
|
||||
|
||||
# Grouping the document tree into LaTeX files. List of tuples
|
||||
# (source start file, target name, title, author, documentclass [howto/manual]).
|
||||
# (source start file, target name, title,
|
||||
# author, documentclass [howto, manual, or own class]).
|
||||
latex_documents = [
|
||||
('index', 'WorkloadAutomation.tex', u'Workload Automation Documentation',
|
||||
u'WA Mailing List \\textless{}workload-automation@arm.com\\textgreater{},Sergei Trofimov \\textless{}sergei.trofimov@arm.com\\textgreater{}, Vasilis Flouris \\textless{}vasilis.flouris@arm.com\\textgreater{}, Mohammed Binsabbar \\textless{}mohammed.binsabbar@arm.com\\textgreater{}', 'manual'),
|
||||
(master_doc, 'wa.tex', u'wa Documentation',
|
||||
u'Arm Limited', 'manual'),
|
||||
]
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top of
|
||||
@ -230,27 +268,27 @@ latex_documents = [
|
||||
#latex_domain_indices = True
|
||||
|
||||
|
||||
# -- Options for manual page output --------------------------------------------
|
||||
# -- Options for manual page output ---------------------------------------
|
||||
|
||||
# One entry per manual page. List of tuples
|
||||
# (source start file, name, description, authors, manual section).
|
||||
man_pages = [
|
||||
('index', 'workloadautomation', u'Workload Automation Documentation',
|
||||
[u'WA Mailing List <workload-automation@arm.com>, Sergei Trofimov <sergei.trofimov@arm.com>, Vasilis Flouris <vasilis.flouris@arm.com>'], 1)
|
||||
(master_doc, 'wa', u'wa Documentation',
|
||||
[author], 1)
|
||||
]
|
||||
|
||||
# If true, show URL addresses after external links.
|
||||
#man_show_urls = False
|
||||
|
||||
|
||||
# -- Options for Texinfo output ------------------------------------------------
|
||||
# -- Options for Texinfo output -------------------------------------------
|
||||
|
||||
# Grouping the document tree into Texinfo files. List of tuples
|
||||
# (source start file, target name, title, author,
|
||||
# dir menu entry, description, category)
|
||||
texinfo_documents = [
|
||||
('index', 'WorkloadAutomation', u'Workload Automation Documentation',
|
||||
u'WA Mailing List <workload-automation@arm.com>, Sergei Trofimov <sergei.trofimov@arm.com>, Vasilis Flouris <vasilis.flouris@arm.com>', 'WorkloadAutomation', 'A framwork for automationg workload execution on mobile devices.',
|
||||
(master_doc, 'wa', u'wa Documentation',
|
||||
author, 'wa', 'A framework for automating workload execution on mobile devices.',
|
||||
'Miscellaneous'),
|
||||
]
|
||||
|
||||
@ -263,8 +301,25 @@ texinfo_documents = [
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False


def run_apidoc(_):
    sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
    cur_dir = os.path.abspath(os.path.dirname(__file__))
    api_output = os.path.join(cur_dir, 'api')
    module = os.path.join(cur_dir, '..', '..', 'wa')
    main(['-f', '-o', api_output, module, '--force'])


def setup(app):
    module_dir = os.path.join('..', '..', 'wa')
    excluded_extensions = [os.path.join(module_dir, 'framework'),
                           os.path.join(module_dir, 'tests')]
    os.chdir(os.path.dirname(__file__))
    app.connect('builder-inited', run_apidoc)
    generate_plugin_documentation(module_dir, 'plugins', excluded_extensions)
    generate_run_config_documentation('run_config')
    generate_meta_config_documentation('run_config')
    app.add_object_type('confval', 'confval',
                        objname='configuration value',
                        indextemplate='pair: %s; configuration value')

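With this hook in place, a plain ``sphinx-build`` run (which is all ReadTheDocs performs) regenerates the API and plugin pages automatically. The same flow can be exercised locally without the Makefile; a hedged sketch using Sphinx's application API (paths are illustrative):

.. code-block:: python

    from sphinx.application import Sphinx

    # Instantiating and building fires 'builder-inited', which runs run_apidoc()
    # above -- mirroring what ReadTheDocs does.
    app = Sphinx(srcdir='doc/source', confdir='doc/source',
                 outdir='doc/build/html', doctreedir='doc/build/doctrees',
                 buildername='html')
    app.build()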
@ -1,220 +0,0 @@
|
||||
.. _configuration-specification:
|
||||
|
||||
=============
|
||||
Configuration
|
||||
=============
|
||||
|
||||
In addition to specifying run execution parameters through an agenda, the
|
||||
behavior of WA can be modified through configuration file(s). The default
|
||||
configuration file is ``~/.workload_automation/config.py`` (the location can be
|
||||
changed by setting ``WA_USER_DIRECTORY`` environment variable, see :ref:`envvars`
|
||||
section below). This file will be
|
||||
created when you first run WA if it does not already exist. This file must
|
||||
always exist and will always be loaded. You can add to or override the contents
|
||||
of that file on invocation of Workload Automation by specifying an additional
|
||||
configuration file using ``--config`` option.
|
||||
|
||||
The config file is just a Python source file, so it can contain any valid Python
|
||||
code (though execution of arbitrary code through the config file is
|
||||
discouraged). Variables with specific names will be picked up by the framework
|
||||
and used to modify the behavior of Workload Automation.
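
As an illustrative sketch (the settings shown are described in the sections
below; the serial number is a placeholder, and the plugin names must match
plugins available in your installation), a minimal ``config.py`` might look
like this:

.. code-block:: python

    # ~/.workload_automation/config.py -- a minimal illustrative example

    # device interface to use (see the ``device`` setting below)
    device = 'generic_android'

    # interface-specific settings; the serial number is a placeholder
    device_config = dict(
        adb_name='0123456789ABCDEF',
    )

    # additional data collection and output
    instrumentation = ['execution_time']
    result_processors = ['standard', 'csv']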
|
||||
|
||||
.. note:: As of version 2.1.3 it is also possible to specify the following
|
||||
configuration in the agenda. See :ref:`configuration in an agenda <configuration_in_agenda>`\ .
|
||||
|
||||
|
||||
.. _available_settings:
|
||||
|
||||
Available Settings
|
||||
==================
|
||||
|
||||
.. note:: Plugins such as workloads, instrumentation or result processors
|
||||
may also pick up certain settings from this file, so the list below is
|
||||
not exhaustive. Please refer to the documentation for the specific
|
||||
plugins to see what settings they accept.
|
||||
|
||||
.. confval:: device
|
||||
|
||||
This setting defines what specific Device subclass will be used to interact with
|
||||
the connected device. Obviously, this must match your setup.
|
||||
|
||||
.. confval:: device_config
|
||||
|
||||
This must be a Python dict containing setting-value mapping for the
|
||||
configured :rst:dir:`device`. What settings and values are valid is specific
|
||||
to each device. Please refer to the documentation for your device.
|
||||
|
||||
.. confval:: reboot_policy
|
||||
|
||||
This defines when during execution of a run the Device will be rebooted. The
|
||||
possible values are:
|
||||
|
||||
``"never"``
|
||||
The device will never be rebooted.
|
||||
``"initial"``
|
||||
The device will be rebooted when the execution first starts, just before
|
||||
executing the first workload spec.
|
||||
``"each_spec"``
|
||||
The device will be rebooted before running a new workload spec.
|
||||
Note: this acts the same as ``each_iteration`` when execution order is set to ``by_iteration``.
|
||||
``"each_iteration"``
|
||||
The device will be rebooted before each new iteration.
|
||||
|
||||
.. seealso::
|
||||
|
||||
:doc:`execution_model`
|
||||
|
||||
.. confval:: execution_order
|
||||
|
||||
Defines the order in which the agenda spec will be executed. At the moment,
|
||||
the following execution orders are supported:
|
||||
|
||||
``"by_iteration"``
|
||||
The first iteration of each workload spec is executed one after the other,
|
||||
so all workloads are executed before proceeding on to the second iteration.
|
||||
E.g. A1 B1 C1 A2 C2 A3. This is the default if no order is explicitly specified.
|
||||
|
||||
In case of multiple sections, this will spread them out, such that specs
|
||||
from the same section are further apart. E.g. given sections X and Y, global
|
||||
specs A and B, and two iterations, this will run ::
|
||||
|
||||
X.A1, Y.A1, X.B1, Y.B1, X.A2, Y.A2, X.B2, Y.B2
|
||||
|
||||
``"by_section"``
|
||||
Same as ``"by_iteration"``, however this will group specs from the same
|
||||
section together, so given sections X and Y, global specs A and B, and two iterations,
|
||||
this will run ::
|
||||
|
||||
X.A1, X.B1, Y.A1, Y.B1, X.A2, X.B2, Y.A2, Y.B2
|
||||
|
||||
``"by_spec"``
|
||||
All iterations of the first spec are executed before moving on to the next
|
||||
spec. E.g. A1 A2 A3 B1 C1 C2. This may also be specified as ``"classic"``,
|
||||
as this was the way workloads were executed in earlier versions of WA.
|
||||
|
||||
``"random"``
|
||||
Execution order is entirely random.
|
||||
|
||||
Added in version 2.1.5.
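
For example, a config file might combine the reboot and ordering settings as
follows (the values shown are just one possible choice):

.. code-block:: python

    # reboot before each workload spec, and group specs by section
    reboot_policy = 'each_spec'
    execution_order = 'by_section'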
|
||||
|
||||
|
||||
.. confval:: retry_on_status
|
||||
|
||||
This is a list of statuses on which a job will be considered to have failed and
|
||||
will be automatically retried up to ``max_retries`` times. This defaults to
|
||||
``["FAILED", "PARTIAL"]`` if not set. Possible values are:
|
||||
|
||||
``"OK"``
|
||||
This iteration has completed and no errors have been detected.
|
||||
|
||||
``"PARTIAL"``
|
||||
One or more instruments have failed (the iteration may still be running).
|
||||
|
||||
``"FAILED"``
|
||||
The workload itself has failed.
|
||||
|
||||
``"ABORTED"``
|
||||
The user interrupted the workload.
|
||||
|
||||
.. confval:: max_retries
|
||||
|
||||
The maximum number of times failed jobs will be retried before giving up. If
|
||||
not set, this will default to ``3``.
|
||||
|
||||
.. note:: This number does not include the original attempt.
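
As a brief illustration, retrying both failed and partial jobs up to five
times each could be configured like this:

.. code-block:: python

    # statuses that trigger a retry, and how many retries to allow per job
    retry_on_status = ['FAILED', 'PARTIAL']
    max_retries = 5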
|
||||
|
||||
.. confval:: instrumentation
|
||||
|
||||
This should be a list of instruments to be enabled during run execution.
|
||||
Values must be names of available instruments. Instruments are used to
|
||||
collect additional data, such as energy measurements or execution time,
|
||||
during runs.
|
||||
|
||||
.. seealso::
|
||||
|
||||
:doc:`api/wlauto.instrumentation`
|
||||
|
||||
.. confval:: result_processors
|
||||
|
||||
This should be a list of result processors to be enabled during run execution.
|
||||
Values must be names of available result processors. Result processors define
|
||||
how data is output from WA.
|
||||
|
||||
.. seealso::
|
||||
|
||||
:doc:`api/wlauto.result_processors`
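
For illustration, enabling a couple of each might look like the following
(the names are examples and must correspond to plugins available in your
installation):

.. code-block:: python

    # collect execution time and DAQ power measurements...
    instrumentation = ['execution_time', 'daq']

    # ...and emit results as CSV in addition to the standard output
    result_processors = ['standard', 'csv']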
|
||||
|
||||
.. confval:: logging
|
||||
|
||||
A dict that contains logging settings. At the moment only three settings are
|
||||
supported:
|
||||
|
||||
``"file format"``
|
||||
Controls how logging output appears in the run.log file in the output
|
||||
directory.
|
||||
``"verbose format"``
|
||||
Controls how logging output appears on the console when the ``--verbose`` flag
|
||||
was used.
|
||||
``"regular format"``
|
||||
Controls how logging output appears on the console when the ``--verbose`` flag
|
||||
was not used.
|
||||
|
||||
All three values should be Python `old-style format strings`_ specifying which
|
||||
`log record attributes`_ should be displayed.
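
For example, to add timestamps to the log file while keeping the console
output short, the dict might look like this sketch (unlisted keys keep their
defaults; the format strings use standard log record attributes):

.. code-block:: python

    logging = {
        # what goes into run.log in the output directory
        'file format': '%(asctime)s %(levelname)-8s %(name)s: %(message)s',
        # console output with --verbose
        'verbose format': '%(asctime)s %(levelname)-8s %(name)s: %(message)s',
        # console output without --verbose
        'regular format': '%(levelname)-8s %(message)s',
    }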
|
||||
|
||||
.. confval:: remote_assets_path
|
||||
|
||||
Path to the local mount of a network assets repository. See
|
||||
:ref:`assets_repository`.
|
||||
|
||||
|
||||
There are also a couple of settings that are used to provide additional metadata
|
||||
for a run. These may get picked up by instruments or result processors to
|
||||
attach context to results.
|
||||
|
||||
.. confval:: project
|
||||
|
||||
A string naming the project for which data is being collected. This may be
|
||||
useful, e.g. when uploading data to a shared database that is populated from
|
||||
multiple projects.
|
||||
|
||||
.. confval:: project_stage
|
||||
|
||||
A dict or a string that allows adding an additional identifier. This may be
|
||||
useful for long-running projects.
|
||||
|
||||
.. confval:: run_name
|
||||
|
||||
A string that labels the WA run that is being performed. This would typically
|
||||
be set in the ``config`` section of an agenda (see
|
||||
:ref:`configuration in an agenda <configuration_in_agenda>`) rather than in the config file.
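
These are plain strings (or, for ``project_stage``, optionally a dict), so
setting them might look like this (the values are purely illustrative):

.. code-block:: python

    project = 'big_little_evaluation'
    project_stage = dict(phase='alpha', build='week12')
    run_name = 'baseline_before_tuning'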
|
||||
|
||||
.. _old-style format strings: http://docs.python.org/2/library/stdtypes.html#string-formatting-operations
|
||||
.. _log record attributes: http://docs.python.org/2/library/logging.html#logrecord-attributes
|
||||
|
||||
|
||||
.. _envvars:
|
||||
|
||||
Environment Variables
|
||||
=====================
|
||||
|
||||
In addition to standard configuration described above, WA behaviour can be
|
||||
altered through environment variables. These can determine where WA looks for
|
||||
various assets when it starts.
|
||||
|
||||
.. confval:: WA_USER_DIRECTORY
|
||||
|
||||
This is the location where WA will look for config.py, instrumentation, and it
|
||||
will also be used for local caches, etc. If this variable is not set, the
|
||||
default location is ``~/.workload_automation`` (this is created when WA
|
||||
is installed).
|
||||
|
||||
.. note:: This location **must** be writable by the user who runs WA.
|
||||
|
||||
|
||||
.. confval:: WA_PLUGIN_PATHS
|
||||
|
||||
By default, WA will look for plugins in its own package and in
|
||||
subdirectories under ``WA_USER_DIRECTORY``. This environment variable can
|
||||
be used to specify a colon-separated list of additional locations WA should
|
||||
use to look for plugins.
|
@ -1,74 +0,0 @@
|
||||
===========
|
||||
Conventions
|
||||
===========
|
||||
|
||||
Interface Definitions
|
||||
=====================
|
||||
|
||||
Throughout this documentation a number of stubbed-out class definitions will be
|
||||
presented showing an interface defined by a base class that needs to be
|
||||
implemented by the deriving classes. The following conventions will be used when
|
||||
presenting such an interface:
|
||||
|
||||
- Methods shown raising :class:`NotImplementedError` are abstract and *must*
|
||||
be overridden by subclasses.
|
||||
- Methods with ``pass`` in their body *may* be (but do not need to be) overridden
|
||||
by subclasses. If not overridden, these methods will default to the base
|
||||
class implementation, which may or may not be a no-op (the ``pass`` in the
|
||||
interface specification does not necessarily mean that the method does not have an
|
||||
actual implementation in the base class).
|
||||
|
||||
.. note:: If you *do* override these methods you must remember to call the
|
||||
base class' version inside your implementation as well.
|
||||
|
||||
- Attributes whose value is shown as ``None`` *must* be redefined by the
|
||||
subclasses with an appropriate value.
|
||||
- Attributes whose value is shown as something other than ``None`` (including
|
||||
empty strings/lists/dicts) *may* be (but do not need to be) overridden by
|
||||
subclasses. If not overridden, they will default to the value shown.
|
||||
|
||||
Keep in mind that the above convention applies only when showing interface
|
||||
definitions and may not apply elsewhere in the documentation. Also, in the
|
||||
interest of clarity, only the relevant parts of the base class definitions will
|
||||
be shown; some members (such as internal methods) may be omitted.
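
As a purely illustrative sketch (the class and member names below are invented
for this example and do not correspond to an actual WA interface), a definition
presented using these conventions might look like this:

.. code-block:: python

    class ExampleInterface(object):

        # must be redefined by subclasses with an appropriate value
        name = None

        # may be overridden; defaults to an empty list if it is not
        tags = []

        def initialize(self):
            # may be overridden; if you do, call the base class version too
            pass

        def execute(self):
            # abstract -- must be implemented by subclasses
            raise NotImplementedError()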
|
||||
|
||||
|
||||
Code Snippets
|
||||
=============
|
||||
|
||||
Code snippets provided are intended to be valid Python code, and to be complete.
|
||||
However, for the sake of clarity, in some cases only the relevant parts will be
|
||||
shown, with some details omitted (details that may be necessary to the validity of the code
|
||||
but not to understanding of the concept being illustrated). In such cases, a
|
||||
commented ellipsis will be used to indicate that parts of the code have been
|
||||
dropped. E.g. ::
|
||||
|
||||
# ...
|
||||
|
||||
def update_result(self, context):
|
||||
# ...
|
||||
context.result.add_metric('energy', 23.6, 'Joules', lower_is_better=True)
|
||||
|
||||
# ...
|
||||
|
||||
|
||||
Core Class Names
|
||||
================
|
||||
|
||||
When core classes are referenced throughout the documentation, usually their
|
||||
fully-qualified names are given e.g. :class:`wlauto.core.workload.Workload`.
|
||||
This is done so that Sphinx_ can resolve them and provide a link. While
|
||||
implementing plugins, however, you should *not* be importing anything
|
||||
directly from under :mod:`wlauto.core`. Instead, classes you are meant to
|
||||
instantiate or subclass have been aliased in the root :mod:`wlauto` package,
|
||||
and should be imported from there, e.g. ::
|
||||
|
||||
from wlauto import Workload
|
||||
|
||||
All examples given in the documentation follow this convention. Please note that
|
||||
this only applies to the :mod:`wlauto.core` subpackage; all other classes
|
||||
should be imported from their corresponding subpackages.
|
||||
|
||||
.. _Sphinx: http://sphinx-doc.org/
|
||||
|
||||
|
Binary file not shown.
Before Width: | Height: | Size: 151 KiB |
@ -1,252 +0,0 @@
|
||||
.. _daq_setup:
|
||||
|
||||
DAQ Server Guide
|
||||
================
|
||||
|
||||
NI-DAQ, or just "DAQ", is the Data Acquisition device developed by National
|
||||
Instruments:
|
||||
|
||||
http://www.ni.com/data-acquisition/
|
||||
|
||||
WA uses the DAQ to collect power measurements during workload execution. A
|
||||
client/server solution for this is distributed as part of WA, though it is
|
||||
distinct from WA and may be used separately (by invoking the client APIs from a
|
||||
Python script, or used directly from the command line).
|
||||
|
||||
This solution is dependent on the NI-DAQmx driver for the DAQ device. At the
|
||||
time of writing, only Windows versions of the driver are supported (there is an
|
||||
old Linux version that works on some versions of RHEL and Centos, but it is
|
||||
unsupported and won't work with recent Linux kernels). Because of this, the
|
||||
server part of the solution will need to be run on a Windows machine (though it
|
||||
should also work on Linux, if the driver becomes available).
|
||||
|
||||
|
||||
.. _daq_wiring:
|
||||
|
||||
DAQ Device Wiring
|
||||
-----------------
|
||||
|
||||
The server expects the device to be wired in a specific way in order to be able
|
||||
to collect power measurements. Two consecutive Analogue Input (AI) channels on
|
||||
the DAQ are used to form a logical "port" (starting with AI/0 and AI/1 for port
|
||||
0). Of these, the lower/even channel (e.g. AI/0) is used to measure the voltage
|
||||
on the rail we're interested in; the higher/odd channel (e.g. AI/1) is used to
|
||||
measure the voltage drop across a known very small resistor on the same rail,
|
||||
which is then used to calculate current. The logical wiring diagram looks like
|
||||
this::
|
||||
|
||||
Port N
|
||||
======
|
||||
|
|
||||
| AI/(N*2)+ <--- Vr -------------------------|
|
||||
| |
|
||||
| AI/(N*2)- <--- GND -------------------// |
|
||||
| |
|
||||
| AI/(N*2+1)+ <--- V ------------|-------V |
|
||||
| r | |
|
||||
| AI/(N*2+1)- <--- Vr --/\/\/\----| |
|
||||
| | |
|
||||
| | |
|
||||
| |------------------------------|
|
||||
======
|
||||
|
||||
Where:
|
||||
V: Voltage going into the resistor
|
||||
Vr: Voltage between resistor and the SOC
|
||||
GND: Ground
|
||||
r: The resistor across the rail with a known
|
||||
small value.
|
||||
|
||||
|
||||
The physical wiring will depend on the specific DAQ device, as channel layout
|
||||
varies between models.
|
||||
|
||||
.. note:: The current solution supports a variable number of ports; however, it
|
||||
assumes that the ports are sequential and start at zero. E.g. if you
|
||||
want to measure power on three rails, you will need to wire ports 0-2
|
||||
(AI/0 to AI/5 channels on the DAQ) to do it. It is not currently
|
||||
possible to use any other configuration (e.g. ports 1, 2 and 5).
|
||||
|
||||
|
||||
As an example, the following illustration shows the wiring of PORT0 (using AI/0
|
||||
and AI/1 channels) on a DAQ USB-6210.
|
||||
|
||||
.. image:: daq-wiring.png
|
||||
:scale: 70 %
|
||||
|
||||
Setting up NI-DAQmx driver on a Windows Machine
|
||||
-----------------------------------------------
|
||||
|
||||
- The NI-DAQmx driver is pretty big in size, 1.5 GB. The driver name is
|
||||
'NI-DAQmx' and its version '9.7.0f0', which you can obtain from the National
|
||||
Instruments website by downloading NI Measurement & Automation Explorer (NI
|
||||
MAX) from: http://joule.ni.com/nidu/cds/view/p/id/3811/lang/en
|
||||
|
||||
.. note:: During the installation process, you might be prompted to install
|
||||
.NET framework 4.
|
||||
|
||||
- The installation process is quite long, 7-15 minutes.
|
||||
- Once installed, open NI MAX, which should be on your desktop; if not, type its
|
||||
name into the Start menu search box.
|
||||
- Connect the NI-DAQ device to your machine. You should see it appear under
|
||||
'Devices and Interfaces'. If not, press 'F5' to refresh the list.
|
||||
- Complete the device wiring as described in the :ref:`daq_wiring` section.
|
||||
- Quit NI MAX.
|
||||
|
||||
|
||||
Setting up DAQ server
|
||||
---------------------
|
||||
|
||||
The DAQ power measurement solution is implemented in daqpower Python library,
|
||||
the package for which can be found in WA's install location under
|
||||
``wlauto/external/daq_server/daqpower-1.0.0.tar.gz`` (the version number in your
|
||||
installation may be different).
|
||||
|
||||
- Install NI-DAQmx driver, as described in the previous section.
|
||||
- Install Python 2.7.
|
||||
- Download and install ``pip``, ``numpy`` and ``twisted`` Python packages.
|
||||
These packages have C plugins, and so you will need a native compiler set
|
||||
up if you want to install them from PyPI. As an easier alternative, you can
|
||||
find pre-built Windows installers for these packages here_ (the versions are
|
||||
likely to be older than what's on PyPI though).
|
||||
- Install the daqpower package using pip::
|
||||
|
||||
pip install C:\Python27\Lib\site-packages\wlauto\external\daq_server\daqpower-1.0.0.tar.gz
|
||||
|
||||
This should automatically download and install ``PyDAQmx`` package as well
|
||||
(the Python bindings for the NI-DAQmx driver).
|
||||
|
||||
.. _here: http://www.lfd.uci.edu/~gohlke/pythonlibs/
|
||||
|
||||
|
||||
Running DAQ server
|
||||
------------------
|
||||
|
||||
Once you have installed the ``daqpower`` package and the required dependencies as
|
||||
described above, you can start the server by executing ``run-daq-server`` from the
|
||||
command line. The server will start listening on the default port, 45677.
|
||||
|
||||
.. note:: There is a chance that pip will not add ``run-daq-server`` into your
|
||||
path. In that case, you can run the DAQ server as follows:
|
||||
``python C:\path to python\Scripts\run-daq-server``
|
||||
|
||||
You can optionally specify flags to control the behaviour of the server::
|
||||
|
||||
usage: run-daq-server [-h] [-d DIR] [-p PORT] [--debug] [--verbose]
|
||||
|
||||
optional arguments:
|
||||
-h, --help show this help message and exit
|
||||
-d DIR, --directory DIR
|
||||
Working directory
|
||||
-p PORT, --port PORT port the server will listen on.
|
||||
--debug Run in debug mode (no DAQ connected).
|
||||
--verbose Produce verbose output.
|
||||
|
||||
.. note:: The server will use a working directory (by default, the directory
|
||||
the run-daq-server command was executed in, or the location specified
|
||||
with -d flag) to store power traces before they are collected by the
|
||||
client. This directory must be read/write-able by the user running
|
||||
the server.
|
||||
|
||||
|
||||
Collecting Power with WA
|
||||
------------------------
|
||||
|
||||
.. note:: You do *not* need to install the ``daqpower`` package on the machine
|
||||
running WA, as it is already included in the WA install structure.
|
||||
However, you do need to make sure that ``twisted`` package is
|
||||
installed.
|
||||
|
||||
You can enable the ``daq`` instrument in your agenda/config.py in order to get WA to
|
||||
collect power measurements. At minimum, you will also need to specify the
|
||||
resistor values for each port in your configuration, e.g.::
|
||||
|
||||
resistor_values = [0.005, 0.005] # in Ohms
|
||||
|
||||
This also specifies the number of logical ports (measurement sites) you want to
|
||||
use, and, implicitly, the port numbers (ports 0 to N-1 will be used).
|
||||
|
||||
.. note:: "ports" here refers to the logical ports wired on the DAQ (see :ref:`daq_wiring`,
|
||||
not to be confused with the TCP port the server is listening on.
|
||||
|
||||
Unless you're running the DAQ server and WA on the same machine (unlikely
|
||||
considering that WA is officially supported only on Linux and recent NI-DAQmx
|
||||
drivers are only available on Windows), you will also need to specify the IP
|
||||
address of the server::
|
||||
|
||||
daq_server = '127.0.0.1'
|
||||
|
||||
There are a number of other settings that can optionally be specified in the
|
||||
configuration (e.g. the labels to be used for DAQ ports). Please refer to the
|
||||
:class:`wlauto.instrumentation.daq.Daq` documentation for details.
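
Putting this together, the DAQ-related part of a config file might look like
the following sketch (only the two settings discussed above are shown; the
address is a placeholder):

.. code-block:: python

    instrumentation = ['daq']

    daq_server = '10.1.1.10'          # machine running run-daq-server
    resistor_values = [0.005, 0.005]  # one entry per wired port, in Ohms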
|
||||
|
||||
|
||||
Collecting Power from the Command Line
|
||||
--------------------------------------
|
||||
|
||||
``daqpower`` package also comes with a client that may be used from the command
|
||||
line. Unlike when collecting power with WA, you *will* need to install the
|
||||
``daqpower`` package. Once installed, you will be able to interact with a
|
||||
running DAQ server by invoking ``send-daq-command``. The invocation syntax is ::
|
||||
|
||||
send-daq-command --host HOST [--port PORT] COMMAND [OPTIONS]
|
||||
|
||||
Options are command-specific. COMMAND may be one of the following (and they
|
||||
should generally be invoked in that order):
|
||||
|
||||
:configure: Set up a new session, specifying the configuration values to
|
||||
be used. If there is already a configured session, it will
|
||||
be terminated. OPTIONS for this command are the DAQ
|
||||
configuration parameters listed in the DAQ instrument
|
||||
documentation with all ``_`` replaced by ``-`` and prefixed
|
||||
with ``--``, e.g. ``--resistor-values``.
|
||||
:start: Start collecting power measurements.
|
||||
:stop: Stop collecting power measurements.
|
||||
:get_data: Pull files containing power measurements from the server.
|
||||
There is one option for this command:
|
||||
``--output-directory`` which specifies where the files will
|
||||
be pulled to; if this is not specified, they will be placed in the
|
||||
current directory.
|
||||
:close: Close the currently configured server session. This will get rid
|
||||
of the data files and configuration on the server, so it would
|
||||
no longer be possible to use "start" or "get_data" commands
|
||||
before a new session is configured.
|
||||
|
||||
A typical command line session would go like this:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
send-daq-command --host 127.0.0.1 configure --resistor-values 0.005 0.005
|
||||
# set up and kick off the use case you want to measure
|
||||
send-daq-command --host 127.0.0.1 start
|
||||
# wait for the use case to complete
|
||||
send-daq-command --host 127.0.0.1 stop
|
||||
send-daq-command --host 127.0.0.1 get_data
|
||||
# files called PORT_0.csv and PORT_1.csv will appear in the current directory
|
||||
# containing measurements collected during use case execution
|
||||
send-daq-command --host 127.0.0.1 close
|
||||
# the session is terminated and the csv files on the server have been
|
||||
# deleted. A new session may now be configured.
|
||||
|
||||
In addition to these "standard workflow" commands, the following commands are
|
||||
also available:
|
||||
|
||||
:list_devices: Returns a list of DAQ devices detected by the NI-DAQmx
|
||||
driver. In case multiple devices are connected to the
|
||||
server host, you can specify the device you want to use
|
||||
with ``--device-id`` option when configuring a session.
|
||||
:list_ports: Returns a list of ports that have been configured for the
|
||||
current session, e.g. ``['PORT_0', 'PORT_1']``.
|
||||
:list_port_files: Returns a list of data files that have been generated
|
||||
(unless something went wrong, there should be one for
|
||||
each port).
|
||||
|
||||
|
||||
Collecting Power from another Python Script
|
||||
-------------------------------------------
|
||||
|
||||
You can invoke the above commands from a Python script using
|
||||
:py:func:`daqpower.client.execute_command` function, passing in
|
||||
:class:`daqpower.config.ServerConfiguration` and, in case of the configure command,
|
||||
:class:`daqpower.config.DeviceConfiguration`. Please see the implementation of
|
||||
the ``daq`` WA instrument for examples of how these APIs can be used.
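
The sketch below shows roughly what such a script might look like. The argument
names passed to ``ServerConfiguration``, ``DeviceConfiguration`` and
``execute_command`` are assumptions made for the purpose of illustration --
please check the ``daq`` instrument source for the actual signatures.

.. code-block:: python

    # Illustrative sketch only -- argument names are assumed, not documented API.
    from daqpower.client import execute_command
    from daqpower.config import ServerConfiguration, DeviceConfiguration

    server_config = ServerConfiguration(host='127.0.0.1', port=45677)
    device_config = DeviceConfiguration(resistor_values=[0.005, 0.005])

    execute_command(server_config, 'configure', device_config=device_config)
    # ... kick off the use case to be measured ...
    execute_command(server_config, 'start')
    # ... wait for the use case to complete ...
    execute_command(server_config, 'stop')
    execute_command(server_config, 'get_data', output_directory='daq_output')
    execute_command(server_config, 'close')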
|
17
doc/source/developer_reference.rst
Normal file
17
doc/source/developer_reference.rst
Normal file
@ -0,0 +1,17 @@
|
||||
.. _developer_reference:
|
||||
|
||||
====================
|
||||
Developer Reference
|
||||
====================
|
||||
|
||||
.. contents::
|
||||
:depth: 3
|
||||
:local:
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
|
||||
.. include:: developer_reference/writing_extensions.rst
|
||||
.. include:: developer_reference/contributing.rst
|
||||
.. include:: developer_reference/revent.rst
|
||||
|
@ -1,4 +1,3 @@
|
||||
|
||||
Contributing Code
|
||||
=================
|
||||
|
||||
@ -7,7 +6,7 @@ maintainability of the code line we ask that the code uses a coding style
|
||||
consistent with the rest of WA code. Briefly, it is
|
||||
|
||||
- `PEP8 <https://www.python.org/dev/peps/pep-0008/>`_ with line length and block
|
||||
comment rules relaxed (the wrapper for PEP8 checker inside ``dev_scripts``
|
||||
comment rules relaxed (the wrapper for PEP8 checker inside ``dev_scripts``
|
||||
will run it with appropriate configuration).
|
||||
- Four-space indentation (*no tabs!*).
|
||||
- Title-case for class names, underscore-delimited lower case for functions,
|
||||
@ -17,8 +16,8 @@ consistent with the rest of WA code. Briefly, it is
|
||||
"stats" for "statistics", "config" for "configuration", etc are OK). Do
|
||||
*not* use Hungarian notation (so prefer ``birth_date`` over ``dtBirth``).
|
||||
|
||||
New plugins should also follow implementation guidelines specified in
|
||||
:ref:`writing_plugins` section of the documentation.
|
||||
New extensions should also follow implementation guidelines specified in
|
||||
:ref:`writing_extensions` section of the documentation.
|
||||
|
||||
We ask that the following checks are performed on the modified code prior to
|
||||
submitting a pull request:
|
||||
@ -39,18 +38,18 @@ submitting a pull request:
|
||||
- ``./dev_scripts/pep8`` should be run without arguments and should produce no
|
||||
output (any output should be addressed by making appropriate changes in the
|
||||
code).
|
||||
- If the modifications touch core framework (anything under ``wlauto/core``), unit
|
||||
- If the modifications touch core framework (anything under ``wa/framework``), unit
|
||||
tests should be run using ``nosetests``, and they should all pass.
|
||||
|
||||
- If significant additions have been made to the framework, unit
|
||||
tests should be added to cover the new functionality.
|
||||
|
||||
- If modifications have been made to documentation (this includes description
|
||||
attributes for Parameters and Plugins), documentation should be built to
|
||||
attributes for Parameters and Extensions), documentation should be built to
|
||||
make sure there are no errors or warnings during the build process, and a visual inspection
|
||||
of new/updated sections in resulting HTML should be performed to ensure
|
||||
everything renders as expected.
|
||||
|
||||
Once your contribution is ready, please follow the instructions in `GitHub
|
||||
documentation <https://help.github.com/articles/creating-a-pull-request/>`_ to
|
||||
Once your contribution is ready, please follow the instructions in `GitHub
|
||||
documentation <https://help.github.com/articles/creating-a-pull-request/>`_ to
|
||||
create a pull request.
|
@ -1,229 +1,55 @@
|
||||
.. _revent_files_creation:
|
||||
Revent Recordings
|
||||
=================
|
||||
|
||||
revent
|
||||
++++++
|
||||
Convention for Naming revent Files for Revent Workloads
|
||||
-------------------------------------------------------------------------------
|
||||
|
||||
Overview and Usage
|
||||
==================
|
||||
There is a convention for naming revent files which you should follow if you
|
||||
want to record your own revent files. Each revent file must start with the
|
||||
device name (case sensitive), followed by a dot '.', then the stage name,
|
||||
then '.revent'. All your custom revent files should reside at
|
||||
``'~/.workload_automation/dependencies/WORKLOAD NAME/'``. These are the current
|
||||
supported stages:
|
||||
|
||||
revent utility can be used to record and later play back a sequence of user
|
||||
input events, such as key presses and touch screen taps. This is an alternative
|
||||
to Android UI Automator for providing automation for workloads. ::
|
||||
:setup: This stage is where the application is loaded (if present). It is
|
||||
a good place to record a revent sequence to perform any tasks needed to get
|
||||
ready for the main part of the workload to start.
|
||||
:run: This stage is where the main work of the workload should be performed.
|
||||
This will allow for more accurate results if the revent file for this
|
||||
stage only records the main actions under test.
|
||||
:extract_results: This stage is used after the workload has been completed
|
||||
to retrieve any metrics from the workload e.g. a score.
|
||||
:teardown: This stage is where any final actions should be performed to
|
||||
clean up the workload.
|
||||
|
||||
Only the run stage is mandatory, the remaining stages will be replayed if a
|
||||
recording is present otherwise no actions will be performed for that particular
|
||||
stage.
|
||||
|
||||
usage:
|
||||
revent [record time file|replay file|info] [verbose]
|
||||
record: stops after either return on stdin
|
||||
or time (in seconds)
|
||||
and stores in file
|
||||
replay: replays eventlog from file
|
||||
info:shows info about each event char device
|
||||
any additional parameters make it verbose
|
||||
For instance, to add custom revent files for a device named mydevice and
|
||||
a workload name myworkload, you need to add the revent files to the directory
|
||||
``/home/$WA_USER_HOME/dependencies/myworkload/revent_files``, creating it if
|
||||
necessary. ::
|
||||
|
||||
Recording
|
||||
---------
|
||||
mydevice.setup.revent
|
||||
mydevice.run.revent
|
||||
mydevice.extract_results.revent
|
||||
mydevice.teardown.revent
|
||||
|
||||
WA features a ``record`` command that will automatically deploy and start
|
||||
revent on the target device::
|
||||
|
||||
wa record
|
||||
INFO Connecting to device...
|
||||
INFO Press Enter when you are ready to record...
|
||||
[Pressed Enter]
|
||||
INFO Press Enter when you have finished recording...
|
||||
[Pressed Enter]
|
||||
INFO Pulling files from device
|
||||
|
||||
Once started, you will need to get the target device ready to record (e.g.
|
||||
unlock screen, navigate menus and launch an app) then press ``ENTER``.
|
||||
The recording has now started and button presses, taps, etc you perform on
|
||||
the device will go into the .revent file. To stop the recording simply press
|
||||
``ENTER`` again.
|
||||
|
||||
Once you have finished recording the revent file will be pulled from the device
|
||||
to the current directory. It will be named ``{device_model}.revent``. When
|
||||
recording revent files for a ``GameWorkload`` you can use the ``-s`` option to
|
||||
add ``run`` or ``setup`` suffixes.
|
||||
|
||||
From version 2.6 of WA onwards, a "gamepad" recording mode is also supported.
|
||||
This mode requires a gamepad to be connected to the device when recording, but
|
||||
the recordings produced in this mode should be portable across devices.
|
||||
|
||||
For more information please read :ref:`record-command`.
|
||||
|
||||
|
||||
Replaying
|
||||
---------
|
||||
|
||||
To replay a recorded file, run ``wa replay``, giving it the file you want to
|
||||
replay::
|
||||
|
||||
wa replay my_recording.revent
|
||||
|
||||
For more information please read :ref:`replay-command`.
|
||||
|
||||
|
||||
Using revent With Workloads
|
||||
---------------------------
|
||||
|
||||
Some workloads (pretty much all games) rely on recorded revents for their
|
||||
execution. :class:`wlauto.common.GameWorkload`-derived workloads expect two
|
||||
revent files -- one for performing the initial setup (navigating menus,
|
||||
selecting game modes, etc), and one for the actual execution of the game.
|
||||
Because revents are very device-specific\ [*]_, these two files would need to
|
||||
be recorded for each device.
|
||||
|
||||
The files must be called ``<device name>.(setup|run).revent``, where
|
||||
``<device name>`` is the name of your device (as defined by the ``name``
|
||||
attribute of your device's class). WA will look for these files in two
|
||||
places: ``<install dir>/wlauto/workloads/<workload name>/revent_files``
|
||||
and ``~/.workload_automation/dependencies/<workload name>``. The first
|
||||
location is primarily intended for revent files that come with WA (and if
|
||||
you did a system-wide install, you'll need sudo to add files there), so it's
|
||||
probably easier to use the second location for the files you record. Also,
|
||||
if revent files for a workload exist in both locations, the files under
|
||||
``~/.workload_automation/dependencies`` will be used in favor of those
|
||||
installed with WA.
|
||||
|
||||
For example, if you wanted to run angrybirds workload on "Acme" device, you would
|
||||
record the setup and run revent files using the method outlined in the section
|
||||
above and then pull them for the devices into the following locations::
|
||||
|
||||
~/.workload_automation/dependencies/angrybirds/Acme.setup.revent
|
||||
~/.workload_automation/dependencies/angrybirds/Acme.run.revent
|
||||
|
||||
(you may need to create the intermediate directories if they don't already
|
||||
exist).
|
||||
|
||||
.. [*] It's not just about screen resolution -- the event codes may be different
|
||||
even if devices use the same screen.
|
||||
|
||||
|
||||
revent vs. UiAutomator
|
||||
----------------------
|
||||
|
||||
In general, Android UI Automator is the preferred way of automating user input
|
||||
for workloads because, unlike revent, UI Automator does not depend on a
|
||||
particular screen resolution, and so is more portable across different devices.
|
||||
It also gives better control and can potentially be faster for long UI
|
||||
manipulations, as input events are scripted based on the available UI elements,
|
||||
rather than generated by human input.
|
||||
|
||||
On the other hand, revent can be used to manipulate pretty much any workload,
|
||||
whereas UI Automator only works for Android UI elements (such as text boxes or
|
||||
radio buttons), which makes the latter useless for things like games. Recording
|
||||
revent sequence is also faster than writing automation code (on the other hand,
|
||||
one would need to maintain a different revent log for each screen resolution).
|
||||
|
||||
|
||||
Using state detection with revent
|
||||
=================================
|
||||
|
||||
State detection can be used to verify that a workload is executing as expected.
|
||||
This utility, if enabled, and if state definitions are available for the
|
||||
particular workload, takes a screenshot after the setup and the run revent
|
||||
sequence, matches the screenshot to a state and compares with the expected
|
||||
state. A WorkloadError is raised if an unexpected state is encountered.
|
||||
|
||||
To enable state detection, make sure a valid state definition file and
|
||||
templates exist for your workload and set the check_states parameter to True.
|
||||
|
||||
State definition directory
|
||||
--------------------------
|
||||
|
||||
State and phase definitions should be placed in a directory of the following
|
||||
structure inside the dependencies directory of each workload (along with
|
||||
revent files etc):
|
||||
|
||||
::
|
||||
|
||||
dependencies/
|
||||
<workload_name>/
|
||||
state_definitions/
|
||||
definition.yaml
|
||||
templates/
|
||||
<oneTemplate>.png
|
||||
<anotherTemplate>.png
|
||||
...
|
||||
|
||||
definition.yaml file
|
||||
--------------------
|
||||
|
||||
This defines each state of the workload and lists which templates are expected
|
||||
to be found and how many are required to be detected for a conclusive match. It
|
||||
also defines the expected state in each workload phase where a state detection
|
||||
is run (currently those are setup_complete and run_complete).
|
||||
|
||||
Templates are picture elements to be matched in a screenshot. Each template
|
||||
mentioned in the definition file should be placed as a file with the same name
|
||||
and a .png extension inside the templates folder. Creating template png files
|
||||
is as simple as taking a screenshot of the workload in a given state, cropping
|
||||
out the relevant templates (e.g. a button, label, or other unique element that is
|
||||
present in that state) and storing them in PNG format.
|
||||
|
||||
Please see the definition file for Angry Birds below as an example to
|
||||
understand the format. Note that more than just two states (for the afterSetup
|
||||
and afterRun phase) can be defined and this helps track the cause of errors in
|
||||
case an unexpected state is encountered.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
workload_name: angrybirds
|
||||
|
||||
workload_states:
|
||||
- state_name: titleScreen
|
||||
templates:
|
||||
- play_button
|
||||
- logo
|
||||
matches: 2
|
||||
- state_name: worldSelection
|
||||
templates:
|
||||
- first_world_thumb
|
||||
- second_world_thumb
|
||||
- third_world_thumb
|
||||
- fourth_world_thumb
|
||||
matches: 3
|
||||
- state_name: level_selection
|
||||
templates:
|
||||
- locked_level
|
||||
- first_level
|
||||
matches: 2
|
||||
- state_name: gameplay
|
||||
templates:
|
||||
- pause_button
|
||||
- score_label_text
|
||||
matches: 2
|
||||
- state_name: pause_screen
|
||||
templates:
|
||||
- replay_button
|
||||
- menu_button
|
||||
- resume_button
|
||||
- help_button
|
||||
matches: 4
|
||||
- state_name: level_cleared_screen
|
||||
templates:
|
||||
- level_cleared_text
|
||||
- menu_button
|
||||
- replay_button
|
||||
- fast_forward_button
|
||||
matches: 4
|
||||
|
||||
workload_phases:
|
||||
- phase_name: setup_complete
|
||||
expected_state: gameplay
|
||||
- phase_name: run_complete
|
||||
expected_state: level_cleared_screen
|
||||
Any revent file in the dependencies will always overwrite the revent file in the
|
||||
workload directory. So for example it is possible to just provide one revent for
|
||||
setup in the dependencies and use the run.revent that is in the workload directory.
|
||||
|
||||
|
||||
File format of revent recordings
|
||||
================================
|
||||
--------------------------------
|
||||
|
||||
You do not need to understand the recording format in order to use revent. This
|
||||
section is intended for those looking to extend revent in some way, or to
|
||||
utilize revent recordings for other purposes.
|
||||
|
||||
Format Overview
|
||||
---------------
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
Recordings are stored in a binary format. A recording consists of three
|
||||
sections::
|
||||
@ -253,7 +79,7 @@ All fields are either fixed size or prefixed with their length or the number of
|
||||
|
||||
|
||||
Recording Header
|
||||
----------------
|
||||
^^^^^^^^^^^^^^^^
|
||||
|
||||
An revent recording header has the following structure
|
||||
|
||||
@ -289,13 +115,13 @@ An revent recoding header has the following structure
|
||||
|
||||
|
||||
Device Description
|
||||
------------------
|
||||
^^^^^^^^^^^^^^^^^^
|
||||
|
||||
This section describes the input devices used in the recording. Its structure is
|
||||
determined by the value of ``Mode`` field in the header.
|
||||
|
||||
general recording
|
||||
~~~~~~~~~~~~~~~~~
|
||||
^^^^^^^^^^^^^^^^^
|
||||
|
||||
.. note:: This is the only format supported prior to version ``2``.
|
||||
|
||||
@ -333,7 +159,7 @@ path is *not* NULL-terminated.
|
||||
|
||||
|
||||
gamepad recording
|
||||
~~~~~~~~~~~~~~~~~
|
||||
^^^^^^^^^^^^^^^^^
|
||||
|
||||
The recording has been made from a specific gamepad. All events in the stream
|
||||
will be for that device only. The section describes the device properties that
|
||||
@ -415,7 +241,7 @@ determined by the ``abs_bits`` field.
|
||||
|
||||
|
||||
Event stream
|
||||
------------
|
||||
^^^^^^^^^^^^
|
||||
|
||||
The majority of an revent recording will be made up of the input events that were
|
||||
recorded. The event stream is prefixed with the number of events in the stream,
|
||||
@ -457,7 +283,7 @@ and start and end times for the recording.
|
||||
|
||||
|
||||
Event structure
|
||||
~~~~~~~~~~~~~~~
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
Each event entry is structured as follows:
|
||||
|
||||
@ -496,14 +322,14 @@ https://www.kernel.org/doc/Documentation/input/event-codes.txt
|
||||
|
||||
|
||||
Parser
|
||||
------
|
||||
^^^^^^
|
||||
|
||||
WA has a parser for revent recordings. This can be used to work with revent
|
||||
recordings in scripts. Here is an example:
|
||||
|
||||
.. code:: python
|
||||
|
||||
from wlauto.utils.revent import ReventRecording
|
||||
from wa.utils.revent import ReventRecording
|
||||
|
||||
with ReventRecording('/path/to/recording.revent') as recording:
|
||||
print "Recording: {}".format(recording.filepath)
|
1118
doc/source/developer_reference/writing_extensions.rst
Normal file
1118
doc/source/developer_reference/writing_extensions.rst
Normal file
File diff suppressed because it is too large
Load Diff
@ -1,407 +0,0 @@
|
||||
Setting Up A Device
|
||||
===================
|
||||
|
||||
WA should work with most Android devices out of the box, as long as the device
|
||||
is discoverable by ``adb`` (i.e. gets listed when you run ``adb devices``). For
|
||||
USB-attached devices, that should be the case; for network devices, ``adb connect``
|
||||
would need to be invoked with the IP address of the device. If there is only one
|
||||
device connected to the host running WA, then no further configuration should be
|
||||
necessary (though you may want to :ref:`tweak some Android settings <configuring-android>`\ ).
|
||||
|
||||
If you have multiple devices connected, have a non-standard Android build (e.g.
|
||||
on a development board), or want to use some of the more advanced WA functionality,
|
||||
further configuration will be required.
|
||||
|
||||
Android
|
||||
+++++++
|
||||
|
||||
General Device Setup
|
||||
--------------------
|
||||
|
||||
You can specify the device interface by setting ``device`` setting in
|
||||
``~/.workload_automation/config.py``. Available interfaces can be viewed by
|
||||
running ``wa list devices`` command. If you don't see your specific device
|
||||
listed (which is likely unless you're using one of the ARM-supplied platforms), then
|
||||
you should use ``generic_android`` interface (this is set in the config by
|
||||
default).
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
device = 'generic_android'
|
||||
|
||||
The device interface may be configured through the ``device_config`` setting, whose
|
||||
value is a ``dict`` mapping setting names to their values. You can find the full
|
||||
list of available parameters by looking up your device interface in the
|
||||
:ref:`devices` section of the documentation. Some of the most common parameters
|
||||
you might want to change are outlined below.
|
||||
|
||||
.. confval:: adb_name
|
||||
|
||||
If you have multiple Android devices connected to the host machine, you will
|
||||
need to set this to indicate to WA which device you want it to use.
|
||||
|
||||
.. confval:: working_directory
|
||||
|
||||
WA needs a "working" directory on the device which it will use for collecting
|
||||
traces, caching assets it pushes to the device, etc. By default, it will
|
||||
create one under ``/sdcard`` which should be mapped and writable on standard
|
||||
Android builds. If this is not the case for your device, you will need to
|
||||
specify an alternative working directory (e.g. under ``/data/local``).
|
||||
|
||||
.. confval:: scheduler
|
||||
|
||||
This specifies the scheduling mechanism (from the perspective of core layout)
|
||||
utilized by the device. For recent big.LITTLE devices, this should generally
|
||||
be "hmp" (ARM Hetrogeneous Mutli-Processing); some legacy development
|
||||
platforms might have Linaro IKS kernels, in which case it should be "iks".
|
||||
For homogeneous (single-cluster) devices, it should be "smp". Please see
|
||||
``scheduler`` parameter in the ``generic_android`` device documentation for
|
||||
more details.
|
||||
|
||||
.. confval:: core_names
|
||||
|
||||
This and ``core_clusters`` need to be set if you want to utilize some more
|
||||
advanced WA functionality (like setting of core-related runtime parameters
|
||||
such as governors, frequencies, etc). ``core_names`` should be a list of
|
||||
core names matching the order in which they are exposed in sysfs. For
|
||||
example, ARM TC2 SoC is a 2x3 big.LITTLE system; its core_names would be
|
||||
``['a7', 'a7', 'a7', 'a15', 'a15']``, indicating that cpu0-cpu2 in cpufreq
|
||||
sysfs structure are A7's and cpu3 and cpu4 are A15's.
|
||||
|
||||
.. confval:: core_clusters
|
||||
|
||||
If ``core_names`` is defined, this must also be defined. This is a list of
|
||||
integer values indicating the cluster the corresponding core in
|
||||
``cores_names`` belongs to. For example, for TC2, this would be
|
||||
``[0, 0, 0, 1, 1]``, indicating that A7's are on cluster 0 and A15's are on
|
||||
cluster 1.
|
||||
|
||||
A typical ``device_config`` inside ``config.py`` may look something like
|
||||
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
device_config = dict(
|
||||
adb_name='0123456789ABCDEF',
|
||||
working_directory='/sdcard/wa-working',
|
||||
core_names=['a7', 'a7', 'a7', 'a15', 'a15'],
|
||||
core_clusters=[0, 0, 0, 1, 1],
|
||||
# ...
|
||||
)
|
||||
|
||||
.. _configuring-android:
|
||||
|
||||
Configuring Android
|
||||
-------------------
|
||||
|
||||
There are a few additional tasks you may need to perform once you have a device
|
||||
booted into Android (especially if this is an initial boot of a fresh OS
|
||||
deployment):
|
||||
|
||||
- You have gone through FTU (first time usage) on the home screen and
|
||||
in the apps menu.
|
||||
- You have disabled the screen lock.
|
||||
- You have set sleep timeout to the highest possible value (30 mins on
|
||||
most devices).
|
||||
- You have disabled brightness auto-adjust and have set the brightness
|
||||
to a fixed level.
|
||||
- You have set the locale language to "English" (this is important for
|
||||
some workloads in which UI automation looks for specific text in UI
|
||||
elements).
|
||||
|
||||
TC2 Setup
|
||||
---------
|
||||
|
||||
This section outlines how to setup ARM TC2 development platform to work with WA.
|
||||
|
||||
Pre-requisites
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
You can obtain the full set of images for TC2 from Linaro:
|
||||
|
||||
https://releases.linaro.org/latest/android/vexpress-lsk.
|
||||
|
||||
For the easiest setup, follow the instructions on the "Firmware" and "Binary
|
||||
Image Installation" tabs on that page.
|
||||
|
||||
.. note:: The default ``reboot_policy`` in ``config.py`` is to not reboot. With
|
||||
this WA will assume that the device is already booted into Android
|
||||
prior to WA being invoked. If you want WA to do the initial boot of
|
||||
the TC2, you will have to change reboot policy to at least
|
||||
``initial``.
|
||||
|
||||
|
||||
Setting Up Images
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. note:: Make sure that both DIP switches near the black reset button on TC2
|
||||
are up (this is counter to the Linaro guide that instructs to lower
|
||||
one of the switches).
|
||||
|
||||
.. note:: The TC2 must have an Ethernet connection.
|
||||
|
||||
|
||||
If you have followed the setup instructions on the Linaro page, you should have
|
||||
a USB stick or an SD card with the file system, and internal microSD on the
|
||||
board (VEMSD) with the firmware images. The default Linaro configuration is to
|
||||
boot from the image on the boot partition in the file system you have just
|
||||
created. This is not supported by WA, which expects the image to be in NOR flash
|
||||
on the board. This requires you to copy the images from the boot partition onto
|
||||
the internal microSD card.
|
||||
|
||||
Assuming the boot partition of the Linaro file system is mounted on
|
||||
``/media/boot`` and the internal microSD is mounted on ``/media/VEMSD``, copy
|
||||
the following images::
|
||||
|
||||
cp /media/boot/zImage /media/VEMSD/SOFTWARE/kern_mp.bin
|
||||
cp /media/boot/initrd /media/VEMSD/SOFTWARE/init_mp.bin
|
||||
cp /media/boot/v2p-ca15-tc2.dtb /media/VEMSD/SOFTWARE/mp_a7bc.dtb
|
||||
|
||||
Optionally
|
||||
##########
|
||||
|
||||
The default device tree configuration for the TC2 is to boot on the A7 cluster. It
|
||||
is also possible to configure the device tree to boot on the A15 cluster, or to
|
||||
boot with one of the clusters disabled (turning TC2 into an A7-only or A15-only
|
||||
device). Please refer to the "Firmware" tab on the Linaro page linked above for
|
||||
instructions on how to compile the appropriate device tree configurations.
|
||||
|
||||
WA allows selecting between these configurations using ``os_mode`` boot
|
||||
parameter of the TC2 device interface. In order for this to work correctly,
|
||||
device tree files for the A15-bootcluster, A7-only and A15-only configurations
|
||||
should be copied into ``/media/VEMSD/SOFTWARE/`` as ``mp_a15bc.dtb``,
|
||||
``mp_a7.dtb`` and ``mp_a15.dtb`` respectively.
|
||||
|
||||
This is entirely optional. If you're not planning on switching boot cluster
|
||||
configuration, those files do not need to be present in VEMSD.
|
||||
|
||||
config.txt
|
||||
##########
|
||||
|
||||
Also, make sure that ``USB_REMOTE`` setting in ``/media/VEMSD/config.txt`` is set
|
||||
to ``TRUE`` (this will allow rebooting the device by writing reboot.txt to
|
||||
VEMSD). ::
|
||||
|
||||
USB_REMOTE: TRUE ;Selects remote command via USB
|
||||
|
||||
|
||||
TC2-specific device_config settings
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
There are a few settings that may need to be set in ``device_config`` inside
|
||||
your ``config.py`` which are specific to TC2:
|
||||
|
||||
.. note:: TC2 *does not* accept most "standard" android ``device_config``
|
||||
settings.
|
||||
|
||||
adb_name
|
||||
If you're running WA with reboots disabled (which is the default reboot
|
||||
policy), you will need to manually run ``adb connect`` with TC2's IP
|
||||
address and set this.
|
||||
|
||||
root_mount
|
||||
WA expects TC2's internal microSD to be mounted on the host under
|
||||
``/media/VEMSD``. If this location is different, it needs to be specified
|
||||
using this setting.
|
||||
|
||||
boot_firmware
|
||||
WA defaults to try booting using UEFI, which will require some additional
|
||||
firmware from ARM that may not be provided with Linaro releases (see the
|
||||
UEFI and PSCI section below). If you do not have those images, you will
|
||||
need to set ``boot_firmware`` to ``bootmon``.
|
||||
|
||||
fs_medium
|
||||
TC2's file system can reside either on an SD card or on a USB stick. Boot
|
||||
configuration is different depending on this. By default, WA expects it
|
||||
to be on ``usb``; if you are using an SD card, you should set this to
|
||||
``sd``.
|
||||
|
||||
bm_image
|
||||
Bootmon image that comes as part of TC2 firmware periodically gets
|
||||
updated. At the time of the release, ``bm_v519r.axf`` was used by
|
||||
ARM. If you are using a more recent image, you will need to set this
|
||||
indicating the image name (just the name of the actual file, *not* the
|
||||
path). Note: this setting only applies if using ``bootmon`` boot
|
||||
firmware.
|
||||
|
||||
serial_device
|
||||
WA will assume TC2 is connected on ``/dev/ttyS0`` by default. If the
|
||||
serial port is different, you will need to set this.
|
||||
|
||||
|
||||
UEFI and PSCI
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
UEFI is a boot firmware alternative to bootmon. Currently UEFI is coupled with PSCI (Power State Coordination Interface). That means
|
||||
that in order to use PSCI, UEFI has to be the boot firmware. Currently the reverse dependency is true as well (for TC2). Therefore
|
||||
using UEFI requires enabling PSCI.
|
||||
|
||||
In case you intend to use UEFI/PSCI mode instead of bootmon, you will need two additional files: tc2_sec.bin and tc2_uefi.bin.
|
||||
After obtaining those files, place them inside the /media/VEMSD/SOFTWARE/ directory::
|
||||
|
||||
cp tc2_sec.bin /media/VEMSD/SOFTWARE/
|
||||
cp tc2_uefi.bin /media/VEMSD/SOFTWARE/
|
||||
|
||||
|
||||
Juno Setup
|
||||
----------
|
||||
|
||||
.. note:: At the time of writing, the Android software stack on Juno was still
|
||||
very immature. Some workloads may not run, and there may be stability
|
||||
issues with the device.
|
||||
|
||||
|
||||
The full software stack can be obtained from Linaro:
|
||||
|
||||
https://releases.linaro.org/14.08/members/arm/android/images/armv8-android-juno-lsk
|
||||
|
||||
Please follow the instructions on the "Binary Image Installation" tab on that
|
||||
page. More up-to-date firmware and kernel may also be obtained by registered
|
||||
members from ARM Connected Community: http://www.arm.com/community/ (though this
|
||||
is not guaranteed to work with the Linaro file system).
|
||||
|
||||
UEFI
|
||||
~~~~
|
||||
|
||||
Juno uses UEFI_ to boot the kernel image. UEFI supports multiple boot
|
||||
configurations, and presents a menu on boot to select (in default configuration
|
||||
it will automatically boot the first entry in the menu if not interrupted before
|
||||
a timeout). WA will look for a specific entry in the UEFI menu
|
||||
(``'WA'`` by default, but that may be changed by setting ``uefi_entry`` in the
|
||||
``device_config``). When following the UEFI instructions on the above Linaro
|
||||
page, please make sure to name the entry appropriately (or to correctly set the
|
||||
``uefi_entry``).
|
||||
|
||||
.. _UEFI: http://en.wikipedia.org/wiki/UEFI
|
||||
|
||||
There are two supported ways for Juno to discover kernel images through UEFI. It
|
||||
can either load them from NOR flash on the board, or from the boot partition on the
|
||||
file system. The setup described on the Linaro page uses the boot partition
|
||||
method.
|
||||
|
||||
If WA does not find the UEFI entry it expects, it will create one. However, it
|
||||
will assume that the kernel image resides in NOR flash, which means it will not
|
||||
work with Linaro file system. So if you're replicating the Linaro setup exactly,
|
||||
you will need to create the entry manually, as outlined on the above-linked page.
|
||||
|
||||
Rebooting
|
||||
~~~~~~~~~
|
||||
|
||||
At the time of writing, normal Android reboot did not work properly on Juno
|
||||
Android, causing the device to crash into an irrecoverable state. Therefore, WA
|
||||
will perform a hard reset to reboot the device. It will attempt to do this by
|
||||
toggling the DTR line on the serial connection to the device. In order for this
|
||||
to work, you need to make sure that SW1 configuration switch on the back panel of
|
||||
the board (the right-most DIP switch) is toggled *down*.
|
||||
|
||||
|
||||
Linux
|
||||
+++++
|
||||
|
||||
General Device Setup
|
||||
--------------------
|
||||
|
||||
You can specify the device interface by setting ``device`` setting in
|
||||
``~/.workload_automation/config.py``. Available interfaces can be viewed by
|
||||
running ``wa list devices`` command. If you don't see your specific device
|
||||
listed (which is likely unless you're using one of the ARM-supplied platforms), then
|
||||
you should use ``generic_linux`` interface (this is set in the config by
|
||||
default).
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
device = 'generic_linux'
|
||||
|
||||
The device interface may be configured through the ``device_config`` setting, whose
|
||||
value is a ``dict`` mapping setting names to their values. You can find the full
|
||||
list of available parameters by looking up your device interface in the
|
||||
:ref:`devices` section of the documentation. Some of the most common parameters
|
||||
you might want to change are outlined below.
|
||||
|
||||
Currently, the only supported method for talking to a Linux device is over
|
||||
SSH. Device configuration must specify the parameters needed to establish the
|
||||
connection.
|
||||
|
||||
.. confval:: host
|
||||
|
||||
This should be either the DNS name or IP address of the device.
|
||||
|
||||
.. confval:: username
|
||||
|
||||
The login name of the user on the device that WA will use. This user should
|
||||
have a home directory (unless an alternative working directory is specified
|
||||
using ``working_directory`` config -- see below), and, for full
|
||||
functionality, the user should have sudo rights (WA will be able to use
|
||||
sudo-less accounts, but some instruments or workloads may not work).
|
||||
|
||||
.. confval:: password
|
||||
|
||||
Password for the account on the device. Either this or a ``keyfile`` (see
|
||||
below) must be specified.
|
||||
|
||||
.. confval:: keyfile
|
||||
|
||||
If key-based authentication is used, this may be used to specify the SSH identity
|
||||
file instead of the password.
|
||||
|
||||
.. confval:: property_files
|
||||
|
||||
This is a list of paths that will be pulled for each WA run into the __meta
|
||||
subdirectory in the results. The intention is to collect meta-data about the
|
||||
device that may aid in reproducing the results later. The paths specified do
|
||||
not have to exist on the device (they will be ignored if they do not). The
|
||||
default list is ``['/proc/version', '/etc/debian_version', '/etc/lsb-release', '/etc/arch-release']``
|
||||
|
||||
|
||||
In addition, ``working_directory``, ``scheduler``, ``core_names``, and
|
||||
``core_clusters`` can also be specified and have the same meaning as for Android
|
||||
devices (see above).
|
||||
|
||||
A typical ``device_config`` inside ``config.py`` may look something like
|
||||
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
device_config = dict(
|
||||
host='192.168.0.7',
|
||||
username='guest',
|
||||
password='guest',
|
||||
core_names=['a7', 'a7', 'a7', 'a15', 'a15'],
|
||||
core_clusters=[0, 0, 0, 1, 1],
|
||||
# ...
|
||||
)
|
||||
|
||||
|
||||
Related Settings
|
||||
++++++++++++++++
|
||||
|
||||
Reboot Policy
|
||||
-------------
|
||||
|
||||
This indicates when during WA execution the device will be rebooted. By default
|
||||
this is set to ``never``, indicating that WA will not reboot the device. Please
|
||||
see ``reboot_policy`` documentation in :ref:`configuration-specification` for
|
||||
|
||||
more details.
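
For example, to have WA reboot the device before each workload spec, you could
add something like the following to your config (a sketch; please check the
``reboot_policy`` documentation for the full set of accepted values):

.. code-block:: python

    # in ~/.workload_automation/config.py
    reboot_policy = 'each_spec'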
|
||||
|
||||
Execution Order
|
||||
---------------
|
||||
|
||||
``execution_order`` defines the order in which WA will execute workloads.
|
||||
``by_iteration`` (set by default) will execute the first iteration of each spec
|
||||
first, followed by the second iteration of each spec (that defines more than one
|
||||
iteration) and so forth. The alternative will loop through all iterations for
|
||||
the first spec first, then move on to the second spec, etc. Again, please see
|
||||
:ref:`configuration-specification` for more details.
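
For example, the default can be made explicit in your config as follows (a
sketch; the alternative values are described in
:ref:`configuration-specification`):

.. code-block:: python

    # in ~/.workload_automation/config.py
    execution_order = 'by_iteration'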
|
||||
|
||||
|
||||
Adding a new device interface
|
||||
+++++++++++++++++++++++++++++
|
||||
|
||||
If you are working with a particularly unusual device (e.g. an early-stage
|
||||
development board) or need to be able to handle some quirk of your Android build,
|
||||
the configuration available in the ``generic_android`` interface may not be enough for
|
||||
you. In that case, you may need to write a custom interface for your device. A
|
||||
device interface is a ``Plugin`` (plug-in) type in WA and is implemented
|
||||
similarly to other plugins (such as workloads or instruments). Please refer to
|
||||
the :ref:`adding_a_device` section for information on how this may be done.
|
@ -1,115 +0,0 @@
|
||||
++++++++++++++++++
|
||||
Framework Overview
|
||||
++++++++++++++++++
|
||||
|
||||
Execution Model
|
||||
===============
|
||||
|
||||
At the high level, the execution model looks as follows:
|
||||
|
||||
.. image:: wa-execution.png
|
||||
:scale: 50 %
|
||||
|
||||
After some initial setup, the framework initializes the device, loads and initializes
|
||||
instrumentation and begins executing jobs defined by the workload specs in the agenda. Each job
|
||||
executes in four basic stages:
|
||||
|
||||
setup
|
||||
Initial setup for the workload is performed. E.g. required assets are deployed to the
|
||||
devices, required services or applications are launched, etc. Run time configuration of the
|
||||
device for the workload is also performed at this time.
|
||||
|
||||
run
|
||||
This is when the workload actually runs. This is defined as the part of the workload that is
|
||||
to be measured. Exactly what happens at this stage depends entirely on the workload.
|
||||
|
||||
result processing
|
||||
Results generated during the execution of the workload, if there are any, are collected,
|
||||
parsed and extracted metrics are passed up to the core framework.
|
||||
|
||||
teardown
|
||||
Final clean up is performed, e.g. applications may be closed, files generated during execution
|
||||
deleted, etc.
|
||||
|
||||
Signals are dispatched (see signal_dispatch_ below) at each stage of workload execution,
|
||||
which installed instrumentation can hook into in order to collect measurements, alter workload
|
||||
execution, etc. Instrumentation implementation usually mirrors that of workloads, defining
|
||||
setup, teardown and result processing stages for a particular instrument. Instead of a ``run``,
|
||||
instruments usually implement a ``start`` and a ``stop`` which get triggered just before and just
|
||||
after a workload run. However, the signal dispatch mechanism gives a high degree of flexibility
|
||||
to instruments, allowing them to hook into almost any stage of a WA run (apart from the very
|
||||
early initialization).
|
||||
|
||||
Metrics and artifacts generated by workloads and instrumentation are accumulated by the framework
|
||||
and are then passed to active result processors. This happens after each individual workload
|
||||
execution and at the end of the run. A result processor may choose to act at either or both of these
|
||||
points.
|
||||
|
||||
|
||||
Control Flow
|
||||
============
|
||||
|
||||
This section goes into more detail explaining the relationship between the major components of the
|
||||
framework and how control passes between them during a run. It will only go through the major
|
||||
transitions and interactions and will not attempt to describe every single thing that happens.
|
||||
|
||||
.. note:: This is the control flow for the ``wa run`` command which is the main functionality
|
||||
of WA. Other commands are much simpler and most of what is described below does not
|
||||
apply to them.
|
||||
|
||||
#. ``wlauto.core.entry_point`` parses the command from the arguments and executes the run command
|
||||
(``wlauto.commands.run.RunCommand``).
|
||||
#. Run command initializes the output directory and creates a ``wlauto.core.agenda.Agenda`` based on
|
||||
the command line arguments. Finally, it instantiates a ``wlauto.core.execution.Executor`` and
|
||||
passes it the Agenda.
|
||||
#. The Executor uses the Agenda to create a ``wlauto.core.configuration.RunConfiguration`` that fully
|
||||
defines the configuration for the run (it will be serialised into the ``__meta`` subdirectory under
|
||||
the output directory).
|
||||
#. The Executor proceeds to instantiate and install instrumentation, result processors and the
|
||||
device interface, based on the RunConfiguration. The Executor also initialises a
|
||||
``wlauto.core.execution.ExecutionContext`` which is used to track the current state of the run
|
||||
execution and also serves as a means of communication between the core framework and the
|
||||
plugins.
|
||||
#. Finally, the Executor instantiates a ``wlauto.core.execution.Runner``, initializes its job
|
||||
queue with workload specs from the RunConfiguration, and kicks it off.
|
||||
#. The Runner performs the run time initialization of the device and goes through the workload specs
|
||||
(in the order defined by ``execution_order`` setting), running each spec according to the
|
||||
execution model described in the previous section. The Runner sends signals (see below) at
|
||||
appropriate points during execution.
|
||||
#. At the end of the run, the control is briefly passed back to the Executor, which outputs a
|
||||
summary for the run.
|
||||
|
||||
|
||||
.. _signal_dispatch:
|
||||
|
||||
Signal Dispatch
|
||||
===============
|
||||
|
||||
WA uses the `louie <https://pypi.python.org/pypi/Louie/1.1>`_ (formerly, pydispatcher) library
|
||||
for signal dispatch. Callbacks can be registered for signals emitted during the run. WA uses a
|
||||
version of louie that has been modified to introduce priority to registered callbacks (so that
|
||||
callbacks that are known to be slow can be registered with a lower priority so that they do not
|
||||
interfere with other callbacks).
|
||||
|
||||
This mechanism is abstracted for instrumentation. Methods of an :class:`wlauto.core.Instrument`
|
||||
subclass automatically get hooked to appropriate signals based on their names when the instrument
|
||||
is "installed" for the run. Priority can be specified by adding ``very_fast_``, ``fast_`` ,
|
||||
``slow_`` or ``very_slow_`` prefixes to method names.
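
As a minimal sketch (assuming ``update_result`` is one of the mapped method names
listed in the table linked below), an instrument with a deliberately
lower-priority result-processing callback might look like this:

.. code-block:: python

    from wlauto import Instrument


    class ExampleInstrument(Instrument):

        name = 'example'

        def slow_update_result(self, context):
            # Hooked to the same signal as update_result(), but registered
            # with a lower priority so it does not hold up faster callbacks.
            pass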
|
||||
|
||||
The full list of method names and the signals they map to may be viewed
|
||||
:ref:`here <instrumentation_method_map>`.
|
||||
|
||||
Signal dispatching mechanism may also be used directly, for example to dynamically register
|
||||
callbacks at runtime or allow plugins other than ``Instruments`` to access stages of the run
|
||||
they are normally not aware of.
|
||||
|
||||
The sending of signals is the responsibility of the Runner. Signals get sent during transitions
|
||||
between execution stages and when special events, such as errors or device reboots, occur.
|
||||
|
||||
See Also
|
||||
--------
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
instrumentation_method_map
|
58
doc/source/faq.rst
Normal file
@ -0,0 +1,58 @@
|
||||
.. _faq:
|
||||
|
||||
FAQ
|
||||
===
|
||||
|
||||
.. contents::
|
||||
:depth: 1
|
||||
:local:
|
||||
|
||||
---------------------------------------------------------------------------------------
|
||||
|
||||
|
||||
**Q:** I receive the error: ``"<<Workload> file <file_name> file> could not be found."``
|
||||
-----------------------------------------------------------------------------------------
|
||||
|
||||
**A:** Some workloads, e.g. AdobeReader, GooglePhotos etc., require external asset
|
||||
files. We host some additional workload dependencies in the `WA Assets Repo
|
||||
<https://github.com/ARM-software/workload-automation-assets>`_. To allow WA to
|
||||
try and automatically download required assets from the repository please add
|
||||
the following to your configuration:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
remote_assets_url: https://raw.githubusercontent.com/ARM-software/workload-automation-assets/master/dependencies
|
||||
|
||||
------------
|
||||
|
||||
**Q:** I receive the error: ``"No matching package found for workload <workload>"``
|
||||
------------------------------------------------------------------------------------
|
||||
|
||||
**A:** WA cannot locate the application required for the workload. Please either
|
||||
install the application onto the device or source the apk and place into
|
||||
``$WA_USER_DIRECTORY/dependencies/<workload>``.
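
For example, if the workload in question were ``geekbench`` (used here purely as
an illustration), the APK could be placed like so::

    mkdir -p ~/.workload_automation/dependencies/geekbench
    cp /path/to/geekbench.apk ~/.workload_automation/dependencies/geekbench/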
|
||||
|
||||
------------
|
||||
|
||||
**Q:** I have a big.LITTLE device but am unable to set parameters corresponding to the big or little core and receive the error ``"Unknown runtime parameter"``
|
||||
--------------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
**A:** Please ensure you have the hot plugging module enabled for your device.
|
||||
|
||||
|
||||
**A:** This can occur if the device uses dynamic hot-plugging and although WA
|
||||
will try to online all cores to perform discovery, sometimes this can fail,
causing WA to incorrectly assume that only one cluster is present. To
|
||||
work around this, please set the ``core_names`` :ref:`parameter <core-names>` in the configuration for
|
||||
your device.
|
||||
|
||||
|
||||
**Q:** I receive the error ``Could not find plugin or alias "standard"``
|
||||
------------------------------------------------------------------------
|
||||
|
||||
**A:** Upon first use of WA3, your WA2 config file typically located at
|
||||
``$USER_HOME/config.py`` will have been converted to a WA3 config file located at
|
||||
``$USER_HOME/config.yaml``. The "standard" output processor, present in WA2, has
|
||||
been merged into the core framework and therefore no longer exists. To fix this
|
||||
error please remove the "standard" entry from the "augmentations" list in the
|
||||
WA3 config file.
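
For example, assuming your ``config.yaml`` also enables the ``csv`` output
processor, the ``augmentations`` entry would be edited like this (a sketch):

.. code-block:: yaml

    augmentations:
        # - standard      # no longer exists in WA3 -- remove this entry
        - csv             # any other augmentations you use can stay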
|
19
doc/source/how_to.rst
Normal file
@ -0,0 +1,19 @@
|
||||
========
|
||||
How Tos
|
||||
========
|
||||
|
||||
.. contents:: Contents
|
||||
:depth: 4
|
||||
:local:
|
||||
|
||||
Users
|
||||
""""""
|
||||
|
||||
.. include:: how_tos/users/agenda.rst
|
||||
.. include:: how_tos/users/device_setup.rst
|
||||
.. include:: how_tos/users/revent.rst
|
||||
|
||||
Developers
|
||||
"""""""""""
|
||||
|
||||
.. include:: how_tos/developers/adding_plugins.rst
|
527
doc/source/how_tos/developers/adding_plugins.rst
Normal file
@ -0,0 +1,527 @@
|
||||
.. _adding-a-workload:
|
||||
|
||||
Adding a Workload Examples
|
||||
==========================
|
||||
|
||||
The easiest way to create a new workload is to use the create command: ``wa
create workload <args>``. This will use predefined templates to create a
workload, based on the options supplied, to be used as a starting point for
the workload. For more information on using the create workload command see
``wa create workload -h``.
|
||||
|
||||
The first thing to decide is the type of workload you want to create depending
|
||||
on the OS you will be using and the aim of the workload. There are currently 6
|
||||
available workload types to choose from, as detailed :ref:`here<workload-types>`.
|
||||
|
||||
Once you have decided which type of workload you wish to create, this can be
|
||||
specified with ``-k <workload_kind>`` followed by the workload name. This
|
||||
will automatically generate a workload in your ``WA_CONFIG_DIR/plugins``. If
|
||||
you wish to specify a custom location this can be provided with ``-p
|
||||
<directory>``.
|
||||
|
||||
Adding a Basic Workload Example
|
||||
--------------------------------
|
||||
|
||||
To add a basic workload you can simply use the command::
|
||||
|
||||
wa create workload basic
|
||||
|
||||
This will generate a very basic workload with dummy methods for the workload
|
||||
interface and it is left to the developer to add any required functionality to
|
||||
the workload.
|
||||
|
||||
This example shows a simple workload that times how long it takes to compress a
file of a particular size on the device. Not all of the methods are required to
be implemented; however, as many as possible have been used here to demonstrate
their purpose.
|
||||
|
||||
.. note:: This is intended as an example of how to implement the Workload
|
||||
:ref:`interface <workload-interface>`. The methodology used to
|
||||
perform the actual measurement is not necessarily sound, and this
|
||||
Workload should not be used to collect real measurements.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import os
|
||||
from wa import Workload, Parameter
|
||||
|
||||
class ZipTestWorkload(Workload):
|
||||
|
||||
name = 'ziptest'
|
||||
description = '''
|
||||
Times how long it takes to gzip a file of a particular size on a device.
|
||||
|
||||
This workload was created for illustration purposes only. It should not be
|
||||
used to collect actual measurements.
|
||||
|
||||
'''
|
||||
|
||||
parameters = [
|
||||
Parameter('file_size', kind=int, default=2000000,
|
||||
description='Size of the file (in bytes) to be gzipped.')
|
||||
]
|
||||
|
||||
def setup(self, context):
|
||||
"""
|
||||
In the setup method we do any preparation that is required before
|
||||
the workload is run; this usually involves things like setting up required
|
||||
files on the device and generating commands from user input. In this
|
||||
case we will generate our input file on the host system and then
|
||||
push it to a known location on the target for use in the 'run'
|
||||
stage.
|
||||
"""
|
||||
super(ZipTestWorkload, self).setup(context)
|
||||
# Generate a file of the specified size containing random garbage.
|
||||
host_infile = os.path.join(context.output_directory, 'infile')
|
||||
command = 'openssl rand -base64 {} > {}'.format(self.file_size, host_infile)
|
||||
os.system(command)
|
||||
# Set up on-device paths
|
||||
devpath = self.target.path # os.path equivalent for the target
|
||||
self.target_infile = devpath.join(self.target.working_directory, 'infile')
|
||||
self.target_outfile = devpath.join(self.target.working_directory, 'outfile')
|
||||
# Push the file to the target
|
||||
self.target.push(host_infile, self.target_infile)
|
||||
|
||||
def run(self, context):
|
||||
"""
|
||||
The run method is where the actual 'work' of the workload takes
|
||||
place and is what is measured by any instrumentation. So for this
|
||||
example this is the execution of creating the zip file on the
|
||||
target.
|
||||
"""
|
||||
cmd = 'cd {} && (time gzip {}) &>> {}'
|
||||
self.target.execute(cmd.format(self.target.working_directory,
|
||||
self.target_infile,
|
||||
self.target_outfile))
|
||||
def extract_results(self, context):
|
||||
"""
|
||||
This method is used to extract any results from the target, for
example here we want to pull the file containing the timing
information that we will use to generate metrics for our workload,
and then add this file as an artifact of kind 'raw'; once WA has
finished processing, this allows it to decide whether to keep the
file or not.
|
||||
"""
|
||||
super(ZipTestWorkload, self).extract_results(context)
|
||||
# Pull the results file to the host
|
||||
self.host_outfile = os.path.join(context.output_directory, 'timing_results')
|
||||
self.target.pull(self.target_outfile, self.host_outfile)
|
||||
context.add_artifact('ziptest-results', self.host_outfile, kind='raw')
|
||||
|
||||
def update_output(self, context):
|
||||
"""
|
||||
In this method we can do any generation of metric that we wish to
|
||||
for our workload. In this case we are going to simply convert the
|
||||
times reported to seconds and add them as 'metrics' to WA which can
|
||||
then be displayed to the user along with any others in a format
|
||||
dependent on which output processors they have enabled for the run.
|
||||
"""
|
||||
super(ZipTestWorkload, self).update_output(context)
|
||||
# Extract metrics from the file's contents and update the result
|
||||
# with them.
|
||||
content = iter(open(self.host_outfile).read().strip().split())
|
||||
for value, metric in zip(content, content):
|
||||
mins, secs = map(float, value[:-1].split('m'))
|
||||
context.add_metric(metric, secs + 60 * mins, 'seconds')
|
||||
|
||||
def teardown(self, context):
|
||||
"""
|
||||
Here we will perform any required clean up for the workload so we
|
||||
will delete the input and output files from the device.
|
||||
"""
|
||||
super(ZipTestWorkload, self).teardown(context)
|
||||
self.target.remove(self.target_infile)
|
||||
self.target.remove(self.target_outfile)
|
||||
|
||||
|
||||
.. _apkuiautomator-example:
|
||||
|
||||
Adding a ApkUiAutomator Workload Example
|
||||
-----------------------------------------
|
||||
|
||||
If we wish to create a workload to automate the testing of the Google Docs
|
||||
android app, we would choose to perform the automation using UIAutomator and we
|
||||
would want to automatically deploy and install the apk file to the target,
|
||||
therefore we would choose the :ref:`ApkUiAutomator workload
|
||||
<apkuiautomator-workload>` type with the following command::
|
||||
|
||||
$ wa create workload -k apkuiauto google_docs
|
||||
Workload created in $WA_USER_DIRECTORY/plugins/google_docs
|
||||
|
||||
|
||||
From here you can navigate to the displayed folder and you will find your
|
||||
``__init__.py`` and a ``uiauto`` directory. The former is your python WA
|
||||
workload and will look something like this::
|
||||
|
||||
from wa import Parameter, ApkUiautoWorkload
|
||||
class GoogleDocs(ApkUiautoWorkload):
|
||||
name = 'google_docs'
|
||||
description = "This is an placeholder description"
|
||||
# Replace with a list of supported package names in the APK file(s).
|
||||
package_names = ['package_name']
|
||||
|
||||
parameters = [
|
||||
# Workload parameters go here e.g.
|
||||
Parameter('example_parameter', kind=int, allowed_values=[1,2,3],
|
||||
default=1, override=True, mandatory=False,
|
||||
description='This is an example parameter')
|
||||
]
|
||||
|
||||
def __init__(self, target, **kwargs):
|
||||
super(GoogleDocs, self).__init__(target, **kwargs)
|
||||
# Define any additional attributes required for the workload
|
||||
|
||||
def init_resources(self, context):
|
||||
super(GoogleDocs, self).init_resources(context)
|
||||
# This method may be used to perform early resource discovery and
|
||||
# initialization. This is invoked during the initial loading stage and
|
||||
# before the device is ready, so cannot be used for any device-dependent
|
||||
# initialization. This method is invoked before the workload instance is
|
||||
# validated.
|
||||
|
||||
def initialize(self, context):
|
||||
super(GoogleDocs, self).initialize(context)
|
||||
# This method should be used to perform once-per-run initialization of a
|
||||
# workload instance.
|
||||
|
||||
def validate(self):
|
||||
super(GoogleDocs, self).validate()
|
||||
# Validate inter-parameter assumptions etc
|
||||
|
||||
def setup(self, context):
|
||||
super(GoogleDocs, self).setup(context)
|
||||
# Perform any necessary setup before starting the UI automation
|
||||
|
||||
def extract_results(self, context):
|
||||
super(GoogleDocs, self).extract_results(context)
|
||||
# Extract results on the target
|
||||
|
||||
def update_output(self, context):
|
||||
super(GoogleDocs, self).update_output(context)
|
||||
# Update the output within the specified execution context with the
|
||||
# metrics and artifacts form this workload iteration.
|
||||
|
||||
def teardown(self, context):
|
||||
super(GoogleDocs, self).teardown(context)
|
||||
# Perform any final clean up for the Workload.
|
||||
|
||||
|
||||
Depending on the purpose of your workload you can choose to implement which
|
||||
methods you require. The main things that need setting are the list of
|
||||
``package_names`` which must be a list of strings containing the android package
|
||||
name that will be used during resource resolution to locate the relevant apk
|
||||
file for the workload. Additionally, the workload parameters will need
updating with any relevant parameters required by the workload, as well as the
|
||||
description.
|
||||
|
||||
|
||||
The latter will contain a framework for performing the UI automation on the
target; the file you will be most interested in is
``uiauto/app/src/main/java/arm/wa/uiauto/UiAutomation.java``, which will contain
the actual code of the automation and will look something like::
|
||||
|
||||
package com.arm.wa.uiauto.google_docs;
|
||||
|
||||
import android.app.Activity;
|
||||
import android.os.Bundle;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import android.support.test.runner.AndroidJUnit4;
|
||||
|
||||
import android.util.Log;
|
||||
import android.view.KeyEvent;
|
||||
|
||||
// Import the uiautomator libraries
|
||||
import android.support.test.uiautomator.UiObject;
|
||||
import android.support.test.uiautomator.UiObjectNotFoundException;
|
||||
import android.support.test.uiautomator.UiScrollable;
|
||||
import android.support.test.uiautomator.UiSelector;
|
||||
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
|
||||
import com.arm.wa.uiauto.BaseUiAutomation;
|
||||
|
||||
@RunWith(AndroidJUnit4.class)
|
||||
public class UiAutomation extends BaseUiAutomation {
|
||||
|
||||
protected Bundle parameters;
|
||||
|
||||
public static String TAG = "google_docs";
|
||||
|
||||
@Before
|
||||
public void initialize() throws Exception {
|
||||
parameters = getParams();
|
||||
// Perform any parameter initialization here
|
||||
}
|
||||
|
||||
@Test
|
||||
public void setup() throws Exception {
|
||||
// Optional: Perform any setup required before the main workload
|
||||
// is ran, e.g. dismissing welcome screens
|
||||
}
|
||||
|
||||
@Test
|
||||
public void runWorkload() throws Exception {
|
||||
// The main UI Automation code goes here
|
||||
}
|
||||
|
||||
@Test
|
||||
public void extractResults() throws Exception {
|
||||
// Optional: Extract any relevant results from the workload,
|
||||
}
|
||||
|
||||
@Test
|
||||
public void teardown() throws Exception {
|
||||
// Optional: Perform any clean up for the workload
|
||||
}
|
||||
}
|
||||
|
||||
Once you have implemented your java workload you can use the file
|
||||
``uiauto/build.sh`` to compile your automation into an apk file to perform the
|
||||
automation. The generated APK will be built with the package name
|
||||
``com.arm.wa.uiauto.<workload_name>`` which when running your workload will be
|
||||
automatically detected by the resource getters and deployed to the device.
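
For instance, for the ``google_docs`` workload created above, the automation
could be built like this (assuming the default plugin location)::

    cd $WA_USER_DIRECTORY/plugins/google_docs/uiauto
    ./build.sh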
|
||||
|
||||
|
||||
Adding a ReventApk Workload Example
|
||||
------------------------------------
|
||||
|
||||
If we wish to create a workload to automate the testing of a UI based workload
|
||||
that we cannot / do not wish to use UiAutomator then we can perform the
|
||||
automation using revent. In this example we would want to automatically deploy
|
||||
and install an apk file to the target, therefore we would choose the
|
||||
:ref:`ApkRevent workload <apkrevent-workload>` type with the following
|
||||
command::
|
||||
|
||||
$ wa create workload -k apkrevent my_game
|
||||
Workload created in $WA_USER_DIRECTORY/plugins/my_game
|
||||
|
||||
This will generate a revent-based workload; you will end up with a very similar
Python file to the one outlined for generating a :ref:`UiAutomator based
workload <apkuiautomator-example>`, except without the Java automation files.
|
||||
|
||||
The main difference between the two is that this workload will subclass
|
||||
``ApkReventWorkload`` instead of ``ApkUiautomatorWorkload`` as shown below.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from wa import ApkReventWorkload
|
||||
|
||||
class MyGame(ApkReventWorkload):
|
||||
|
||||
name = 'mygame'
|
||||
package_names = ['com.mylogo.mygame']
|
||||
|
||||
# ..
|
||||
|
||||
|
||||
---------------------------------------------------------------
|
||||
|
||||
.. _adding-an-instrument-example:
|
||||
|
||||
Adding an Instrument Example
|
||||
=============================
|
||||
This is an example of how we would create an instrument which will trace device
|
||||
errors. For more detailed information please see :ref:`here <instrument-reference>`.
|
||||
The first thing to do is to subclass :class:`Instrument`, override the
``name`` variable with what we want our instrument to be called, and locate
the binary for our instrument. ::
|
||||
|
||||
class TraceErrorsInstrument(Instrument):
|
||||
|
||||
name = 'trace-errors'
|
||||
|
||||
def __init__(self, target):
|
||||
super(TraceErrorsInstrument, self).__init__(target)
|
||||
self.binary_name = 'trace'
|
||||
self.binary_file = os.path.join(os.path.dirname(__file__), self.binary_name)
|
||||
self.trace_on_target = None
|
||||
|
||||
We then declare and implement the required methods as detailed
|
||||
:ref:`here <instrument-api>`. For the ``initialize`` method, we want to install
|
||||
the executable file to the target so we can use the target's ``install``
|
||||
method which will try to copy the file to a location on the device that
|
||||
supports execution, will change the file mode appropriately and return the
|
||||
file path on the target. ::
|
||||
|
||||
def initialize(self, context):
|
||||
self.trace_on_target = self.target.install(self.binary_file)
|
||||
|
||||
Then we implement the start method, which will simply run the binary to start
|
||||
tracing. Supposing that the call to this binary requires some overhead to begin
|
||||
collecting errors we might want to decorate the method with the ``@slow``
|
||||
decorator to try and reduce the impact on other running instruments. For more
|
||||
information on prioritization please see :ref:`here <prioritization>`::
|
||||
|
||||
@slow
|
||||
def start(self, context):
|
||||
self.target.execute('{} start'.format(self.trace_on_target))
|
||||
|
||||
Lastly, we need to stop tracing once the workload stops and this happens in the
|
||||
stop method; assuming stopping the collection also requires some overhead, we have
|
||||
again decorated the method::
|
||||
|
||||
@slow
|
||||
def stop(self, context):
|
||||
self.target.execute('{} stop'.format(self.trace_on_target))
|
||||
|
||||
Once we have generated our result data we need to retrieve it from the device
for further processing, or to add it directly to WA's output for that job. For
example, for trace data we will want to pull it from the target and add it as an
:ref:`artifact <artifact>` to WA's :ref:`context <context>`, as shown below::
|
||||
|
||||
def extract_results(self, context):
|
||||
# pull the trace file from the target
|
||||
self.result = os.path.join(self.target.working_directory, 'trace.txt')
|
||||
self.target.pull(self.result, context.working_directory)
|
||||
context.add_artifact('error_trace', self.result, kind='export')
|
||||
|
||||
Once we have retrieved the data we can now do any further processing and add any
|
||||
relevant :ref:`Metrics <metrics>` to the :ref:`context <context>`. For this we
|
||||
will use the ``add_metric`` method to add the instrument's results to the
|
||||
final result for that workload. The method can be passed 4 params, which are the
|
||||
metric `key`, `value`, `unit` and `lower_is_better`. ::
|
||||
|
||||
def update_output(self, context):
|
||||
# parse the file if needs to be parsed, or add result directly to
|
||||
# context.
|
||||
|
||||
metric = ...  # compute the metric value here
|
||||
context.add_metric('number_of_errors', metric, lower_is_better=True)
|
||||
|
||||
At the end of each job we might want to delete any files generated by the
|
||||
instrument, and the code to clear these files goes in the teardown method. ::
|
||||
|
||||
def teardown(self, context):
|
||||
self.target.remove(os.path.join(self.target.working_directory, 'trace.txt'))
|
||||
|
||||
At the very end of the run we would want to uninstall the binary we deployed earlier ::
|
||||
|
||||
def finalize(self, context):
|
||||
self.target.uninstall(self.binary_name)
|
||||
|
||||
So the full example would look something like::
|
||||
|
||||
import os

from wa import Instrument
# The ``slow`` priority decorator used below must also be imported (see the
# prioritization section).


class TraceErrorsInstrument(Instrument):
|
||||
|
||||
name = 'trace-errors'
|
||||
|
||||
def __init__(self, target):
|
||||
super(TraceErrorsInstrument, self).__init__(target)
|
||||
self.binary_name = 'trace'
|
||||
self.binary_file = os.path.join(os.path.dirname(__file__), self.binary_name)
|
||||
self.trace_on_target = None
|
||||
|
||||
def initialize(self, context):
|
||||
self.trace_on_target = self.target.install(self.binary_file)
|
||||
|
||||
@slow
|
||||
def start(self, context):
|
||||
self.target.execute('{} start'.format(self.trace_on_target))
|
||||
|
||||
@slow
|
||||
def stop(self, context):
|
||||
self.target.execute('{} stop'.format(self.trace_on_target))
|
||||
|
||||
def extract_results(self, context):
|
||||
self.result = os.path.join(self.target.working_directory, 'trace.txt')
|
||||
self.target.pull(self.result, context.working_directory)
|
||||
context.add_artifact('error_trace', self.result, kind='export')
|
||||
|
||||
def update_output(self, context):
|
||||
metric = ...  # compute the metric value here
|
||||
context.add_metric('number_of_errors', metric, lower_is_better=True)
|
||||
|
||||
def teardown(self, context):
|
||||
self.target.remove(os.path.join(self.target.working_directory, 'trace.txt'))
|
||||
|
||||
def finalize(self, context):
|
||||
self.target.uninstall(self.binary_name)
|
||||
|
||||
|
||||
Adding an Output Processor Example
|
||||
===================================
|
||||
|
||||
This is an example of how we would create an output processor which will format
|
||||
the run metrics as a column-aligned table. The first thing to do is to subclass
|
||||
:class:`OutputProcessor`, override the ``name`` variable with what we want our
|
||||
processor to be called and provide a short description.
|
||||
|
||||
Next we need to implement any relevant methods, (please see
|
||||
:ref:`adding an output processor <adding-an-output-processor>` for all the
|
||||
available methods). In this case we only want to implement the
|
||||
``export_run_output`` method as we are not generating any new artifacts and
|
||||
we only care about the overall output rather than the individual job
|
||||
outputs. And the implementation is very simple, it just loops through all
|
||||
the available metrics for all the available jobs and adds them to a list
|
||||
which is written to file and then added as an :ref:`artifact <artifact>` to
|
||||
the :ref:`context <context>`.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import os
|
||||
from wa import OutputProcessor
|
||||
from wa.utils.misc import write_table
|
||||
|
||||
|
||||
class Table(OutputProcessor):
|
||||
|
||||
name = 'table'
|
||||
description = 'Generates a text file containing a column-aligned table with run results.'
|
||||
|
||||
def export_run_output(self, output, target_info):
|
||||
rows = []
|
||||
|
||||
for job in output.jobs:
|
||||
for metric in job.metrics:
|
||||
rows.append([metric.name, str(metric.value), metric.units or '',
|
||||
metric.lower_is_better and '-' or '+'])
|
||||
|
||||
outfile = output.get_path('table.txt')
|
||||
with open(outfile, 'w') as wfh:
|
||||
write_table(rows, wfh)
|
||||
output.add_artifact('results_table', 'table.txt', 'export')
|
||||
|
||||
|
||||
.. _adding-custom-target-example:
|
||||
Adding a Custom Target Example
|
||||
===============================
|
||||
This is an example of how we would create a customised target. This is typically
used where we need to augment the existing functionality, for example on
development boards where we need to perform additional actions to implement some
functionality. In this example we are going to assume that this particular
device is running Android and requires a special "wakeup" command to be sent before it
can execute any other command.
|
||||
|
||||
To add a new target to WA we will first create a new file in
|
||||
``$WA_USER_DIRECTORY/plugins/example_target.py``. To facilitate
creating a new target, WA provides a helper function to create a description for
|
||||
the specified target class, and specified components. For components that are
|
||||
not explicitly specified it will attempt to guess sensible defaults based on the target
|
||||
class' bases.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# Import our helper function
|
||||
from wa import add_description_for_target
|
||||
|
||||
# Import the Target that our custom implementation will be based on
|
||||
from devlib import AndroidTarget
|
||||
|
||||
class ExampleTarget(AndroidTarget):
|
||||
# Provide the name that will be used to identify your custom target
|
||||
name = 'example_target'
|
||||
|
||||
# Override our custom method(s)
|
||||
def execute(self, *args, **kwargs):
|
||||
super(ExampleTarget, self).execute('wakeup', check_exit_code=False)
|
||||
return super(ExampleTarget, self).execute(*args, **kwargs)
|
||||
|
||||
|
||||
description = '''An Android target which requires an explicit "wakeup" command
|
||||
to be sent before accepting any other command'''
|
||||
# Call the helper function with our newly created function and its description.
|
||||
add_description_for_target(ExampleTarget, description)
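
Assuming the file above is placed in the plugins directory as described, the new
target can then be selected like any other, for example in ``config.yaml`` (a
sketch):

.. code-block:: yaml

    device: example_target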
|
||||
|
@ -1,27 +1,20 @@
|
||||
.. _agenda:
|
||||
|
||||
======
|
||||
Agenda
|
||||
======
|
||||
|
||||
An agenda specifies what is to be done during a Workload Automation run,
|
||||
including which workloads will be run, with what configuration, which
|
||||
instruments and result processors will be enabled, etc. Agenda syntax is
|
||||
designed to be both succinct and expressive.
|
||||
augmentations will be enabled, etc. Agenda syntax is designed to be both
|
||||
succinct and expressive.
|
||||
|
||||
Agendas are specified using YAML_ notation. It is recommended that you
|
||||
familiarize yourself with the linked page.
|
||||
|
||||
.. _YAML: http://en.wikipedia.org/wiki/YAML
|
||||
|
||||
.. note:: Earlier versions of WA have supported CSV-style agendas. These were
|
||||
there to facilitate transition from WA1 scripts. The format was more
|
||||
awkward and supported only a limited subset of the features. Support
|
||||
for it has now been removed.
|
||||
|
||||
|
||||
Specifying which workloads to run
|
||||
=================================
|
||||
---------------------------------
|
||||
|
||||
The central purpose of an agenda is to specify what workloads to run. A
|
||||
minimalist agenda contains a single entry at the top level called "workloads"
|
||||
@ -32,11 +25,11 @@ that maps onto a list of workload names to run:
|
||||
workloads:
|
||||
- dhrystone
|
||||
- memcpy
|
||||
- cyclictest
|
||||
- rt_app
|
||||
|
||||
This specifies a WA run consisting of ``dhrystone`` followed by ``memcpy``, followed by
|
||||
``cyclictest`` workloads, and using instruments and result processors specified in
|
||||
config.py (see :ref:`configuration-specification` section).
|
||||
``rt_app`` workloads, and using the augmentations specified in
|
||||
config.yaml (see :ref:`configuration-specification` section).
|
||||
|
||||
.. note:: If you're familiar with YAML, you will recognize the above as a single-key
|
||||
associative array mapping onto a list. YAML has two notations for both
|
||||
@ -44,11 +37,11 @@ config.py (see :ref:`configuration-specification` section).
|
||||
in-line notation. This means that the above agenda can also be
|
||||
written in a single line as ::
|
||||
|
||||
workloads: [dhrystone, memcpy, cyclictest]
|
||||
workloads: [dhrystone, memcpy, rt-app]
|
||||
|
||||
(with the list in-lined), or ::
|
||||
|
||||
{workloads: [dhrystone, memcpy, cyclictest]}
|
||||
{workloads: [dhrystone, memcpy, rt-app]}
|
||||
|
||||
(with both the list and the associative array in-line). WA doesn't
|
||||
care which of the notations is used as they all get parsed into the
|
||||
@ -79,29 +72,27 @@ each spec, we have to explicitly name each field of the spec.
|
||||
|
||||
It is often the case that, as in the example above, you will want to run all
|
||||
workloads for the same number of iterations. Rather than having to specify it
|
||||
for each and every spec, you can do with a single entry by adding a ``global``
|
||||
section to your agenda:
|
||||
for each and every spec, you can do so with a single entry by adding ``iterations``
|
||||
to your ``config`` section in your agenda:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
global:
|
||||
config:
|
||||
iterations: 5
|
||||
workloads:
|
||||
- dhrystone
|
||||
- memcpy
|
||||
- cyclictest
|
||||
|
||||
The global section can contain the same fields as a workload spec. The
|
||||
fields in the global section will get added to each spec. If the same field is
|
||||
defined both in global section and in a spec, then the value in the spec will
|
||||
overwrite the global value. For example, suppose we wanted to run all our workloads
|
||||
for five iterations, except cyclictest which we want to run for ten (e.g.
|
||||
because we know it to be particularly unstable). This can be specified like
|
||||
this:
|
||||
If the same field is defined both in the config section and in a spec, then the
value in the spec will overwrite the config value. For example, suppose we
|
||||
wanted to run all our workloads for five iterations, except cyclictest which we
|
||||
want to run for ten (e.g. because we know it to be particularly unstable). This
|
||||
can be specified like this:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
global:
|
||||
config:
|
||||
iterations: 5
|
||||
workloads:
|
||||
- dhrystone
|
||||
@ -112,10 +103,10 @@ this:
|
||||
Again, because we are now specifying two fields for cyclictest spec, we have to
|
||||
explicitly name them.
|
||||
|
||||
Configuring workloads
|
||||
Configuring Workloads
|
||||
---------------------
|
||||
|
||||
Some workloads accept configuration parameters that modify their behavior. These
|
||||
Some workloads accept configuration parameters that modify their behaviour. These
|
||||
parameters are specific to a particular workload and can alter the workload in
|
||||
any number of ways, e.g. set the duration for which to run, or specify a media
|
||||
file to be used, etc. The vast majority of workload parameters will have some
|
||||
@ -124,13 +115,13 @@ order for WA to run it. However, sometimes you want more control over how a
|
||||
workload runs.
|
||||
|
||||
For example, by default, dhrystone will execute 10 million loops across four
|
||||
threads. Suppose you device has six cores available and you want the workload to
|
||||
threads. Suppose your device has six cores available and you want the workload to
|
||||
load them all. You also want to increase the total number of loops accordingly
|
||||
to 15 million. You can specify this using dhrystone's parameters:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
global:
|
||||
config:
|
||||
iterations: 5
|
||||
workloads:
|
||||
- name: dhrystone
|
||||
@ -142,30 +133,32 @@ to 15 million. You can specify this using dhrystone's parameters:
|
||||
iterations: 10
|
||||
|
||||
.. note:: You can find out what parameters a workload accepts by looking it up
|
||||
in the :ref:`Workloads` section. You can also look it up using WA itself
|
||||
with "show" command::
|
||||
in the :ref:`Workloads` section or using WA itself with "show"
|
||||
command::
|
||||
|
||||
wa show dhrystone
|
||||
|
||||
see the :ref:`Invocation` section for details.
|
||||
|
||||
In addition to configuring the workload itself, we can also specify
|
||||
configuration for the underlying device. This can be done by setting runtime
|
||||
parameters in the workload spec. For example, suppose we want to ensure the
|
||||
maximum score for our benchmarks, at the expense of power consumption, by
|
||||
setting the cpufreq governor to "performance" on cpu0 (assuming all our cores
|
||||
are in the same DVFS domain and so setting the governor for cpu0 will affect all
|
||||
cores). This can be done like this:
|
||||
configuration for the underlying device which can be done by setting runtime
|
||||
parameters in the workload spec. Explicit runtime parameters have been exposed for
configuring cpufreq, hotplug and cpuidle. For more detailed information on Runtime
Parameters see the :ref:`runtime parameters <runtime-parmeters>` section. For
|
||||
example, suppose we want to ensure the maximum score for our benchmarks, at the
|
||||
expense of power consumption so we want to set the cpufreq governor to
|
||||
"performance" and enable all of the cpus on the device, (assuming there are 8
|
||||
cpus available), which can be done like this:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
global:
|
||||
config:
|
||||
iterations: 5
|
||||
workloads:
|
||||
- name: dhrystone
|
||||
runtime_params:
|
||||
sysfile_values:
|
||||
/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
|
||||
governor: performance
|
||||
num_cores: 8
|
||||
workload_params:
|
||||
threads: 6
|
||||
mloops: 15
|
||||
@ -174,40 +167,131 @@ cores). This can be done like this:
|
||||
iterations: 10
|
||||
|
||||
|
||||
Here, we're specifying ``sysfile_values`` runtime parameter for the device. The
|
||||
value for this parameter is a mapping (an associative array, in YAML) of file
|
||||
paths onto values that should be written into those files. ``sysfile_values`` is
|
||||
the only runtime parameter that is available for any (Linux) device. Other
|
||||
runtime parameters will depend on the specifics of the device used (e.g. its
|
||||
CPU cores configuration). I've renamed ``params`` to ``workload_params`` for
|
||||
clarity, but that wasn't strictly necessary as ``params`` is interpreted as
|
||||
I've renamed ``params`` to ``workload_params`` for clarity,
|
||||
but that wasn't strictly necessary as ``params`` is interpreted as
|
||||
``workload_params`` inside a workload spec.
|
||||
|
||||
.. note:: ``params`` field is interpreted differently depending on whether it's in a
|
||||
workload spec or the global section. In a workload spec, it translates to
|
||||
``workload_params``, in the global section it translates to ``runtime_params``.
|
||||
|
||||
Runtime parameters do not automatically reset at the end of workload spec
|
||||
execution, so all subsequent iterations will also be affected unless they
|
||||
explicitly change the parameter (in the example above, performance governor will
|
||||
also be used for ``memcpy`` and ``cyclictest``). There are two ways around this:
|
||||
either set ``reboot_policy`` WA setting (see :ref:`configuration-specification` section) such that
|
||||
the device gets rebooted between spec executions, thus being returned to its
|
||||
initial state, or set the default runtime parameter values in the ``global``
|
||||
section of the agenda so that they get set for every spec that doesn't
|
||||
explicitly override them.
|
||||
either set ``reboot_policy`` WA setting (see :ref:`configuration-specification`
|
||||
section) such that the device gets rebooted between spec executions, thus being
|
||||
returned to its initial state, or set the default runtime parameter values in
|
||||
the ``config`` section of the agenda so that they get set for every spec that
|
||||
doesn't explicitly override them.
|
||||
|
||||
If additional configuration of the device is required that is not exposed via
the built-in runtime parameters, you can write a value to any file exposed on
|
||||
the device using ``sysfile_values``, for example we could have also performed
|
||||
the same configuration manually (assuming we have a big.LITTLE system and our
|
||||
cores 0-3 and 4-7 are in 2 separate DVFS domains and so setting the governor for
|
||||
cpu0 and cpu4 will affect all our cores) e.g.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
|
||||
config:
|
||||
iterations: 5
|
||||
workloads:
|
||||
- name: dhrystone
|
||||
runtime_params:
|
||||
sysfile_values:
|
||||
/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
|
||||
/sys/devices/system/cpu/cpu4/cpufreq/scaling_governor: performance
|
||||
/sys/devices/system/cpu/cpu0/online: 1
|
||||
/sys/devices/system/cpu/cpu1/online: 1
|
||||
/sys/devices/system/cpu/cpu2/online: 1
|
||||
/sys/devices/system/cpu/cpu3/online: 1
|
||||
/sys/devices/system/cpu/cpu4/online: 1
|
||||
/sys/devices/system/cpu/cpu5/online: 1
|
||||
/sys/devices/system/cpu/cpu6/online: 1
|
||||
/sys/devices/system/cpu/cpu7/online: 1
|
||||
workload_params:
|
||||
threads: 6
|
||||
mloops: 15
|
||||
- memcpy
|
||||
- name: cyclictest
|
||||
iterations: 10
|
||||
|
||||
Here, we're specifying a ``sysfile_values`` runtime parameter for the device.
|
||||
The value for this parameter is a mapping (an associative array, in YAML) of
|
||||
file paths onto values that should be written into those files. Runtime
|
||||
parameters will depend on the specifics of the device used (e.g. its CPU cores
|
||||
configuration).
|
||||
|
||||
|
||||
APK Workloads
|
||||
^^^^^^^^^^^^^
|
||||
|
||||
WA has various resource getters that can be configured to locate APK files but
|
||||
for most people APK files should be kept in the
|
||||
``$WA_USER_DIRECTORY/dependencies/SOME_WORKLOAD/`` directory. (by default
|
||||
``~/.workload_automation/dependencies/SOME_WORKLOAD/``). The
|
||||
``WA_USER_DIRECTORY`` enviroment variable can be used to change the location of
|
||||
this folder. The APK files need to be put into the corresponding directories for
|
||||
the workload they belong to. The name of the file can be anything but as
|
||||
explained below may need to contain certain pieces of information.
|
||||
|
||||
All ApkWorkloads have parameters that affect the way in which APK files are
|
||||
resolved, ``exact_abi``, ``force_install`` and ``prefer_host_package``. Their
|
||||
exact behaviours are outlined below.
|
||||
|
||||
.. confval:: exact_abi
|
||||
|
||||
If this setting is enabled, WA's resource resolvers will look for the device's
ABI in any native code present in the APK. By default this setting is
|
||||
disabled since most apks will work across all devices. You may wish to enable
|
||||
this feature when working with devices that support multiple ABI's (like
|
||||
64-bit devices that can run 32-bit APK files) and are specifically trying to
|
||||
test one or the other.
|
||||
|
||||
.. confval:: force_install
|
||||
|
||||
If this setting is enabled WA will *always* use the APK file on the host, and
|
||||
re-install it on every iteration. If there is no APK on the host that is a
|
||||
suitable version and/or ABI for the workload WA will error when
|
||||
``force_install`` is enabled.
|
||||
|
||||
.. confval:: prefer_host_package
|
||||
|
||||
This parameter is used to specify a preference over host or target versions
|
||||
of the app. When set to ``True`` WA will prefer the host side version of the
|
||||
APK. It will check if the host has the APK and if the host APK meets the
|
||||
version requirements of the workload. If it does and the target already has the same
version, nothing will be done; otherwise it will overwrite the target's app
with the host version. If the host is missing the APK or it does not meet
|
||||
version requirements WA will fall back to the app on the target if it has the
|
||||
app and it is of a suitable version. When this parameter is set to ``False``
|
||||
WA will prefer to use the version already on the target if it meets the
|
||||
workloads version requirements. If it does not it will fall back to search
|
||||
the host for the correct version. In both modes if neither the host nor
|
||||
target have a suitable version, WA will error and not run the workload.
|
||||
|
||||
Some workloads will also feature the follow parameters which will alter the way
|
||||
their APK files are resolved.
|
||||
|
||||
.. confval:: version
|
||||
|
||||
This parameter is used to specify which version of uiautomation is used for the
workload. In some workloads, e.g. ``geekbench``, multiple versions with
|
||||
drastically different UIs are supported. An APK's version will be
automatically extracted, therefore it is possible to have multiple APKs for
|
||||
different versions of a workload present on the host and select between which
|
||||
is used for a particular job by specifying the relevant version in your
|
||||
:ref:`agenda <agenda>`.
|
||||
|
||||
.. confval:: variant_name
|
||||
|
||||
Some workloads use variants of APK files; this is usually the case with web
browser APK files. These work in exactly the same way as the version.
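
Since these are ordinary workload parameters, they can be set per-spec in an
agenda. For example (the workload name and version string below are purely
illustrative):

.. code-block:: yaml

    workloads:
      - name: geekbench              # hypothetical example workload
        workload_params:
            version: '4.2.0'         # hypothetical version string
            force_install: true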
|
||||
|
||||
.. note:: "In addition to ``runtime_params`` there are also ``boot_params`` that
|
||||
work in a similar way, but they get passed to the device when it
|
||||
reboots. At the moment ``TC2`` is the only device that defines a boot
|
||||
parameter, which is explained in ``TC2`` documentation, so boot
|
||||
parameters will not be mentioned further.
|
||||
|
||||
IDs and Labels
|
||||
--------------
|
||||
|
||||
It is possible to list multiple specs with the same workload in an agenda. You
|
||||
may wish to this if you want to run a workload with different parameter values
|
||||
may wish to do this if you want to run a workload with different parameter values
|
||||
or under different runtime configurations of the device. The workload name
|
||||
therefore does not uniquely identify a spec. To be able to distinguish between
|
||||
different specs (e.g. in reported results), each spec has an ID which is unique
|
||||
@ -226,13 +310,13 @@ unique to the agenda. However, is usually better to keep them reasonably short
|
||||
(they don't need to be *globally* unique), and to stick with alpha-numeric
|
||||
characters and underscores/dashes. While WA can handle other characters as well,
|
||||
getting too adventurous with your IDs may cause issues further down the line
|
||||
when processing WA results (e.g. when uploading them to a database that may have
|
||||
when processing WA output (e.g. when uploading them to a database that may have
|
||||
its own restrictions).
|
||||
|
||||
In addition to IDs, you can also specify labels for your workload specs. These
|
||||
are similar to IDs but do not have the uniqueness restriction. If specified,
|
||||
labels will be used by some result processes instead of (or in addition to) the
|
||||
workload name. For example, the ``csv`` result processor will put the label in the
|
||||
labels will be used by some output processors instead of (or in addition to) the
|
||||
workload name. For example, the ``csv`` output processor will put the label in the
|
||||
"workload" column of the CSV file.
|
||||
|
||||
It is up to you how you chose to use IDs and labels. WA itself doesn't expect
|
||||
@ -240,74 +324,16 @@ any particular format (apart from uniqueness for IDs). Below is the earlier
|
||||
example updated to specify explicit IDs and label dhrystone spec to reflect
|
||||
parameters used.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
global:
|
||||
iterations: 5
|
||||
workloads:
|
||||
- id: 01_dhry
|
||||
name: dhrystone
|
||||
label: dhrystone_15over6
|
||||
runtime_params:
|
||||
sysfile_values:
|
||||
/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
|
||||
workload_params:
|
||||
threads: 6
|
||||
mloops: 15
|
||||
- id: 02_memc
|
||||
name: memcpy
|
||||
- id: 03_cycl
|
||||
name: cyclictest
|
||||
iterations: 10
|
||||
|
||||
|
||||
Result Processors and Instrumentation
|
||||
=====================================
|
||||
|
||||
Result Processors
|
||||
-----------------
|
||||
|
||||
Result processors, as the name suggests, handle the processing of results
|
||||
generated from running workload specs. By default, WA enables a couple of basic
|
||||
result processors (e.g. one generates a csv file with all scores reported by
|
||||
workloads), which you can see in ``~/.workload_automation/config.py``. However,
|
||||
WA has a number of other, more specialized, result processors (e.g. for
|
||||
uploading to databases). You can list available result processors with
|
||||
``wa list result_processors`` command. If you want to permanently enable a
|
||||
result processor, you can add it to your ``config.py``. You can also enable a
|
||||
result processor for a particular run by specifying it in the ``config`` section
|
||||
in the agenda. As the name suggests, ``config`` section mirrors the structure of
|
||||
``config.py``\ (although using YAML rather than Python), and anything that can
|
||||
be specified in the latter, can also be specified in the former.
|
||||
|
||||
As with workloads, result processors may have parameters that define their
|
||||
behavior. Parameters of result processors are specified a little differently,
|
||||
however. Result processor parameter values are listed in the config section,
|
||||
namespaced under the name of the result processor.
|
||||
|
||||
For example, suppose we want to be able to easily query the results generated by
|
||||
the workload specs we've defined so far. We can use ``sqlite`` result processor
|
||||
to have WA create an sqlite_ database file with the results. By default, this
|
||||
file will be generated in WA's output directory (at the same level as
|
||||
results.csv); but suppose we want to store the results in the same file for
|
||||
every run of the agenda we do. This can be done by specifying an alternative
|
||||
database file with ``database`` parameter of the result processor:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
result_processors: [sqlite]
|
||||
sqlite:
|
||||
database: ~/my_wa_results.sqlite
|
||||
global:
|
||||
iterations: 5
|
||||
workloads:
|
||||
- id: 01_dhry
|
||||
name: dhrystone
|
||||
label: dhrystone_15over6
|
||||
runtime_params:
|
||||
sysfile_values:
|
||||
/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
|
||||
cpu0_governor: performance
|
||||
workload_params:
|
||||
threads: 6
|
||||
mloops: 15
|
||||
@ -317,167 +343,10 @@ database file with ``database`` parameter of the result processor:
|
||||
name: cyclictest
|
||||
iterations: 10
|
||||
|
||||
A couple of things to observe here:
|
||||
|
||||
- There is no need to repeat the result processors listed in ``config.py``. The
|
||||
processors listed in ``result_processors`` entry in the agenda will be used
|
||||
*in addition to* those defined in the ``config.py``.
|
||||
- The database file is specified under "sqlite" entry in the config section.
|
||||
Note, however, that this entry alone is not enough to enable the result
|
||||
processor, it must be listed in ``result_processors``, otherwise the "sqlite"
|
||||
config entry will be ignored.
|
||||
- The database file must be specified as an absolute path, however it may use
|
||||
the user home specifier '~' and/or environment variables.
|
||||
|
||||
.. _sqlite: http://www.sqlite.org/
|
||||
|
||||
|
||||
Instrumentation
|
||||
---------------
|
||||
|
||||
WA can enable various "instruments" to be used during workload execution.
|
||||
Instruments can be quite diverse in their functionality, but the majority of
|
||||
instruments available in WA today are there to collect additional data (such as
|
||||
trace) from the device during workload execution. You can view the list of
|
||||
available instruments by using ``wa list instruments`` command. As with result
|
||||
processors, a few are enabled by default in the ``config.py`` and additional
|
||||
ones may be added in the same place, or specified in the agenda using
|
||||
``instrumentation`` entry.
|
||||
|
||||
For example, we can collect core utilisation statistics (for what proportion of
|
||||
workload execution N cores were utilized above a specified threshold) using
|
||||
``coreutil`` instrument.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
instrumentation: [coreutil]
|
||||
coreutil:
|
||||
threshold: 80
|
||||
result_processors: [sqlite]
|
||||
sqlite:
|
||||
database: ~/my_wa_results.sqlite
|
||||
global:
|
||||
iterations: 5
|
||||
workloads:
|
||||
- id: 01_dhry
|
||||
name: dhrystone
|
||||
label: dhrystone_15over6
|
||||
runtime_params:
|
||||
sysfile_values:
|
||||
/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
|
||||
workload_params:
|
||||
threads: 6
|
||||
mloops: 15
|
||||
- id: 02_memc
|
||||
name: memcpy
|
||||
- id: 03_cycl
|
||||
name: cyclictest
|
||||
iterations: 10
|
||||
|
||||
Instrumentation isn't "free" and it is advisable not to have too many
|
||||
instruments enabled at once as that might skew results. For example, you don't
|
||||
want to have power measurement enabled at the same time as event tracing, as the
|
||||
latter may prevent cores from going into idle states and thus affect the
readings collected by the former.
|
||||
|
||||
Unlike result processors, instrumentation may be enabled (and disabled -- see below)
|
||||
on a per-spec basis. For example, suppose we want to collect /proc/meminfo from the
|
||||
device when we run ``memcpy`` workload, but not for the other two. We can do that using
|
||||
``sysfs_extractor`` instrument, and we will only enable it for ``memcpy``:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
instrumentation: [coreutil]
|
||||
coreutil:
|
||||
threshold: 80
|
||||
sysfs_extractor:
|
||||
paths: [/proc/meminfo]
|
||||
result_processors: [sqlite]
|
||||
sqlite:
|
||||
database: ~/my_wa_results.sqlite
|
||||
global:
|
||||
iterations: 5
|
||||
workloads:
|
||||
- id: 01_dhry
|
||||
name: dhrystone
|
||||
label: dhrystone_15over6
|
||||
runtime_params:
|
||||
sysfile_values:
|
||||
/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
|
||||
workload_params:
|
||||
threads: 6
|
||||
mloops: 15
|
||||
- id: 02_memc
|
||||
name: memcpy
|
||||
instrumentation: [sysfs_extractor]
|
||||
- id: 03_cycl
|
||||
name: cyclictest
|
||||
iterations: 10
|
||||
|
||||
As with ``config`` sections, ``instrumentation`` entry in the spec needs only to
|
||||
list additional instruments and does not need to repeat instruments specified
|
||||
elsewhere.
|
||||
|
||||
.. note:: At present, it is only possible to enable/disable instrumentation on
|
||||
a per-spec basis. It is *not* possible to provide configuration on a
|
||||
per-spec basis in the current version of WA (e.g. in our example, it
|
||||
is not possible to specify different ``sysfs_extractor`` paths for
|
||||
different workloads). This restriction may be lifted in future
|
||||
versions of WA.
|
||||
|
||||
Disabling result processors and instrumentation
|
||||
-----------------------------------------------
|
||||
|
||||
As seen above, plugins specified with ``instrumentation`` and
|
||||
``result_processor`` clauses get added to those already specified previously.
|
||||
Just because an instrument specified in ``config.py`` is not listed in the
|
||||
``config`` section of the agenda, does not mean it will be disabled. If you do
|
||||
want to disable an instrument, you can always remove/comment it out from
|
||||
``config.py``. However, that would introduce a permanent configuration change
|
||||
to your environment (one that can be easily reverted, but may be just as
|
||||
easily forgotten). If you want to temporarily disable a result processor or an
|
||||
instrument for a particular run, you can do that in your agenda by prepending a
|
||||
tilde (``~``) to its name.
|
||||
|
||||
For example, let's say we want to disable ``cpufreq`` instrument enabled in our
|
||||
``config.py`` (suppose we're going to send results via email and so want to
|
||||
reduce the total size of the output directory):
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
instrumentation: [coreutil, ~cpufreq]
|
||||
coreutil:
|
||||
threshold: 80
|
||||
sysfs_extractor:
|
||||
paths: [/proc/meminfo]
|
||||
result_processors: [sqlite]
|
||||
sqlite:
|
||||
database: ~/my_wa_results.sqlite
|
||||
global:
|
||||
iterations: 5
|
||||
workloads:
|
||||
- id: 01_dhry
|
||||
name: dhrystone
|
||||
label: dhrystone_15over6
|
||||
runtime_params:
|
||||
sysfile_values:
|
||||
/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
|
||||
workload_params:
|
||||
threads: 6
|
||||
mloops: 15
|
||||
- id: 02_memc
|
||||
name: memcpy
|
||||
instrumentation: [sysfs_extractor]
|
||||
- id: 03_cycl
|
||||
name: cyclictest
|
||||
iterations: 10
|
||||
|
||||
.. _sections:
|
||||
|
||||
Sections
|
||||
========
|
||||
--------
|
||||
|
||||
It is a common requirement to be able to run the same set of workloads under
|
||||
different device configurations. E.g. you may want to investigate impact of
|
||||
@ -489,7 +358,7 @@ For example, suppose what we really want, is to measure the impact of using
|
||||
interactive cpufreq governor vs the performance governor on the three
|
||||
benchmarks. We could create another three workload spec entries similar to the
|
||||
ones we already have and change the sysfile value being set to "interactive".
|
||||
However, this introduces a lot of duplication; and what if we want to change
|
||||
spec configuration? We would have to change it in multiple places, running the
|
||||
risk of forgetting one.
|
||||
|
||||
@ -499,25 +368,21 @@ governor:
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
instrumentation: [coreutil, ~cpufreq]
|
||||
coreutil:
|
||||
threshold: 80
|
||||
iterations: 5
|
||||
augmentations:
|
||||
- ~cpufreq
|
||||
- csv
|
||||
sysfs_extractor:
|
||||
paths: [/proc/meminfo]
|
||||
result_processors: [sqlite]
|
||||
sqlite:
|
||||
database: ~/my_wa_results.sqlite
|
||||
global:
|
||||
iterations: 5
|
||||
csv:
|
||||
use_all_classifiers: True
|
||||
sections:
|
||||
- id: perf
|
||||
runtime_params:
|
||||
sysfile_values:
|
||||
/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
|
||||
cpu0_governor: performance
|
||||
- id: inter
|
||||
runtime_params:
|
||||
sysfile_values:
|
||||
/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: interactive
|
||||
cpu0_governor: interactive
|
||||
workloads:
|
||||
- id: 01_dhry
|
||||
name: dhrystone
|
||||
@ -527,13 +392,10 @@ governor:
|
||||
mloops: 15
|
||||
- id: 02_memc
|
||||
name: memcpy
|
||||
instrumentation: [sysfs_extractor]
|
||||
- id: 03_cycl
|
||||
name: cyclictest
|
||||
iterations: 10
|
||||
augmentations: [sysfs_extractor]
|
||||
|
||||
A section, just like a workload spec, needs to have a unique ID. Apart from
|
||||
that, a "section" is similar to the ``global`` section we've already seen --
|
||||
that, a "section" is similar to the ``config`` section we've already seen --
|
||||
everything that goes into a section will be applied to each workload spec.
|
||||
Workload specs defined under top-level ``workloads`` entry will be executed for
|
||||
each of the sections listed under ``sections``.
|
||||
@ -544,24 +406,225 @@ each of the sections listed under ``sections``.
|
||||
|
||||
In order to maintain the uniqueness requirement of workload spec IDs, they will
|
||||
be namespaced under each section by prepending the section ID to the spec ID
|
||||
with an under score. So in the agenda above, we no longer have a workload spec
|
||||
with ID ``01_dhry``, instead there are two specs with IDs ``perf_01_dhry`` and
|
||||
``inter_01_dhry``.
|
||||
with a dash. So in the agenda above, we no longer have a workload spec
|
||||
with ID ``01_dhry``; instead there are two specs with IDs ``perf-01_dhry`` and
|
||||
``inter-01_dhry``.
|
||||
|
||||
Note that the ``global`` section still applies to every spec in the agenda. So
|
||||
Note that the ``config`` section still applies to every spec in the agenda. So
|
||||
the precedence order is -- spec settings override section settings, which in
|
||||
turn override global settings.
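
As a quick sketch of this precedence (this is not part of the example agenda
above, and assumes ``iterations`` may be set at each of these levels):

.. code-block:: yaml

        config:
            iterations: 5            # default for every spec in the run
        sections:
            - id: perf
              iterations: 3          # overrides the config value for specs in this section
        workloads:
            - id: 01_dhry
              name: dhrystone
              iterations: 10         # overrides both; this spec always runs 10 iterations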
|
||||
|
||||
|
||||
|
||||
Output Processors and Instruments
|
||||
----------------------------------
|
||||
|
||||
Output Processors
|
||||
^^^^^^^^^^^^^^^^^
|
||||
|
||||
Output processors, as the name suggests, handle the processing of output
|
||||
generated from running workload specs. By default, WA enables a couple of basic
|
||||
output processors (e.g. one generates a csv file with all scores reported by
|
||||
workloads), which you can see in ``~/.workload_automation/config.yaml``. However,
|
||||
WA has a number of other, more specialized, output processors (e.g. for
|
||||
uploading to databases). You can list available output processors with
|
||||
``wa list output_processors`` command. If you want to permanently enable an
output processor, you can add it to your ``config.yaml``. You can also enable an
output processor for a particular run by specifying it in the ``config`` section
|
||||
in the agenda. As the name suggests, ``config`` section mirrors the structure of
|
||||
``config.yaml``, and anything that can be specified in the latter, can also be
|
||||
specified in the former.
|
||||
|
||||
As with workloads, output processors may have parameters that define their
|
||||
behaviour. Parameters of output processors are specified a little differently,
|
||||
however. Output processor parameter values are listed in the config section,
|
||||
namespaced under the name of the output processor.
|
||||
|
||||
For example, suppose we want to be able to easily query the output generated by
|
||||
the workload specs we've defined so far. We can use ``sqlite`` output processor
|
||||
to have WA create an sqlite_ database file with the results. By default, this
|
||||
file will be generated in WA's output directory (at the same level as
|
||||
results.csv); but suppose we want to store the results in the same file for
|
||||
every run of the agenda we do. This can be done by specifying an alternative
|
||||
database file with ``database`` parameter of the output processor:
|
||||
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
augmentations:
|
||||
- sqlite
|
||||
sqlite:
|
||||
database: ~/my_wa_results.sqlite
|
||||
iterations: 5
|
||||
workloads:
|
||||
- id: 01_dhry
|
||||
name: dhrystone
|
||||
label: dhrystone_15over6
|
||||
runtime_params:
|
||||
cpu0_governor: performance
|
||||
workload_params:
|
||||
threads: 6
|
||||
mloops: 15
|
||||
- id: 02_memc
|
||||
name: memcpy
|
||||
- id: 03_cycl
|
||||
name: cyclictest
|
||||
iterations: 10
|
||||
|
||||
A couple of things to observe here:
|
||||
|
||||
- There is no need to repeat the output processors listed in ``config.yaml``. The
|
||||
processors listed in ``augmentations`` entry in the agenda will be used
|
||||
*in addition to* those defined in the ``config.yaml``.
|
||||
- The database file is specified under "sqlite" entry in the config section.
|
||||
Note, however, that this entry alone is not enough to enable the output
|
||||
processor, it must be listed in ``augmentations``, otherwise the "sqlite"
|
||||
config entry will be ignored.
|
||||
- The database file must be specified as an absolute path, however it may use
|
||||
the user home specifier '~' and/or environment variables.
|
||||
|
||||
.. _sqlite: http://www.sqlite.org/
|
||||
|
||||
|
||||
Instruments
|
||||
^^^^^^^^^^^
|
||||
|
||||
WA can enable various "instruments" to be used during workload execution.
|
||||
Instruments can be quite diverse in their functionality, but the majority of
|
||||
instruments available in WA today are there to collect additional data (such as
|
||||
trace) from the device during workload execution. You can view the list of
|
||||
available instruments by using ``wa list instruments`` command. As with output
|
||||
processors, a few are enabled by default in the ``config.yaml`` and additional
|
||||
ones may be added in the same place, or specified in the agenda using
|
||||
``augmentations`` entry.
|
||||
|
||||
For example, we can collect power events from the kernel trace by using the ``trace-cmd``
|
||||
instrument.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
augmentations:
|
||||
- trace-cmd
|
||||
- csv
|
||||
trace-cmd:
|
||||
trace_events: ['power*']
|
||||
iterations: 5
|
||||
workloads:
|
||||
- id: 01_dhry
|
||||
name: dhrystone
|
||||
label: dhrystone_15over6
|
||||
runtime_params:
|
||||
cpu0_governor: performance
|
||||
workload_params:
|
||||
threads: 6
|
||||
mloops: 15
|
||||
- id: 02_memc
|
||||
name: memcpy
|
||||
- id: 03_cycl
|
||||
name: cyclictest
|
||||
iterations: 10
|
||||
|
||||
Instruments are not "free" and it is advisable not to have too many enabled at
|
||||
once as that might skew results. For example, you don't want to have power
|
||||
measurement enabled at the same time as event tracing, as the latter may prevent
cores from going into idle states and thus affect the readings collected by
the former.
|
||||
|
||||
Instruments, like output processors, may be enabled (and disabled -- see below)
|
||||
on a per-spec basis. For example, suppose we want to collect /proc/meminfo from the
|
||||
device when we run ``memcpy`` workload, but not for the other two. We can do that using
|
||||
``sysfs_extractor`` instrument, and we will only enable it for ``memcpy``:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
augmentations:
|
||||
- trace-cmd
|
||||
- csv
|
||||
trace-cmd:
|
||||
trace_events: ['power*']
|
||||
iterations: 5
|
||||
workloads:
|
||||
- id: 01_dhry
|
||||
name: dhrystone
|
||||
label: dhrystone_15over6
|
||||
runtime_params:
|
||||
cpu0_governor: performance
|
||||
workload_params:
|
||||
threads: 6
|
||||
mloops: 15
|
||||
- id: 02_memc
|
||||
name: memcpy
|
||||
augmentations: [sysfs_extractor]
|
||||
- id: 03_cycl
|
||||
name: cyclictest
|
||||
iterations: 10
|
||||
|
||||
As with ``config`` sections, the ``augmentations`` entry in the spec needs only to
|
||||
list additional instruments and does not need to repeat instruments specified
|
||||
elsewhere.
|
||||
|
||||
.. note:: At present, it is only possible to enable/disable instruments on
|
||||
a per-spec basis. It is *not* possible to provide configuration on a
|
||||
per-spec basis in the current version of WA (e.g. in our example, it
|
||||
is not possible to specify different ``sysfs_extractor`` paths for
|
||||
different workloads). This restriction may be lifted in future
|
||||
versions of WA.
|
||||
|
||||
Disabling augmentations
|
||||
^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
As seen above, plugins specified with ``augmentations`` clauses get added to
|
||||
those already specified previously. Just because an instrument specified in
|
||||
``config.yaml`` is not listed in the ``config`` section of the agenda, does
|
||||
not mean it will be disabled. If you do want to disable an instrument, you can
|
||||
always remove/comment it out from ``config.yaml``. However, that would introduce
a permanent configuration change to your environment (one that can
be easily reverted, but may be just as easily forgotten). If you want to
temporarily disable an output processor or an instrument for a particular run,
|
||||
you can do that in your agenda by prepending a tilde (``~``) to its name.
|
||||
|
||||
For example, let's say we want to disable ``cpufreq`` instrument enabled in our
|
||||
``config.yaml`` (suppose we're going to send results via email and so want to
|
||||
reduce the total size of the output directory):
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
iterations: 5
|
||||
augmentations:
|
||||
- ~cpufreq
|
||||
- csv
|
||||
sysfs_extractor:
|
||||
paths: [/proc/meminfo]
|
||||
csv:
|
||||
use_all_classifiers: True
|
||||
workloads:
|
||||
- id: 01_dhry
|
||||
name: dhrystone
|
||||
label: dhrystone_15over6
|
||||
runtime_params:
|
||||
cpu0_governor: performance
|
||||
workload_params:
|
||||
threads: 6
|
||||
mloops: 15
|
||||
- id: 02_memc
|
||||
name: memcpy
|
||||
augmentations: [sysfs_extractor]
|
||||
- id: 03_cycl
|
||||
name: cyclictest
|
||||
iterations: 10
|
||||
|
||||
Other Configuration
|
||||
===================
|
||||
-------------------
|
||||
|
||||
.. _configuration_in_agenda:
|
||||
|
||||
As mentioned previously, ``config`` section in an agenda can contain anything
|
||||
that can be defined in ``config.py`` (with Python syntax translated to the
|
||||
equivalent YAML). Certain configuration (e.g. ``run_name``) makes more sense
|
||||
to define in an agenda than a config file. Refer to the
|
||||
that can be defined in ``config.yaml``. Certain configuration (e.g. ``run_name``)
|
||||
makes more sense to define in an agenda than a config file. Refer to the
|
||||
:ref:`configuration-specification` section for details.
|
||||
|
||||
.. code-block:: yaml
|
||||
@ -573,25 +636,22 @@ to define in an agenda than a config file. Refer to the
|
||||
device: generic_android
|
||||
reboot_policy: never
|
||||
|
||||
instrumentation: [coreutil, ~cpufreq]
|
||||
coreutil:
|
||||
threshold: 80
|
||||
iterations: 5
|
||||
augmentations:
|
||||
- ~cpufreq
|
||||
- csv
|
||||
sysfs_extractor:
|
||||
paths: [/proc/meminfo]
|
||||
result_processors: [sqlite]
|
||||
sqlite:
|
||||
database: ~/my_wa_results.sqlite
|
||||
global:
|
||||
iterations: 5
|
||||
csv:
|
||||
use_all_classifiers: True
|
||||
sections:
|
||||
- id: perf
|
||||
runtime_params:
|
||||
sysfile_values:
|
||||
/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
|
||||
cpu0_governor: performance
|
||||
- id: inter
|
||||
runtime_params:
|
||||
sysfile_values:
|
||||
/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: interactive
|
||||
cpu0_governor: interactive
|
||||
workloads:
|
||||
- id: 01_dhry
|
||||
name: dhrystone
|
||||
@ -601,8 +661,7 @@ to define in an agenda than a config file. Refer to the
|
||||
mloops: 15
|
||||
- id: 02_memc
|
||||
name: memcpy
|
||||
instrumentation: [sysfs_extractor]
|
||||
augmentations: [sysfs_extractor]
|
||||
- id: 03_cycl
|
||||
name: cyclictest
|
||||
iterations: 10
|
||||
|
299
doc/source/how_tos/users/device_setup.rst
Normal file
299
doc/source/how_tos/users/device_setup.rst
Normal file
@ -0,0 +1,299 @@
|
||||
.. _setting-up-a-device:
|
||||
|
||||
Setting Up A Device
|
||||
===================
|
||||
|
||||
WA should work with most Android devices out-of-the box, as long as the device
|
||||
is discoverable by ``adb`` (i.e. gets listed when you run ``adb devices``). For
|
||||
USB-attached devices, that should be the case; for network devices, ``adb connect``
|
||||
would need to be invoked with the IP address of the device. If there is only one
|
||||
device connected to the host running WA, then no further configuration should be
|
||||
necessary (though you may want to :ref:`tweak some Android settings <configuring-android>`\ ).
|
||||
|
||||
If you have multiple devices connected, have a non-standard Android build (e.g.
|
||||
on a development board), or want to use some of the more advanced WA functionality,
|
||||
further configuration will be required.
|
||||
|
||||
Android
|
||||
-------
|
||||
|
||||
General Device Setup
|
||||
^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
You can specify the device interface by setting ``device`` setting in
|
||||
``~/.workload_automation/config.yaml``. Available interfaces can be viewed by
|
||||
running ``wa list targets`` command. If you don't see your specific platform
|
||||
listed (which is likely unless you're using one of the Arm-supplied platforms), then
|
||||
you should use ``generic_android`` interface (this is set in the config by
|
||||
default).
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
device: generic_android
|
||||
|
||||
The device interface may be configured through the ``device_config`` setting, whose
|
||||
value is a ``dict`` mapping setting names to their values. Some of the most
|
||||
common parameters you might want to change are outlined below.
|
||||
|
||||
.. confval:: device
|
||||
|
||||
If you have multiple Android devices connected to the host machine, you will
|
||||
need to set this to indicate to WA which device you want it to use.
|
||||
|
||||
.. confval:: working_directory
|
||||
|
||||
WA needs a "working" directory on the device which it will use for collecting
|
||||
traces, caching assets it pushes to the device, etc. By default, it will
|
||||
create one under ``/sdcard`` which should be mapped and writable on standard
|
||||
Android builds. If this is not the case for your device, you will need to
|
||||
specify an alternative working directory (e.g. under ``/data/local``).
|
||||
|
||||
.. _core-names:
|
||||
|
||||
.. confval:: core_names
|
||||
|
||||
``core_names`` should be a list of core names matching the order in which
|
||||
they are exposed in sysfs. For example, Arm TC2 SoC is a 2x3 big.LITTLE
|
||||
system; its core_names would be ``['a7', 'a7', 'a7', 'a15', 'a15']``,
|
||||
indicating that cpu0-cpu2 in cpufreq sysfs structure are A7's and cpu3 and
|
||||
cpu4 are A15's.
|
||||
|
||||
.. note:: This should not usually need to be provided as it will be
|
||||
automatically extracted from the target.
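
If you do need to set it explicitly, a minimal sketch would place it inside
``device_config`` like any other parameter; for the TC2 example above this would
look like:

.. code-block:: yaml

        device_config:
            # matches the order in which cores appear in cpufreq sysfs
            core_names: ['a7', 'a7', 'a7', 'a15', 'a15']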
|
||||
|
||||
|
||||
A typical ``device_config`` inside ``config.yaml`` may look something like
|
||||
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
device_config:
|
||||
device: 0123456789ABCDEF
|
||||
working_directory: '/sdcard/wa-working'
|
||||
# ...
|
||||
|
||||
.. _configuring-android:
|
||||
|
||||
Configuring Android
|
||||
^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
There are a few additional things you may need to make sure of once you have a
device booted into Android (especially if this is an initial boot of a fresh OS
deployment):
|
||||
|
||||
- You have gone through FTU (first time usage) on the home screen and
|
||||
in the apps menu.
|
||||
- You have disabled the screen lock.
|
||||
- You have set sleep timeout to the highest possible value (30 mins on
|
||||
most devices).
|
||||
- You have set the locale language to "English" (this is important for
|
||||
some workloads in which UI automation looks for specific text in UI
|
||||
elements).
|
||||
|
||||
|
||||
Juno Setup
|
||||
----------
|
||||
|
||||
.. note:: At the time of writing, the Android software stack on Juno was still
|
||||
very immature. Some workloads may not run, and there may be stability
|
||||
issues with the device.
|
||||
|
||||
|
||||
The full software stack can be obtained from Linaro:
|
||||
|
||||
https://releases.linaro.org/14.08/members/arm/android/images/armv8-android-juno-lsk
|
||||
|
||||
Please follow the instructions on the "Binary Image Installation" tab on that
|
||||
page. More up-to-date firmware and kernel may also be obtained by registered
|
||||
members from ARM Connected Community: http://www.arm.com/community/ (though this
|
||||
is not guaranteed to work with the Linaro file system).
|
||||
|
||||
UEFI
|
||||
^^^^
|
||||
|
||||
Juno uses UEFI_ to boot the kernel image. UEFI supports multiple boot
|
||||
configurations, and presents a menu on boot to select one (in the default configuration
|
||||
it will automatically boot the first entry in the menu if not interrupted before
|
||||
a timeout). WA will look for a specific entry in the UEFI menu
|
||||
(``'WA'`` by default, but that may be changed by setting ``uefi_entry`` in the
|
||||
``device_config``). When following the UEFI instructions on the above Linaro
|
||||
page, please make sure to name the entry appropriately (or to correctly set the
|
||||
``uefi_entry``).
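
For instance, a minimal ``device_config`` sketch pointing WA at a
differently-named entry (``WA_Test`` here is just a hypothetical name):

.. code-block:: yaml

        device_config:
            # must match the name of the entry in the UEFI boot menu;
            # if omitted, WA looks for an entry called 'WA'
            uefi_entry: 'WA_Test'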
|
||||
|
||||
.. _UEFI: http://en.wikipedia.org/wiki/UEFI
|
||||
|
||||
There are two supported ways for Juno to discover kernel images through UEFI. It
|
||||
can either load them from NOR flash on the board, or from the boot partition on
|
||||
the file system. The setup described on the Linaro page uses the boot partition
|
||||
method.
|
||||
|
||||
If WA does not find the UEFI entry it expects, it will create one. However, it
|
||||
will assume that the kernel image resides in NOR flash, which means it will not
|
||||
work with the Linaro file system. So if you're replicating the Linaro setup exactly,
you will need to create the entry manually, as outlined on the above-linked page.
|
||||
|
||||
Rebooting
|
||||
^^^^^^^^^
|
||||
|
||||
At the time of writing, normal Android reboot did not work properly on Juno
|
||||
Android, causing the device to crash into an irrecoverable state. Therefore, WA
|
||||
will perform a hard reset to reboot the device. It will attempt to do this by
|
||||
toggling the DTR line on the serial connection to the device. In order for this
|
||||
to work, you need to make sure that SW1 configuration switch on the back panel of
|
||||
the board (the right-most DIP switch) is toggled *down*.
|
||||
|
||||
|
||||
Linux
|
||||
-----
|
||||
|
||||
General Device Setup
|
||||
^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
You can specify the device interface by setting ``device`` setting in
|
||||
``~/.workload_automation/config.yaml``. Available interfaces can be viewed by
|
||||
running ``wa list targets`` command. If you don't see your specific platform
|
||||
listed (which is likely unless you're using one of the Arm-supplied platforms), then
|
||||
you should use ``generic_linux`` interface (this is set in the config by
|
||||
default).
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
device: generic_linux
|
||||
|
||||
The device interface may be configured through the ``device_config`` setting, whose
|
||||
value is a ``dict`` mapping setting names to their values. Some of the most
|
||||
common parameters you might want to change are outlined below.
|
||||
|
||||
|
||||
.. confval:: host
|
||||
|
||||
This should be either the DNS name or IP address of the device.
|
||||
|
||||
.. confval:: username
|
||||
|
||||
The login name of the user on the device that WA will use. This user should
|
||||
have a home directory (unless an alternative working directory is specified
|
||||
using ``working_directory`` config -- see below), and, for full
|
||||
functionality, the user should have sudo rights (WA will be able to use
|
||||
sudo-less accounts but some instruments or workloads may not work).
|
||||
|
||||
.. confval:: password
|
||||
|
||||
Password for the account on the device. Either this or a ``keyfile`` (see
|
||||
below) must be specified.
|
||||
|
||||
.. confval:: keyfile
|
||||
|
||||
If key-based authentication is used, this may be used to specify the SSH identity
|
||||
file instead of the password.
|
||||
|
||||
.. confval:: property_files
|
||||
|
||||
This is a list of paths that will be pulled for each WA run into the __meta
|
||||
subdirectory in the results. The intention is to collect meta-data about the
|
||||
device that may aid in reproducing the results later. The paths specified do
|
||||
not have to exist on the device (they will be ignored if they do not). The
|
||||
default list is ``['/proc/version', '/etc/debian_version', '/etc/lsb-release', '/etc/arch-release']``
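
For example, a sketch of overriding the default list inside ``device_config``
(the extra paths here are purely illustrative):

.. code-block:: yaml

        device_config:
            # paths that do not exist on the device are simply ignored
            property_files: ['/proc/version', '/proc/cmdline', '/etc/os-release']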
|
||||
|
||||
|
||||
In addition, ``working_directory`` and ``core_names`` can also
|
||||
be specified and have the same meaning as for Android devices (see above).
|
||||
|
||||
A typical ``device_config`` inside ``config.yaml`` may look something like
|
||||
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
device_config:
|
||||
host: 192.168.0.7
|
||||
username: guest
|
||||
password: guest
|
||||
# ...
|
||||
|
||||
Chrome OS
|
||||
---------
|
||||
|
||||
General Device Setup
|
||||
^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
You can specify the device interface by setting ``device`` setting in
|
||||
``~/.workload_automation/config.yaml``. Available interfaces can be viewed by
|
||||
running ``wa list targets`` command. If you don't see your specific platform
|
||||
listed (which is likely unless you're using one of the Arm-supplied platforms), then
|
||||
you should use ``generic_chromeos`` interface (this is set in the config by
|
||||
default).
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
device: generic_chromeos
|
||||
|
||||
The device interface may be configured through the ``device_config`` setting, whose
value is a ``dict`` mapping setting names to their values. The ChromeOS target
is essentially the same as a Linux device and requires a similar setup; however,
it also optionally supports connecting to an Android container running on the
device, which will be automatically detected if present. If the device supports
Android applications then the Android configuration is also supported. In order
to support this, WA will open two connections to the device: one via SSH to
the main ChromeOS host and another via ADB to the Android container, where a limited
subset of functionality can be performed.
|
||||
|
||||
In order to distinguish between the two connections some of the android specific
|
||||
configuration has been renamed to reflect the destination.
|
||||
|
||||
.. confval:: android_working_directory
|
||||
|
||||
WA needs a "working" directory on the device which it will use for collecting
|
||||
traces, caching assets it pushes to the device, etc. By default, it will
|
||||
create one under ``/sdcard`` which should be mapped and writable on standard
|
||||
Android builds. If this is not the case for your device, you will need to
|
||||
specify an alternative working directory (e.g. under ``/data/local``).
|
||||
|
||||
|
||||
A typical ``device_config`` inside ``config.yaml`` for a ChromeOS device may
|
||||
look something like
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
device_config:
|
||||
host: 192.168.0.7
|
||||
username: root
|
||||
android_working_directory: '/sdcard/wa-working'
|
||||
# ...
|
||||
|
||||
.. note:: This assumes that your Chromebook is in developer mode and is
|
||||
configured to run an SSH server with the appropriate ssh keys added to the
|
||||
authorized_keys file on the device.
|
||||
|
||||
|
||||
Related Settings
|
||||
----------------
|
||||
|
||||
Reboot Policy
|
||||
^^^^^^^^^^^^^
|
||||
|
||||
This indicates when during WA execution the device will be rebooted. By default
|
||||
this is set to ``as_needed``, indicating that WA will only reboot the device when it
needs to (e.g. if the device becomes unresponsive). Please
|
||||
see ``reboot_policy`` documentation in :ref:`configuration-specification` for
|
||||
more details.
|
||||
|
||||
Execution Order
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
``execution_order`` defines the order in which WA will execute workloads.
|
||||
``by_iteration`` (set by default) will execute the first iteration of each spec
|
||||
first, followed by the second iteration of each spec (that defines more than one
|
||||
iteration) and so forth. The alternative will loop through all iterations for
|
||||
the first spec first, then move on to the second spec, and so on. Again, please see
|
||||
:ref:`configuration-specification` for more details.
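
For reference, here is a minimal sketch showing both of these settings set
explicitly in the ``config`` section of an agenda; the values used are the
defaults described above, so this only makes them explicit:

.. code-block:: yaml

        config:
            reboot_policy: as_needed       # reboot only when necessary
            execution_order: by_iteration  # interleave iterations across specs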
|
||||
|
||||
|
||||
Adding a new device interface
|
||||
-----------------------------
|
||||
|
||||
If you are working with a particularly unusual device (e.g. an early-stage
|
||||
development board) or need to be able to handle some quirk of your Android build,
|
||||
configuration available in ``generic_android`` interface may not be enough for
|
||||
you. In that case, you may need to write a custom interface for your device. A
|
||||
device interface is an ``Extension`` (a plug-in) type in WA and is implemented
|
||||
similar to other extensions (such as workloads or instruments). Please refer to
|
||||
:ref:`adding_a_device` section for information on how this may be done.
|
155
doc/source/how_tos/users/revent.rst
Normal file
155
doc/source/how_tos/users/revent.rst
Normal file
@ -0,0 +1,155 @@
|
||||
.. _revent_files_creation:
|
||||
|
||||
Revent
|
||||
======
|
||||
|
||||
Overview and Usage
|
||||
------------------
|
||||
|
||||
The revent utility can be used to record and later play back a sequence of user
|
||||
input events, such as key presses and touch screen taps. This is an alternative
|
||||
to Android UI Automator for providing automation for workloads.
|
||||
|
||||
Using revent with workloads
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Some workloads (pretty much all games) rely on recorded revents for their
|
||||
execution. ReventWorkloads require between one and four revent files to be run.
There is one mandatory recording, ``run``, for performing the actual execution of
the workload; the remaining ones are optional. ``setup`` can be used to perform
the initial setup (navigating menus, selecting game modes, etc).
``extract_results`` can be used to perform any actions after the main stage of
the workload, for example to navigate to a results or summary screen of the app, and
finally ``teardown`` can be used to perform any final actions, for example
exiting the app.
|
||||
|
||||
Because revents are very device-specific\ [*]_, these files would need to
|
||||
be recorded for each device.
|
||||
|
||||
The files must be called ``<device name>.(setup|run|extract_results|teardown).revent``
|
||||
, where ``<device name>`` is the name of your device (as defined by the ``name``
|
||||
attribute of your device's class). WA will look for these files in two
|
||||
places: ``<install dir>/wa/workloads/<workload name>/revent_files``
|
||||
and ``~/.workload_automation/dependencies/<workload name>``. The first
|
||||
location is primarily intended for revent files that come with WA (and if
|
||||
you did a system-wide install, you'll need sudo to add files there), so it's
|
||||
probably easier to use the second location for the files you record. Also,
|
||||
if revent files for a workload exist in both locations, the files under
|
||||
``~/.workload_automation/dependencies`` will be used in favour of those
|
||||
installed with WA.
|
||||
|
||||
.. [*] It's not just about screen resolution -- the event codes may be different
|
||||
even if devices use the same screen.
|
||||
|
||||
.. _revent-recording:
|
||||
|
||||
Recording
|
||||
^^^^^^^^^
|
||||
|
||||
WA features a ``record`` command that will automatically deploy and start revent
|
||||
on the target device.
|
||||
|
||||
If you want to simply record a single recording on the device then the following
|
||||
command can be used which will save the recording in the current directory::
|
||||
|
||||
wa record
|
||||
|
||||
There is one mandatory stage called 'run' and 3 optional stages: 'setup',
|
||||
'extract_results' and 'teardown' which are used for playback of a workload.
|
||||
The different stages are distinguished by the suffix in the recording file path.
|
||||
In order to facilitate creating these recordings, you can specify ``--setup``,
|
||||
``--extract-results``, ``--teardown`` or ``--all`` to indicate which stages you
|
||||
would like to create recordings for and the appropriate file name will be generated.
|
||||
|
||||
You can also directly specify a workload to create recordings for and WA will
|
||||
walk you through the relevant steps. For example, if we wanted to create
recordings for the Angry Birds Rio workload, we can specify the ``workload`` flag
|
||||
with ``-w``. And in this case WA can be used to automatically deploy and launch
|
||||
the workload and record ``setup`` (``-s``) , ``run`` (``-r``) and ``teardown``
|
||||
(``-t``) stages for the workload. In order to do this we would use the following
|
||||
command with an example output shown below::
|
||||
|
||||
wa record -srt -w angrybirds_rio
|
||||
|
||||
::
|
||||
|
||||
INFO Setting up target
|
||||
INFO Deploying angrybirds_rio
|
||||
INFO Press Enter when you are ready to record SETUP...
|
||||
[Pressed Enter]
|
||||
INFO Press Enter when you have finished recording SETUP...
|
||||
[Pressed Enter]
|
||||
INFO Pulling '<device_model>setup.revent' from device
|
||||
INFO Press Enter when you are ready to record RUN...
|
||||
[Pressed Enter]
|
||||
INFO Press Enter when you have finished recording RUN...
|
||||
[Pressed Enter]
|
||||
INFO Pulling '<device_model>.run.revent' from device
|
||||
INFO Press Enter when you are ready to record TEARDOWN...
|
||||
[Pressed Enter]
|
||||
INFO Press Enter when you have finished recording TEARDOWN...
|
||||
[Pressed Enter]
|
||||
INFO Pulling '<device_model>.teardown.revent' from device
|
||||
INFO Tearing down angrybirds_rio
|
||||
INFO Recording(s) are available at: '$WA_USER_DIRECTORY/dependencies/angrybirds_rio/revent_files'
|
||||
|
||||
Once you have made your desired recordings, you can either manually play back
individual recordings using the :ref:`replay <replay-command>` command or, with
|
||||
the recordings in the appropriate dependencies location, simply run the workload
|
||||
using the :ref:`run <run-command>` command and then all the available recordings will be
|
||||
played back automatically.
|
||||
|
||||
For more information on available arguments please see the :ref:`Record <record_command>`
|
||||
command.
|
||||
|
||||
.. note:: By default revent recordings are not portable across devices and
|
||||
therefore will require recording for each new device you wish to use the
|
||||
workload on. Alternatively a "gamepad" recording mode is also supported.
|
||||
This mode requires a gamepad to be connected to the device when recording
|
||||
but the recordings produced in this mode should be portable across devices.
|
||||
|
||||
.. _revent_replaying:
|
||||
|
||||
Replaying
|
||||
^^^^^^^^^
|
||||
|
||||
If you want to replay a single recorded file, you can use ``wa replay``
|
||||
providing it with the file you want to replay. An example of the command output
|
||||
is shown below::
|
||||
|
||||
wa replay my_recording.revent
|
||||
INFO Setting up target
|
||||
INFO Pushing file to target
|
||||
INFO Starting replay
|
||||
INFO Finished replay
|
||||
|
||||
If you are using a device that supports android you can optionally specify a
|
||||
package name to launch before replaying the recording.
|
||||
|
||||
If you have recorded the required files for your workload and have placed them in
|
||||
the appropriate location (or specified the workload during recording) then you
|
||||
can simply run the relevant workload and your recordings will be replayed at the
|
||||
appropriate times automatically.
|
||||
|
||||
For more information please read :ref:`replay-command`.
|
||||
|
||||
Revent vs UiAutomator
|
||||
----------------------
|
||||
|
||||
In general, Android UI Automator is the preferred way of automating user input
|
||||
for Android workloads because, unlike revent, UI Automator does not depend on a
|
||||
particular screen resolution, and so is more portable across different devices.
|
||||
It also gives better control and can potentially be faster for doing UI
|
||||
manipulations, as input events are scripted based on the available UI elements,
|
||||
rather than generated by human input.
|
||||
|
||||
On the other hand, revent can be used to manipulate pretty much any workload,
|
||||
whereas UI Automator only works for Android UI elements (such as text boxes or
|
||||
radio buttons), which makes the latter useless for things like games. Recording
|
||||
a revent sequence is also faster than writing automation code (on the other hand,
one would need to maintain a different revent log for each screen resolution).
|
||||
|
||||
.. note:: For ChromeOS targets, UI Automator can only be used with android
|
||||
applications and not the ChromeOS host applications themselves.
|
||||
|
||||
|
@ -1,13 +1,10 @@
|
||||
.. Workload Automation 2 documentation master file, created by
|
||||
sphinx-quickstart on Mon Jul 15 09:00:46 2013.
|
||||
You can adapt this file completely to your liking, but it should at least
|
||||
contain the root `toctree` directive.
|
||||
|
||||
.. Workload Automation 3 documentation master file,
|
||||
================================================
|
||||
Welcome to Documentation for Workload Automation
|
||||
================================================
|
||||
|
||||
Workload Automation (WA) is a framework for running workloads on real hardware devices. WA
|
||||
supports a number of output formats as well as additional instrumentation (such as Streamline
|
||||
supports a number of output formats as well as additional instruments (such as Streamline
|
||||
traces). A number of workloads are included with the framework.
|
||||
|
||||
|
||||
@ -15,124 +12,82 @@ traces). A number of workloads are included with the framework.
|
||||
|
||||
|
||||
What's New
|
||||
~~~~~~~~~~
|
||||
==========
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
changes
|
||||
migration_guide
|
||||
|
||||
User Information
|
||||
================
|
||||
|
||||
Usage
|
||||
~~~~~
|
||||
|
||||
This section lists general usage documentation. If you're new to WA2, it is
|
||||
recommended you start with the :doc:`quickstart` page. This section also contains
|
||||
This section lists general usage documentation. If you're new to WA3, it is
|
||||
recommended you start with the :doc:`user_guide` page. This section also contains
|
||||
installation and configuration guides.
|
||||
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
quickstart
|
||||
installation
|
||||
device_setup
|
||||
invocation
|
||||
agenda
|
||||
configuration
|
||||
user_guide
|
||||
user_reference
|
||||
|
||||
|
||||
Plugins
|
||||
~~~~~~~~~~
|
||||
How To Guides
|
||||
===============
|
||||
|
||||
This section lists plugins that currently come with WA2. Each package below
|
||||
represents a particular type of plugin (e.g. a workload); each sub-package of
|
||||
that package is a particular instance of that plugin (e.g. the Andebench
|
||||
workload). Clicking on a link will show what the individual plugin does,
|
||||
what configuration parameters it takes, etc.
|
||||
|
||||
For how to implement you own plugins, please refer to the guides in the
|
||||
:ref:`in-depth` section.
|
||||
|
||||
.. raw:: html
|
||||
|
||||
<style>
|
||||
td {
|
||||
vertical-align: text-top;
|
||||
}
|
||||
</style>
|
||||
<table <tr><td>
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:maxdepth: 3
|
||||
|
||||
plugins/workloads
|
||||
how_to
|
||||
|
||||
.. raw:: html
|
||||
|
||||
</td><td>
|
||||
FAQ
|
||||
====
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
plugins/instruments
|
||||
faq
|
||||
|
||||
.. User Reference
|
||||
.. ===============
|
||||
|
||||
.. .. toctree::
|
||||
.. :maxdepth: 2
|
||||
|
||||
|
||||
.. raw:: html
|
||||
|
||||
</td><td>
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
plugins/result_processors
|
||||
|
||||
.. raw:: html
|
||||
|
||||
</td><td>
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
plugins/devices
|
||||
|
||||
.. raw:: html
|
||||
|
||||
</td></tr></table>
|
||||
|
||||
.. _in-depth:
|
||||
|
||||
In-depth
|
||||
~~~~~~~~
|
||||
Developer Information
|
||||
=====================
|
||||
|
||||
This section contains more advanced topics, such how to write your own plugins
|
||||
This section contains more advanced topics, such as how to write your own Plugins
|
||||
and detailed descriptions of how WA functions under the hood.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
conventions
|
||||
writing_plugins
|
||||
execution_model
|
||||
resources
|
||||
additional_topics
|
||||
daq_device_setup
|
||||
revent
|
||||
contributing
|
||||
developer_reference
|
||||
|
||||
API Reference
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
References
|
||||
==========
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 5
|
||||
:maxdepth: 2
|
||||
|
||||
api/modules
|
||||
plugins
|
||||
api_reference
|
||||
|
||||
.. :ref:`FAQ <faq>`
|
||||
.. ================
|
||||
|
||||
Indices and tables
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
* :ref:`genindex`
|
||||
* :ref:`modindex`
|
||||
* :ref:`search`
|
||||
.. Indices and tables
|
||||
.. ==================
|
||||
|
||||
.. .. * :ref:`genindex`
|
||||
.. .. * :ref:`modindex`
|
||||
.. * :ref:`search`
|
||||
|
@ -2,9 +2,17 @@
|
||||
Installation
|
||||
============
|
||||
|
||||
.. module:: wlauto
|
||||
.. contents:: Contents
|
||||
:depth: 2
|
||||
:local:
|
||||
|
||||
This page describes how to install Workload Automation 2.
|
||||
------------------------------------------------------------
|
||||
|
||||
.. module:: wa
|
||||
|
||||
This page describes the three methods of installing Workload Automation 3: using
:ref:`pip` to install the latest release of WA, installing the latest development
version from :ref:`github <github>`, or using a :ref:`dockerfile`.
|
||||
|
||||
|
||||
Prerequisites
|
||||
@ -13,7 +21,7 @@ Prerequisites
|
||||
Operating System
|
||||
----------------
|
||||
|
||||
WA runs on a native Linux install. It was tested with Ubuntu 12.04,
|
||||
WA runs on a native Linux install. It was tested with Ubuntu 14.04,
|
||||
but any recent Linux distribution should work. It should run on either
|
||||
32-bit or 64-bit OS, provided the correct version of Android (see below)
|
||||
was installed. Officially, **other environments are not supported**. WA
|
||||
@ -43,7 +51,7 @@ to your ``PATH``. To test that you've installed it properly, run ``adb
|
||||
version``. The output should be similar to this::
|
||||
|
||||
adb version
|
||||
Android Debug Bridge version 1.0.31
|
||||
Android Debug Bridge version 1.0.39
|
||||
|
||||
.. _here: https://developer.android.com/sdk/index.html
|
||||
|
||||
@ -63,8 +71,9 @@ the install location of the SDK (i.e. ``<path_to_android_sdk>/sdk``).
|
||||
Python
|
||||
------
|
||||
|
||||
Workload Automation 2 requires Python 2.7 (Python 3 is not supported at the moment).
|
||||
Workload Automation 3 requires Python 2.7 (Python 3 is not supported at the moment).
|
||||
|
||||
.. _pip:
|
||||
|
||||
pip
|
||||
---
|
||||
@ -87,25 +96,36 @@ similar distributions, this may be done with APT::
|
||||
If you do run into this issue after already installing some packages,
|
||||
you can resolve it by running ::
|
||||
|
||||
sudo chmod -R a+r /usr/local/lib/python2.7/dist-packages
sudo find /usr/local/lib/python2.7/dist-packages -type d -exec chmod a+x {} \;
|
||||
|
||||
(The paths above will work for Ubuntu; they may need to be adjusted
|
||||
for other distros).
|
||||
|
||||
|
||||
Python Packages
|
||||
---------------
|
||||
|
||||
.. note:: pip should automatically download and install missing dependencies,
|
||||
so if you're using pip, you can skip this section.
|
||||
so if you're using pip, you can skip this section. However, some of the
packages that will be installed have C extensions and will require Python
development headers to install. You can get those by installing the
``python-dev`` package in apt on Ubuntu (or the equivalent for your
|
||||
distribution).
|
||||
|
||||
Workload Automation 2 depends on the following additional libraries:
|
||||
Workload Automation 3 depends on the following additional libraries:
|
||||
|
||||
* pexpect
|
||||
* docutils
|
||||
* pySerial
|
||||
* pyYAML
|
||||
* python-dateutil
|
||||
* louie
|
||||
* pandas
|
||||
* devlib
|
||||
* wrapt
|
||||
* requests
|
||||
* colorama
|
||||
|
||||
You can install these with pip::
|
||||
|
||||
@ -114,6 +134,12 @@ You can install these with pip::
|
||||
sudo -H pip install pyyaml
|
||||
sudo -H pip install docutils
|
||||
sudo -H pip install python-dateutil
|
||||
sudo -H pip install devlib
|
||||
sudo -H pip install pandas
|
||||
sudo -H pip install louie
|
||||
sudo -H pip install wrapt
|
||||
sudo -H pip install requests
|
||||
sudo -H pip install colorama
|
||||
|
||||
Some of these may also be available in your distro's repositories, e.g. ::
|
||||
|
||||
@ -128,7 +154,7 @@ distro package names may differ from pip packages.
|
||||
Optional Python Packages
|
||||
------------------------
|
||||
|
||||
.. note:: unlike the mandatory dependencies in the previous section,
|
||||
.. note:: Unlike the mandatory dependencies in the previous section,
|
||||
pip will *not* install these automatically, so you will have
|
||||
to explicitly install them if/when you need them.
|
||||
|
||||
@ -142,28 +168,26 @@ install them upfront (e.g. if you're planning to use WA to an environment that
|
||||
may not always have Internet access).
|
||||
|
||||
* nose
|
||||
* pandas
|
||||
* PyDAQmx
|
||||
* pymongo
|
||||
* jinja2
|
||||
|
||||
|
||||
.. note:: Some packages have C plugins and will require Python development
|
||||
headers to install. You can get those by installing ``python-dev``
|
||||
package in apt on Ubuntu (or the equivalent for your distribution).
|
||||
|
||||
.. _github:
|
||||
|
||||
Installing
|
||||
==========
|
||||
|
||||
Installing the latest released version from PyPI (Python Package Index)::
|
||||
|
||||
sudo -H pip install wlauto
|
||||
sudo -H pip install wa
|
||||
|
||||
This will install WA along with its mandatory dependencies. If you would like to
|
||||
install all optional dependencies at the same time, do the following instead::
|
||||
|
||||
sudo -H pip install wlauto[all]
|
||||
sudo -H pip install wa[all]
|
||||
|
||||
|
||||
Alternatively, you can also install the latest development version from GitHub
|
||||
(you will need git installed for this to work)::
|
||||
@ -177,8 +201,24 @@ If the above succeeds, try ::
|
||||
|
||||
wa --version
|
||||
|
||||
Hopefully, this should output something along the lines of "Workload Automation
|
||||
version $version".
|
||||
Hopefully, this should output something along the lines of ::
|
||||
|
||||
"Workload Automation version $version".
|
||||
|
||||
.. _dockerfile:
|
||||
|
||||
Dockerfile
|
||||
============
|
||||
|
||||
As an alternative we also provide a Dockerfile that will create an image called
|
||||
wadocker, and is preconfigured to run WA and devlib. Please note that the build
|
||||
process automatically accepts the licenses for the Android SDK, so please be
|
||||
sure that you are willing to accept these prior to building and running the
|
||||
image in a container.
|
||||
|
||||
The Dockerfile can be found in the "extras" folder or online at
|
||||
`<https://github.com/ARM-software/workload-automation/blob/next/extras/Dockerfile>`_
which contains additional information about how to build and to use the file.
|
||||
|
||||
|
||||
(Optional) Post Installation
|
||||
@ -191,59 +231,22 @@ so will need to be supplied by the user. They should be placed into
|
||||
them (you may need to create the directory if it doesn't already exist). You
|
||||
only need to provide the dependencies for workloads you want to use.
|
||||
|
||||
.. _apk_files:
|
||||
|
||||
APK Files
|
||||
---------
|
||||
|
||||
APKs are applicaton packages used by Android. These are necessary to install an
|
||||
application onto devices that do not have Google Play (e.g. devboards running
|
||||
AOSP). The following is a list of workloads that will need one, including the
|
||||
version(s) for which UI automation has been tested. Automation may also work
|
||||
with other versions (especially if it's only a minor or revision difference --
|
||||
major version differens are more likely to contain incompatible UI changes) but
|
||||
this has not been tested.
|
||||
APKs are application packages used by Android. These are necessary to install on
a device when running an :ref:`ApkWorkload <apk-workload>` or derivative.
Please see the workload description using the :ref:`show command <show-command>`
to see which version of the apk the UI automation has been tested with, and place
the apk in the corresponding dependencies location.
Automation may also work with other versions (especially if it's only a minor or
revision difference -- major version differences are more likely to contain
incompatible UI changes) but this has not been tested. As a general rule we do
not guarantee support for the latest version of an app, and apps are updated on an
as-needed basis. We do, however, attempt to maintain backwards compatibility with
previous major releases; beyond this, support will likely be dropped.
|
||||
|
||||
================ ============================================ ========================= ============ ============
|
||||
workload package name version code version name
|
||||
================ ============================================ ========================= ============ ============
|
||||
andebench com.eembc.coremark AndEBench v1383a 1383
|
||||
angrybirds com.rovio.angrybirds Angry Birds 2.1.1 2110
|
||||
angrybirds_rio com.rovio.angrybirdsrio Angry Birds 1.3.2 1320
|
||||
anomaly2 com.elevenbitstudios.anomaly2Benchmark A2 Benchmark 1.1 50
|
||||
antutu com.antutu.ABenchMark AnTuTu Benchmark 5.3 5030000
|
||||
antutu com.antutu.ABenchMark AnTuTu Benchmark 3.3.2 3322
|
||||
antutu com.antutu.ABenchMark AnTuTu Benchmark 4.0.3 4000300
|
||||
benchmarkpi gr.androiddev.BenchmarkPi BenchmarkPi 1.11 5
|
||||
caffeinemark com.flexycore.caffeinemark CaffeineMark 1.2.4 9
|
||||
castlebuilder com.ettinentertainment.castlebuilder Castle Builder 1.0 1
|
||||
castlemaster com.alphacloud.castlemaster Castle Master 1.09 109
|
||||
cfbench eu.chainfire.cfbench CF-Bench 1.2 7
|
||||
citadel com.epicgames.EpicCitadel Epic Citadel 1.07 901107
|
||||
dungeondefenders com.trendy.ddapp Dungeon Defenders 5.34 34
|
||||
facebook com.facebook.katana Facebook 3.4 258880
|
||||
geekbench ca.primatelabs.geekbench2 Geekbench 2 2.2.7 202007
|
||||
geekbench com.primatelabs.geekbench3 Geekbench 3 3.0.0 135
|
||||
glb_corporate net.kishonti.gfxbench GFXBench 3.0.0 1
|
||||
glbenchmark com.glbenchmark.glbenchmark25 GLBenchmark 2.5 2.5 4
|
||||
glbenchmark com.glbenchmark.glbenchmark27 GLBenchmark 2.7 2.7 1
|
||||
gunbros2 com.glu.gunbros2 GunBros2 1.2.2 122
|
||||
ironman com.gameloft.android.ANMP.GloftIMHM Iron Man 3 1.3.1 1310
|
||||
krazykart com.polarbit.sg2.krazyracers Krazy Kart Racing 1.2.7 127
|
||||
linpack com.greenecomputing.linpackpro Linpack Pro for Android 1.2.9 31
|
||||
nenamark se.nena.nenamark2 NenaMark2 2.4 5
|
||||
peacekeeper com.android.chrome Chrome 18.0.1025469 1025469
|
||||
peacekeeper org.mozilla.firefox Firefox 23.0 2013073011
|
||||
quadrant com.aurorasoftworks.quadrant.ui.professional Quadrant Professional 2.0 2000000
|
||||
realracing3 com.ea.games.r3_row Real Racing 3 1.3.5 1305
|
||||
smartbench com.smartbench.twelve Smartbench 2012 1.0.0 5
|
||||
sqlite com.redlicense.benchmark.sqlite RL Benchmark 1.3 5
|
||||
templerun com.imangi.templerun Temple Run 1.0.8 11
|
||||
thechase com.unity3d.TheChase The Chase 1.0 1
|
||||
truckerparking3d com.tapinator.truck.parking.bus3d Truck Parking 3D 2.5 7
|
||||
vellamo com.quicinc.vellamo Vellamo 3.0 3001
|
||||
vellamo com.quicinc.vellamo Vellamo 2.0.3 2003
|
||||
videostreaming tw.com.freedi.youtube.player FREEdi YT Player 2.1.13 79
|
||||
================ ============================================ ========================= ============ ============
|
||||
|
||||
Gaming Workloads
|
||||
----------------
|
||||
@ -258,32 +261,13 @@ it :ref:`here <revent_files_creation>`.
|
||||
|
||||
This is the list of workloads that rely on such recordings:
|
||||
|
||||
+------------------+
| angrybirds       |
+------------------+
| angrybirds_rio   |
+------------------+
| anomaly2         |
+------------------+
| templerun2       |
+------------------+
| castlebuilder    |
+------------------+
| castlemastera    |
+------------------+
| citadel          |
+------------------+
| dungeondefenders |
+------------------+
| gunbros2         |
+------------------+
| ironman          |
+------------------+
| krazykart        |
+------------------+
| realracing3      |
+------------------+
| templerun        |
+------------------+
| truckerparking3d |
+------------------+

.. _assets_repository:
|
||||
@ -307,8 +291,8 @@ that location.
|
||||
|
||||
If you have installed Workload Automation via ``pip`` and wish to remove it, run this command to
|
||||
uninstall it::
|
||||
|
||||
sudo -H pip uninstall wlauto
|
||||
|
||||
sudo -H pip uninstall wa
|
||||
|
||||
.. Note:: This will *not* remove any user configuration (e.g. the ~/.workload_automation directory)
|
||||
|
||||
@ -317,5 +301,5 @@ uninstall it::
|
||||
====================
|
||||
|
||||
To upgrade Workload Automation to the latest version via ``pip``, run::
|
||||
|
||||
sudo -H pip install --upgrade --no-deps wlauto
|
||||
|
||||
sudo -H pip install --upgrade --no-deps wa
|
||||
|
@ -1,73 +0,0 @@
|
||||
Instrumentation Signal-Method Mapping
|
||||
=====================================
|
||||
|
||||
.. _instrumentation_method_map:
|
||||
|
||||
Instrument methods get automatically hooked up to signals based on their names. Mostly, the method
name corresponds to the name of the signal; however, there are a few convenience aliases defined
(listed first) to make it easier to relate instrumentation code to the workload execution model.
|
||||
|
||||
======================================== =========================================
|
||||
method name signal
|
||||
======================================== =========================================
|
||||
initialize run-init-signal
|
||||
setup successful-workload-setup-signal
|
||||
start before-workload-execution-signal
|
||||
stop after-workload-execution-signal
|
||||
process_workload_result successful-iteration-result-update-signal
|
||||
update_result after-iteration-result-update-signal
|
||||
teardown after-workload-teardown-signal
|
||||
finalize run-fin-signal
|
||||
on_run_start start-signal
|
||||
on_run_end end-signal
|
||||
on_workload_spec_start workload-spec-start-signal
|
||||
on_workload_spec_end workload-spec-end-signal
|
||||
on_iteration_start iteration-start-signal
|
||||
on_iteration_end iteration-end-signal
|
||||
before_initial_boot before-initial-boot-signal
|
||||
on_successful_initial_boot successful-initial-boot-signal
|
||||
after_initial_boot after-initial-boot-signal
|
||||
before_first_iteration_boot before-first-iteration-boot-signal
|
||||
on_successful_first_iteration_boot successful-first-iteration-boot-signal
|
||||
after_first_iteration_boot after-first-iteration-boot-signal
|
||||
before_boot before-boot-signal
|
||||
on_successful_boot successful-boot-signal
|
||||
after_boot after-boot-signal
|
||||
on_spec_init spec-init-signal
|
||||
on_run_init run-init-signal
|
||||
on_iteration_init iteration-init-signal
|
||||
before_workload_setup before-workload-setup-signal
|
||||
on_successful_workload_setup successful-workload-setup-signal
|
||||
after_workload_setup after-workload-setup-signal
|
||||
before_workload_execution before-workload-execution-signal
|
||||
on_successful_workload_execution successful-workload-execution-signal
|
||||
after_workload_execution after-workload-execution-signal
|
||||
before_workload_result_update before-iteration-result-update-signal
|
||||
on_successful_workload_result_update successful-iteration-result-update-signal
|
||||
after_workload_result_update after-iteration-result-update-signal
|
||||
before_workload_teardown before-workload-teardown-signal
|
||||
on_successful_workload_teardown successful-workload-teardown-signal
|
||||
after_workload_teardown after-workload-teardown-signal
|
||||
before_overall_results_processing before-overall-results-process-signal
|
||||
on_successful_overall_results_processing successful-overall-results-process-signal
|
||||
after_overall_results_processing after-overall-results-process-signal
|
||||
on_error error_logged
|
||||
on_warning warning_logged
|
||||
======================================== =========================================
|
||||
|
||||
|
||||
The names above may be prefixed with one of the pre-defined prefixes to set the priority of the
instrument method relative to other callbacks registered for the signal (within the same priority
level, callbacks are invoked in the order they were registered). The table below shows the mapping
of each prefix to the corresponding priority:
|
||||
|
||||
=========== ========
|
||||
prefix priority
|
||||
=========== ========
|
||||
very_fast\_ 20
|
||||
fast\_ 10
|
||||
normal\_ 0
|
||||
slow\_ -10
|
||||
very_slow\_ -20
|
||||
=========== ========
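
To make the prefix mechanism concrete, below is a minimal sketch of an instrument that uses prefixed
method names. The class, its ``name``, and the method bodies are illustrative assumptions rather than
a real WA plugin; only the naming convention and priorities come from the tables above.

.. code-block:: python

    from wlauto import Instrument


    class ExampleInstrument(Instrument):
        """Illustrative instrument; not shipped with WA."""

        name = 'example_instrument'  # hypothetical plugin name

        def fast_start(self, context):
            # 'start' maps to before-workload-execution-signal; the 'fast_'
            # prefix raises this callback's priority to 10.
            pass

        def slow_stop(self, context):
            # 'stop' maps to after-workload-execution-signal; the 'slow_'
            # prefix lowers this callback's priority to -10.
            pass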
|
||||
|
@ -1,17 +0,0 @@
|
||||
Instrumentation Signal-Method Mapping
|
||||
=====================================
|
||||
|
||||
.. _instrumentation_method_map:
|
||||
|
||||
Instrument methods get automatically hooked up to signals based on their names. Mostly, the method
name corresponds to the name of the signal; however, there are a few convenience aliases defined
(listed first) to make it easier to relate instrumentation code to the workload execution model.
|
||||
|
||||
$signal_names
|
||||
|
||||
The names above may be prefixed with one of the pre-defined prefixes to set the priority of the
instrument method relative to other callbacks registered for the signal (within the same priority
level, callbacks are invoked in the order they were registered). The table below shows the mapping
of each prefix to the corresponding priority:
|
||||
|
||||
$priority_prefixes
|
@ -1,135 +0,0 @@
|
||||
.. _invocation:
|
||||
|
||||
========
|
||||
Commands
|
||||
========
|
||||
|
||||
Installing the wlauto package will add the ``wa`` command to your system,
which you can run from anywhere. This has a number of sub-commands, which can
be viewed by executing ::
|
||||
|
||||
wa -h
|
||||
|
||||
Individual sub-commands are discussed in detail below.
|
||||
|
||||
run
|
||||
---
|
||||
|
||||
The most common sub-command you will use is ``run``. This will run the specified
workload(s) and process the resulting output. It takes a single mandatory
argument that specifies what you want WA to run. This could be either a
workload name, or a path to an "agenda" file that allows you to specify multiple
workloads as well as a lot of additional configuration (see the :ref:`agenda`
section for details). Executing ::
|
||||
|
||||
wa run -h
|
||||
|
||||
will display help for this subcommand that will look something like this::
|
||||
|
||||
usage: run [-d DIR] [-f] AGENDA
|
||||
|
||||
Execute automated workloads on a remote device and process the resulting
|
||||
output.
|
||||
|
||||
positional arguments:
|
||||
AGENDA Agenda for this workload automation run. This defines
|
||||
which workloads will be executed, how many times, with
|
||||
which tunables, etc. See /usr/local/lib/python2.7
|
||||
/dist-packages/wlauto/agenda-example.csv for an
|
||||
example of how this file should be structured.
|
||||
|
||||
optional arguments:
|
||||
-h, --help show this help message and exit
|
||||
-c CONFIG, --config CONFIG
|
||||
specify an additional config.py
|
||||
-v, --verbose The scripts will produce verbose output.
|
||||
--version Output the version of Workload Automation and exit.
|
||||
--debug Enable debug mode. Note: this implies --verbose.
|
||||
-d DIR, --output-directory DIR
|
||||
Specify a directory where the output will be
|
||||
generated. If the directory already exists, the script
|
||||
will abort unless -f option (see below) is used, in
|
||||
which case the contents of the directory will be
|
||||
overwritten. If this option is not specified, then
|
||||
wa_output will be used instead.
|
||||
-f, --force Overwrite output directory if it exists. By default,
|
||||
the script will abort in this situation to prevent
|
||||
accidental data loss.
|
||||
-i ID, --id ID Specify a workload spec ID from an agenda to run. If
|
||||
this is specified, only that particular spec will be
|
||||
run, and other workloads in the agenda will be
|
||||
ignored. This option may be used to specify multiple
|
||||
IDs.
|
||||
|
||||
|
||||
Output Directory
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
The exact contents of the output directory will depend on the configuration options
|
||||
used, instrumentation and output processors enabled, etc. Typically, the output
|
||||
directory will contain a results file at the top level that lists all
|
||||
measurements that were collected (currently, csv and json formats are
|
||||
supported), along with a subdirectory for each iteration executed with output
|
||||
for that specific iteration.
|
||||
|
||||
At the top level, there will also be a run.log file containing the complete log
|
||||
output for the execution. The contents of this file are equivalent to what you
would get in the console when using the --verbose option.
|
||||
|
||||
Finally, there will be a __meta subdirectory. This will contain a copy of the
|
||||
agenda file used to run the workloads along with any other device-specific
|
||||
configuration files used during execution.
|
||||
|
||||
|
||||
list
|
||||
----
|
||||
|
||||
This lists all plugins of a particular type. For example ::
|
||||
|
||||
wa list workloads
|
||||
|
||||
will list all workloads currently included in WA. The list will consist of
|
||||
plugin names and short descriptions of the functionality they offer.
|
||||
|
||||
|
||||
show
|
||||
----
|
||||
|
||||
This will show detailed information about a plugin, including a more in-depth
|
||||
description and any parameters/configuration that are available. For example
|
||||
executing ::
|
||||
|
||||
wa show andebench
|
||||
|
||||
will produce something like ::
|
||||
|
||||
|
||||
andebench
|
||||
|
||||
AndEBench is an industry standard Android benchmark provided by The Embedded Microprocessor Benchmark Consortium
|
||||
(EEMBC).
|
||||
|
||||
parameters:
|
||||
|
||||
number_of_threads
|
||||
Number of threads that will be spawned by AndEBench.
|
||||
type: int
|
||||
|
||||
single_threaded
|
||||
If ``true``, AndEBench will run with a single thread. Note: this must not be specified if ``number_of_threads``
|
||||
has been specified.
|
||||
type: bool
|
||||
|
||||
http://www.eembc.org/andebench/about.php
|
||||
|
||||
From the website:
|
||||
|
||||
- Initial focus on CPU and Dalvik interpreter performance
|
||||
- Internal algorithms concentrate on integer operations
|
||||
- Compares the difference between native and Java performance
|
||||
- Implements flexible multicore performance analysis
|
||||
- Results displayed in Iterations per second
|
||||
- Detailed log file for comprehensive engineering analysis
|
||||
|
||||
|
||||
|
216
doc/source/migration_guide.rst
Normal file
216
doc/source/migration_guide.rst
Normal file
@ -0,0 +1,216 @@
|
||||
.. _migration-guide:
|
||||
|
||||
Migration Guide
|
||||
================
|
||||
|
||||
.. contents:: Contents
|
||||
:depth: 4
|
||||
:local:
|
||||
|
||||
Users
|
||||
"""""
|
||||
|
||||
Configuration
|
||||
--------------
|
||||
|
||||
Default configuration file change
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Instead of the standard ``config.py`` file located at ``$WA_USER_HOME/config.py``,
WA3 now uses a ``config.yaml`` file, which is written in YAML instead of
Python. Additionally, upon first invocation WA3 will automatically try to detect
whether a WA2 config file is present and convert it to the new WA3 format.
During this process any known parameter name changes should be detected and
updated accordingly.
|
||||
|
||||
Plugin Changes
|
||||
^^^^^^^^^^^^^^^
|
||||
Please note that not all plugins that were available for WA2 are currently
available for WA3, so you may need to remove plugins that are no longer present
from your config files. One plugin of note is the ``standard`` results
processor: this has been removed and its functionality built into the core
framework.
|
||||
|
||||
--------------------------------------------------------
|
||||
|
||||
Agendas
|
||||
-------
|
||||
|
||||
WA3 is designed to keep configuration as backwards compatible as possible so
|
||||
most agendas should work out of the box, however the main changes in the style
|
||||
of WA3 agendas are:
|
||||
|
||||
Global Section
|
||||
^^^^^^^^^^^^^^
|
||||
The ``global`` and ``config`` sections have been merged, so all configuration
that was previously specified under the "global" keyword can now also be specified under
"config". Although "global" is still a valid keyword, you will need to ensure that
there are no duplicated entries between the two sections.
|
||||
|
||||
Instrumentation and Results Processors merged
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The ``instrumentation`` and ``results_processors`` sections from WA2 have now
|
||||
been merged into a single ``augmentations`` section to simplify the
|
||||
configuration process, although for backwards compatibility support for the old
sections has been retained.
|
||||
|
||||
|
||||
Per workload enabling of augmentations
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
All augmentations can now be enabled and disabled on a per-workload basis.
|
||||
|
||||
|
||||
Setting Runtime Parameters
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
:ref:`Runtime Parameters <runtime-parmeters>` are now the preferred way of
configuring cpufreq, hotplug and cpuidle, rather than setting the corresponding
sysfile values, as this will perform additional validation and ensure the nodes
are set in the correct order to avoid any conflicts.
|
||||
|
||||
Parameter Changes ### TODO
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Any parameter names changes listed below will also have their old names
|
||||
specified as aliases and should continue to work as normal, however going forward
|
||||
the new parameter names should be preferred:
|
||||
|
||||
- The workload parameter ``clean_up`` has been renamed to ``cleanup_assets`` to
|
||||
better reflect its purpose.
|
||||
|
||||
- The workload parameter ``check_apk`` has been renamed to
|
||||
``prefer_host_package`` to be more explicit about its functionality, indicating
whether a package on the target or on the host should have priority when
searching for a suitable package.
|
||||
|
||||
|
||||
Output ### TODO
|
||||
^^^^^^^^^^^^^^^
|
||||
Output Directory
|
||||
~~~~~~~~~~~~~~~~
|
||||
The :ref:`output directory <output_directory>`'s layout has changed
and now includes additional subdirectories. There is now a ``__meta`` directory
that contains copies of the agenda and config files supplied to WA for that
particular run so that all the relevant config is self-contained. Additionally,
if one or more jobs fail during a run, then the corresponding output folders will be
moved into a ``__failed`` subdirectory to allow for quicker analysis.
|
||||
|
||||
|
||||
Output API
|
||||
~~~~~~~~~~
|
||||
There is now an Output API which can be used to more easily post process the
|
||||
output from a workload. For more information please see the
|
||||
:ref:`Output API <output-api>` documentation.
|
||||
|
||||
|
||||
-----------------------------------------------------------
|
||||
|
||||
Developers
|
||||
""""""""""""
|
||||
|
||||
Framework
|
||||
---------
|
||||
|
||||
Imports
|
||||
^^^^^^^
|
||||
|
||||
To distinguish between the different versions of WA, WA3's package name has been
|
||||
renamed to ``wa``. This means that all the old ``wlauto`` imports will need to
|
||||
be updated. For more information please see the corresponding section in the
|
||||
:ref:`developer reference section <developer_reference>`.
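
As an illustration, a workload that previously imported from ``wlauto`` would be updated along
these lines (a minimal sketch; the class name and body are placeholders):

.. code-block:: python

    # WA2 (old):
    # from wlauto import Workload, Parameter

    # WA3 (new):
    from wa import Workload, Parameter


    class ExampleWorkload(Workload):

        name = 'example'  # hypothetical workload name
        # body unchanged apart from any API differences noted below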
|
||||
|
||||
Asset Deployment
|
||||
^^^^^^^^^^^^^^^^^^
|
||||
WA3 now contains a generic asset deployment and clean-up mechanism, so if a
workload was previously doing this in an ad-hoc manner it should be updated to
utilize the new functionality. To make use of this functionality, a list of
assets should be set as the workload's ``deployable_assets`` attribute; these will
be automatically retrieved via WA's resource getters and deployed either to the
target's working directory or to a custom folder specified as the workload's
``assets_directory`` attribute. If a custom implementation is required, the
``deploy_assets`` method should be overridden inside the workload. To allow for
the removal of the additional assets, any additional file paths should be added
to the ``self.deployed_assets`` list, which is used to keep track of any assets
that have been deployed for the workload. This is what is used by the generic
``remove_assets`` method to clean up any files deployed to the target.
Optionally, if the file structure of the deployed assets requires additional
logic, then the ``remove_assets`` method can be overridden for a particular
workload as well (see the sketch below).
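
A minimal sketch of this pattern is shown below; the workload name, asset file names, and the
extra path added to ``deployed_assets`` are illustrative assumptions:

.. code-block:: python

    from wa import Workload


    class ExampleAssetWorkload(Workload):  # hypothetical example workload

        name = 'example_assets'
        # Assets listed here are retrieved via WA's resource getters and
        # deployed to the target's working directory (or assets_directory).
        deployable_assets = ['config.bin', 'media.mp4']

        def deploy_assets(self, context):
            # The default implementation is usually sufficient; a custom
            # implementation should record anything it pushes in
            # self.deployed_assets so that remove_assets() can clean it up.
            super(ExampleAssetWorkload, self).deploy_assets(context)
            extra = self.target.path.join(self.target.working_directory,
                                          'extra_output_dir')
            self.deployed_assets.append(extra)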
|
||||
|
||||
--------------------------------------------------------
|
||||
|
||||
Workloads
|
||||
---------
|
||||
|
||||
Python Workload Structure
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
The ``update_results`` method has been split into two stages: there are now
``extract_results`` and ``update_output`` methods, which should be used for extracting
any results from the target back to the host system and for updating the output
with any metrics or artefacts for the specific workload iteration, respectively.
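
For example, a WA2 ``update_result`` implementation might be split roughly as follows. This is a
hedged sketch: the result file name and metric are made up, and it assumes the execution context
exposes an ``output_directory`` attribute and an ``add_metric`` method.

.. code-block:: python

    import os

    from wa import Workload

    RESULT_FILE = 'results.txt'  # hypothetical on-target output file


    class ExampleWorkload(Workload):

        name = 'example'

        # setup() and run() omitted for brevity

        def extract_results(self, context):
            # Pull the raw output from the target back to the host.
            on_target = self.target.path.join(self.target.working_directory,
                                              RESULT_FILE)
            self.host_result = os.path.join(context.output_directory, RESULT_FILE)
            self.target.pull(on_target, self.host_result)

        def update_output(self, context):
            # Parse the pulled file and add metrics to the output.
            with open(self.host_result) as fh:
                score = float(fh.read().strip())
            context.add_metric('score', score)  # assumed context API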
|
||||
|
||||
APK Functionality
|
||||
^^^^^^^^^^^^^^^^^
|
||||
All APK functionality has been re-factored into an APKHandler object, which is
available as the ``apk`` attribute of the workload. This means that, for example,
``self.launchapplication()`` would now become ``self.apk.start_activity()``.
|
||||
|
||||
|
||||
UiAutomator Java Structure
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Instead of a single ``runUiAutomation`` method performing all of the UiAutomation,
the structure has been refactored into several methods that can optionally be overridden.
The available methods are ``initialize``, ``setup``, ``runWorkload``, ``extractResults``
and ``teardown``, to better mimic the different stages of the Python workload.
|
||||
|
||||
|
||||
``initialize`` should have the ``@Before`` tag attached to the method, which will cause it
to be run before each of the stages of the workload. This method should be used to retrieve
|
||||
and set any relevant parameters required during the workload.
|
||||
|
||||
The remaining methods all have the ``@Test`` tag attached to indicate that each
is a test stage that should be called at the appropriate time.
|
||||
|
||||
``setup`` should be used to perform any setup required for the workload, for
|
||||
example dismissing popups or configuring any required settings.
|
||||
|
||||
``runWorkload`` should be used to perform the actual measurable work of the workload.
|
||||
|
||||
``extractResults`` should be used to extract any relevant results from the
|
||||
target after the workload has been completed.
|
||||
|
||||
``teardown`` should be used to perform any final clean up of the workload on the target.
|
||||
|
||||
GUI Functionality
|
||||
^^^^^^^^^^^^^^^^^
|
||||
For UI-based applications, all UI functionality has been re-factored into a
``gui`` attribute, which currently will be either a ``UiAutomatorGUI`` object or
a ``ReventGUI``, depending on the workload type. This means that, for example, if
you wish to pass parameters to a UiAutomator workload you will now need to use
``self.gui.uiauto_params['Parameter Name'] = value``.
|
||||
|
||||
Attributes
|
||||
^^^^^^^^^^
|
||||
The ``device`` attribute of the workload is now a devlib ``target``. Some of the
|
||||
command names remain the same, however there will be differences. The API can be
|
||||
found here: http://devlib.readthedocs.io/en/latest/target.html however some of
|
||||
the more common changes can be found below:
|
||||
|
||||
|
||||
+----------------------------------------------+---------------------------------+
|
||||
| Original Method | New Method |
|
||||
+----------------------------------------------+---------------------------------+
|
||||
|``self.device.pull_file(file)`` | ``self.target.pull(file)`` |
|
||||
+----------------------------------------------+---------------------------------+
|
||||
|``self.device.push_file(file)`` | ``self.target.push(file)`` |
|
||||
+----------------------------------------------+---------------------------------+
|
||||
|``self.device.install_executable(file)`` | ``self.target.install(file)`` |
|
||||
+----------------------------------------------+---------------------------------+
|
||||
|``self.device.execute(cmd, background=True)`` | ``self.target.background(cmd)``|
|
||||
+----------------------------------------------+---------------------------------+
|
||||
|
||||
|
||||
The old ``package`` attribute has been replaced by ``package_names``, which
expects a list of strings and allows multiple package names to be
specified if required. It is also no longer required to explicitly state the
launchable activity; this will be automatically discovered from the APK, so this
workload attribute can be removed. A short sketch combining these changes is shown below.
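
The following sketch pulls a few of these changes together. It assumes an ``ApkWorkload`` base
class is importable from ``wa``; the workload name, package name and file paths are illustrative:

.. code-block:: python

    from wa import ApkWorkload


    class ExampleApk(ApkWorkload):  # hypothetical example workload

        name = 'example_apk'
        # WA2's single 'package' string becomes a list of candidate packages;
        # the launchable activity no longer needs to be specified.
        package_names = ['com.example.benchmark']

        def setup(self, context):
            super(ExampleApk, self).setup(context)
            # WA2: self.device.push_file(...)  ->  WA3: self.target.push(...)
            self.target.push('host_side_config.xml',
                             self.target.working_directory)

        def extract_results(self, context):
            # WA2: self.device.pull_file(...)  ->  WA3: self.target.pull(...)
            remote = self.target.path.join(self.target.working_directory,
                                           'scores.csv')
            self.target.pull(remote, context.output_directory)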
|
||||
|
61
doc/source/plugins.rst
Normal file
61
doc/source/plugins.rst
Normal file
@ -0,0 +1,61 @@
|
||||
.. _plugin-reference:
|
||||
|
||||
=================
|
||||
Plugin Reference
|
||||
=================
|
||||
|
||||
This section lists Plugins that currently come with WA3. Each package below
|
||||
represents a particular type of extension (e.g. a workload); each sub-package of
|
||||
that package is a particular instance of that extension (e.g. the Andebench
|
||||
workload). Clicking on a link will show what the individual extension does,
|
||||
what configuration parameters it takes, etc.
|
||||
|
||||
For how to implement your own Plugins, please refer to the guides in the
|
||||
:ref:`writing plugins <writing-plugins>` section.
|
||||
|
||||
.. raw:: html
|
||||
|
||||
<style>
|
||||
td {
|
||||
vertical-align: text-top;
|
||||
}
|
||||
</style>
|
||||
<table><tr><td>
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
plugins/workloads
|
||||
|
||||
.. raw:: html
|
||||
|
||||
</td><td>
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
plugins/instruments
|
||||
|
||||
|
||||
.. raw:: html
|
||||
|
||||
</td><td>
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
plugins/output_processors
|
||||
|
||||
.. raw:: html
|
||||
|
||||
</td><td>
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
plugins/targets
|
||||
|
||||
.. raw:: html
|
||||
|
||||
</td></tr></table>
|
||||
|
@ -1,284 +0,0 @@
|
||||
==========
|
||||
Quickstart
|
||||
==========
|
||||
|
||||
This guide will show you how to quickly start running workloads using
|
||||
Workload Automation 2.
|
||||
|
||||
|
||||
Install
|
||||
=======
|
||||
|
||||
.. note:: This is a quick summary. For more detailed instructions, please see
|
||||
the :doc:`installation` section.
|
||||
|
||||
Make sure you have Python 2.7 and a recent Android SDK with API level 18 or above
|
||||
installed on your system. A complete install of the Android SDK is required, as
|
||||
WA uses a number of its utilities, not just adb. For the SDK, make sure that either
|
||||
``ANDROID_HOME`` environment variable is set, or that ``adb`` is in your ``PATH``.
|
||||
|
||||
.. Note:: If you plan to run Workload Automation on Linux devices only, SSH is required,
|
||||
and Android SDK is optional if you wish to run WA on Android devices at a
|
||||
later time.
|
||||
|
||||
However, you would be starting off with a limited number of workloads that
|
||||
will run on Linux devices.
|
||||
|
||||
In addition to the base Python 2.7 install, you will also need to have ``pip``
|
||||
(Python's package manager) installed as well. This is usually a separate package.
|
||||
|
||||
Once you have those, you can install WA with::
|
||||
|
||||
sudo -H pip install wlauto
|
||||
|
||||
This will install Workload Automation on your system, along with its mandatory
|
||||
dependencies.
|
||||
|
||||
(Optional) Verify installation
|
||||
-------------------------------
|
||||
|
||||
Once the installation has completed, try executing ::
|
||||
|
||||
wa -h
|
||||
|
||||
You should see a help message outlining available subcommands.
|
||||
|
||||
|
||||
(Optional) APK files
|
||||
--------------------
|
||||
|
||||
A large number of WA workloads are installed as APK files. These cannot be
|
||||
distributed with WA and so you will need to obtain those separately.
|
||||
|
||||
For more details, please see the :doc:`installation` section.
|
||||
|
||||
|
||||
Configure Your Device
|
||||
=====================
|
||||
|
||||
Locate the device configuration file, config.py, under the
|
||||
~/.workload_automation directory. Then adjust the device
|
||||
configuration settings according to the device you are using.
|
||||
|
||||
Android
|
||||
-------
|
||||
|
||||
By default, the device is set to 'generic_android'. WA is configured to work
|
||||
with a generic Android device through ``adb``. If you only have one device listed
|
||||
when you execute ``adb devices``, and your device has a standard Android
|
||||
configuration, then no extra configuration is required.
|
||||
|
||||
However, if your device is connected via network, you will have to manually execute
|
||||
``adb connect <device ip>`` so that it appears in the device listing.
|
||||
|
||||
If you have multiple devices connected, you will need to tell WA which one you
|
||||
want it to use. You can do that by setting ``adb_name`` in device_config section.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# ...
|
||||
|
||||
device_config = dict(
|
||||
adb_name = 'abcdef0123456789',
|
||||
# ...
|
||||
)
|
||||
|
||||
# ...
|
||||
|
||||
Linux
|
||||
-----
|
||||
|
||||
First, set the device to 'generic_linux'
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# ...
|
||||
device = 'generic_linux'
|
||||
# ...
|
||||
|
||||
Find the device_config section and add these parameters
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# ...
|
||||
|
||||
device_config = dict(
|
||||
host = '192.168.0.100',
|
||||
username = 'root',
|
||||
password = 'password'
|
||||
# ...
|
||||
)
|
||||
|
||||
# ...
|
||||
|
||||
Parameters:
|
||||
|
||||
- Host is the IP of your target Linux device
|
||||
- Username is the user for the device
|
||||
- Password is the password for the device
|
||||
|
||||
Enabling and Disabling Instrumentation
|
||||
---------------------------------------
|
||||
|
||||
Some instrumentation tools are enabled after your initial install of WA.
|
||||
|
||||
.. note:: Some Linux devices may not be able to run certain instruments
|
||||
provided by WA (e.g. cpufreq is disabled or unsupported by the
|
||||
device).
|
||||
|
||||
As a start, keep the 'execution_time' instrument enabled while commenting out
|
||||
the rest to disable them.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# ...
|
||||
|
||||
Instrumentation = [
|
||||
# Records the time it took to run the workload
|
||||
'execution_time',
|
||||
|
||||
# Collects /proc/interrupts before and after execution and does a diff.
|
||||
# 'interrupts',
|
||||
|
||||
# Collects the contents of /sys/devices/system/cpu before and after execution and does a diff.
|
||||
# 'cpufreq',
|
||||
|
||||
# ...
|
||||
)
|
||||
|
||||
|
||||
|
||||
This should give you basic functionality. If you are working with a development
|
||||
board or you need some advanced functionality (e.g. big.LITTLE tuning parameters),
|
||||
additional configuration may be required. Please see the :doc:`device_setup`
|
||||
section for more details.
|
||||
|
||||
|
||||
Running Your First Workload
|
||||
===========================
|
||||
|
||||
The simplest way to run a workload is to specify it as a parameter to WA ``run``
|
||||
sub-command::
|
||||
|
||||
wa run dhrystone
|
||||
|
||||
You will see INFO output from WA as it executes each stage of the run. A
|
||||
completed run output should look something like this::
|
||||
|
||||
INFO Initializing
|
||||
INFO Running workloads
|
||||
INFO Connecting to device
|
||||
INFO Initializing device
|
||||
INFO Running workload 1 dhrystone (iteration 1)
|
||||
INFO Setting up
|
||||
INFO Executing
|
||||
INFO Processing result
|
||||
INFO Tearing down
|
||||
INFO Processing overall results
|
||||
INFO Status available in wa_output/status.txt
|
||||
INFO Done.
|
||||
INFO Ran a total of 1 iterations: 1 OK
|
||||
INFO Results can be found in wa_output
|
||||
|
||||
Once the run has completed, you will find a directory called ``wa_output``
|
||||
in the location where you have invoked ``wa run``. Within this directory,
|
||||
you will find a "results.csv" file which will contain results obtained for
|
||||
dhrystone, as well as a "run.log" file containing detailed log output for
|
||||
the run. You will also find a sub-directory called 'dhrystone_1_1' that
|
||||
contains the results for that iteration. Finally, you will find a copy of the
|
||||
agenda file in the ``wa_output/__meta`` subdirectory. The contents of
|
||||
iteration-specific subdirectories will vary from workload to workload, and,
|
||||
along with the contents of the main output directory, will depend on the
|
||||
instrumentation and result processors that were enabled for that run.
|
||||
|
||||
The ``run`` sub-command takes a number of options that control its behavior,
|
||||
you can view those by executing ``wa run -h``. Please see the :doc:`invocation`
|
||||
section for details.
|
||||
|
||||
|
||||
Create an Agenda
|
||||
================
|
||||
|
||||
Simply running a single workload is normally of little use. Typically, you would
|
||||
want to specify several workloads, setup the device state and, possibly, enable
|
||||
additional instrumentation. To do this, you would need to create an "agenda" for
|
||||
the run that outlines everything you want WA to do.
|
||||
|
||||
Agendas are written using YAML_ markup language. A simple agenda might look
|
||||
like this:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
instrumentation: [~execution_time]
|
||||
result_processors: [json]
|
||||
global:
|
||||
iterations: 2
|
||||
workloads:
|
||||
- memcpy
|
||||
- name: dhrystone
|
||||
params:
|
||||
mloops: 5
|
||||
threads: 1
|
||||
|
||||
This agenda
|
||||
|
||||
- Specifies two workloads: memcpy and dhrystone.
|
||||
- Specifies that dhrystone should run in one thread and execute five million loops.
|
||||
- Specifies that each of the two workloads should be run twice.
|
||||
- Enables json result processor, in addition to the result processors enabled in
|
||||
the config.py.
|
||||
- Disables execution_time instrument, if it is enabled in the config.py
|
||||
|
||||
An agenda can be created in a text editor and saved as a YAML file. Please make note of
|
||||
where you have saved the agenda.
|
||||
|
||||
Please see :doc:`agenda` section for more options.
|
||||
|
||||
.. _YAML: http://en.wikipedia.org/wiki/YAML
|
||||
|
||||
Examples
|
||||
========
|
||||
|
||||
These examples show some useful options with the ``wa run`` command.
|
||||
|
||||
To run your own agenda::
|
||||
|
||||
wa run <path/to/agenda> (e.g. wa run ~/myagenda.yaml)
|
||||
|
||||
To redirect the output to a different directory other than wa_output::
|
||||
|
||||
wa run dhrystone -d my_output_directory
|
||||
|
||||
To use a different config.py file::
|
||||
|
||||
wa run -c myconfig.py dhrystone
|
||||
|
||||
To use the same output directory but override existing contents to
|
||||
store new dhrystone results::
|
||||
|
||||
wa run -f dhrystone
|
||||
|
||||
To display verbose output while running memcpy::
|
||||
|
||||
wa run --verbose memcpy
|
||||
|
||||
Uninstall
|
||||
=========
|
||||
|
||||
If you have installed Workload Automation via ``pip``, then run this command to
|
||||
uninstall it::
|
||||
|
||||
sudo pip uninstall wlauto
|
||||
|
||||
|
||||
.. Note:: It will *not* remove any user configuration (e.g. the ~/.workload_automation
|
||||
directory).
|
||||
|
||||
Upgrade
|
||||
=======
|
||||
|
||||
To upgrade Workload Automation to the latest version via ``pip``, run::
|
||||
|
||||
sudo pip install --upgrade --no-deps wlauto
|
||||
|
@ -1,47 +0,0 @@
|
||||
.. _resources:
|
||||
|
||||
Dynamic Resource Resolution
|
||||
===========================
|
||||
|
||||
Introduced in version 2.1.3.
|
||||
|
||||
The idea is to decouple resource identification from resource discovery.
|
||||
Workloads/instruments/devices/etc state *what* resources they need, and not
|
||||
*where* to look for them -- this instead is left to the resource resolver that
|
||||
is now part of the execution context. The actual discovery of resources is
|
||||
performed by resource getters that are registered with the resolver.
|
||||
|
||||
A resource type is defined by a subclass of
|
||||
:class:`wlauto.core.resource.Resource`. An instance of this class describes a
|
||||
resource that is to be obtained. At minimum, a ``Resource`` instance has an
|
||||
owner (which is typically the object that is looking for the resource), but
|
||||
specific resource types may define other parameters that describe an instance of
|
||||
that resource (such as file names, URLs, etc).
|
||||
|
||||
An object looking for a resource invokes a resource resolver with an instance of
|
||||
``Resource`` describing the resource it is after. The resolver goes through the
|
||||
getters registered for that resource type in priority order attempting to obtain
|
||||
the resource; once the resource is obtained, it is returned to the calling
|
||||
object. If none of the registered getters could find the resource, ``None`` is
|
||||
returned instead.
|
||||
|
||||
The most common kind of object looking for resources is a ``Workload``, and
|
||||
since v2.1.3, ``Workload`` class defines
|
||||
:py:meth:`wlauto.core.workload.Workload.init_resources` method that may be
|
||||
overridden by subclasses to perform resource resolution. For example, a workload
|
||||
looking for an APK file would do so like this::
|
||||
|
||||
from wlauto import Workload
|
||||
from wlauto.common.resources import ApkFile
|
||||
|
||||
class AndroidBenchmark(Workload):
|
||||
|
||||
# ...
|
||||
|
||||
def init_resources(self, context):
|
||||
self.apk_file = context.resource.get(ApkFile(self))
|
||||
|
||||
# ...
|
||||
|
||||
|
||||
Currently available resource types are defined in :py:mod:`wlauto.common.resources`.
|
54
doc/source/run_config/Meta_Configuration.rst
Normal file
54
doc/source/run_config/Meta_Configuration.rst
Normal file
@ -0,0 +1,54 @@
|
||||
user_directory:
|
||||
type: ``'str'``
|
||||
|
||||
Path to the user directory. This is the location WA will look for
|
||||
user configuration, additional plugins and plugin dependencies.
|
||||
|
||||
default: ``'~/.workload_automation'``
|
||||
|
||||
assets_repository:
|
||||
type: ``'str'``
|
||||
|
||||
The local mount point for the filer hosting WA assets.
|
||||
|
||||
logging:
|
||||
type: ``'LoggingConfig'``
|
||||
|
||||
WA logging configuration. This should be a dict with a subset
|
||||
of the following keys::
|
||||
|
||||
:normal_format: Logging format used for console output
|
||||
:verbose_format: Logging format used for verbose console output
|
||||
:file_format: Logging format used for run.log
|
||||
:color: If ``True`` (the default), console logging output will
|
||||
contain bash color escape codes. Set this to ``False`` if
|
||||
console output will be piped somewhere that does not know
|
||||
how to handle those.
|
||||
|
||||
default: ::
|
||||
|
||||
{
|
||||
color: True,
|
||||
verbose_format: %(asctime)s %(levelname)-8s %(name)s: %(message)s,
|
||||
regular_format: %(levelname)-8s %(message)s,
|
||||
file_format: %(asctime)s %(levelname)-8s %(name)s: %(message)s
|
||||
}
|
||||
|
||||
verbosity:
|
||||
type: ``'integer'``
|
||||
|
||||
Verbosity of console output.
|
||||
|
||||
default_output_directory:
|
||||
type: ``'str'``
|
||||
|
||||
The default output directory that will be created if not
|
||||
specified when invoking a run.
|
||||
|
||||
default: ``'wa_output'``
|
||||
|
||||
extra_plugin_paths:
|
||||
type: ``'list_of_strs'``
|
||||
|
||||
A list of additional paths to scan for plugins.
|
||||
|
136
doc/source/run_config/Run_Configuration.rst
Normal file
136
doc/source/run_config/Run_Configuration.rst
Normal file
@ -0,0 +1,136 @@
|
||||
execution_order:
|
||||
type: ``'str'``
|
||||
|
||||
Defines the order in which the agenda spec will be executed. At the
|
||||
moment, the following execution orders are supported:
|
||||
|
||||
``"by_iteration"``
|
||||
The first iteration of each workload spec is executed one after
|
||||
the other, so all workloads are executed before proceeding on
|
||||
to the second iteration. E.g. A1 B1 C1 A2 C2 A3. This is the
|
||||
default if no order is explicitly specified.
|
||||
|
||||
In case of multiple sections, this will spread them out, such
|
||||
that specs from the same section are further apart. E.g. given
|
||||
sections X and Y, global specs A and B, and two iterations,
|
||||
this will run ::
|
||||
|
||||
X.A1, Y.A1, X.B1, Y.B1, X.A2, Y.A2, X.B2, Y.B2
|
||||
|
||||
``"by_section"``
|
||||
Same as ``"by_iteration"``, however this will group specs from
|
||||
the same section together, so given sections X and Y, global
|
||||
specs A and B, and two iterations, this will run ::
|
||||
|
||||
X.A1, X.B1, Y.A1, Y.B1, X.A2, X.B2, Y.A2, Y.B2
|
||||
|
||||
``"by_spec"``
|
||||
All iterations of the first spec are executed before moving on
|
||||
to the next spec. E.g. A1 A2 A3 B1 C1 C2.
|
||||
|
||||
``"random"``
|
||||
Execution order is entirely random.
|
||||
|
||||
allowed values: ``'by_iteration'``, ``'by_spec'``, ``'by_section'``, ``'random'``
|
||||
|
||||
default: ``'by_iteration'``
|
||||
|
||||
reboot_policy:
|
||||
type: ``'RebootPolicy'``
|
||||
|
||||
This defines when during execution of a run the Device will be
|
||||
rebooted. The possible values are:
|
||||
|
||||
``"as_needed"``
|
||||
The device will only be rebooted if the need arises (e.g. if it
|
||||
becomes unresponsive).
|
||||
|
||||
``"never"``
|
||||
The device will never be rebooted.
|
||||
|
||||
``"initial"``
|
||||
The device will be rebooted when the execution first starts,
|
||||
just before executing the first workload spec.
|
||||
|
||||
``"each_spec"``
|
||||
The device will be rebooted before running a new workload spec.
|
||||
|
||||
.. note:: this acts the same as each_iteration when execution order
|
||||
is set to by_iteration
|
||||
|
||||
``"each_iteration"``
|
||||
The device will be rebooted before each new iteration.
|
||||
|
||||
allowed values: ``'never'``, ``'as_needed'``, ``'initial'``, ``'each_spec'``, ``'each_iteration'``
|
||||
|
||||
default: ``'as_needed'``
|
||||
|
||||
device:
|
||||
type: ``'str'``
|
||||
|
||||
This setting defines what specific Device subclass will be used to
|
||||
interact the connected device. Obviously, this must match your
|
||||
setup.
|
||||
|
||||
default: ``'generic_android'``
|
||||
|
||||
retry_on_status:
|
||||
type: ``'list_of_Enums'``
|
||||
|
||||
This is a list of statuses on which a job will be considered to have
|
||||
failed and will be automatically retried up to ``max_retries``
|
||||
times. This defaults to ``["FAILED", "PARTIAL"]`` if not set.
|
||||
Possible values are::
|
||||
|
||||
``"OK"``
|
||||
This iteration has completed and no errors have been detected
|
||||
|
||||
``"PARTIAL"``
|
||||
One or more instruments have failed (the iteration may still be running).
|
||||
|
||||
``"FAILED"``
|
||||
The workload itself has failed.
|
||||
|
||||
``"ABORTED"``
|
||||
The user interrupted the workload
|
||||
|
||||
allowed values: ``RUNNING``, ``OK``, ``PARTIAL``, ``FAILED``, ``ABORTED``, ``SKIPPED``
|
||||
|
||||
default: ``['FAILED', 'PARTIAL']``
|
||||
|
||||
max_retries:
|
||||
type: ``'integer'``
|
||||
|
||||
The maximum number of times failed jobs will be retried before
|
||||
giving up.
|
||||
|
||||
.. note:: this number does not include the original attempt
|
||||
|
||||
default: ``2``
|
||||
|
||||
bail_on_init_failure:
|
||||
type: ``'boolean'``
|
||||
|
||||
When jobs fail during their main setup and run phases, WA will
|
||||
continue attempting to run the remaining jobs. However, by default,
|
||||
if they fail during their early initialization phase, the entire run
|
||||
will end without continuing to run jobs. Setting this to ``False``
|
||||
means that WA will instead skip all the jobs from the job spec that
|
||||
failed, but continue attempting to run others.
|
||||
|
||||
default: ``True``
|
||||
|
||||
allow_phone_home:
|
||||
type: ``'boolean'``
|
||||
|
||||
Setting this to ``False`` prevents running any workloads that are marked
|
||||
with 'phones_home', meaning they are at risk of exposing information
|
||||
about the device to the outside world. For example, some benchmark
|
||||
applications upload device data to a database owned by the
|
||||
maintainers.
|
||||
|
||||
This can be used to minimise the risk of accidentally running such
|
||||
workloads when testing confidential devices.
|
||||
|
||||
default: ``True``
|
||||
|
472
doc/source/user_guide.rst
Normal file
472
doc/source/user_guide.rst
Normal file
@ -0,0 +1,472 @@
|
||||
==========
|
||||
User Guide
|
||||
==========
|
||||
|
||||
This guide will show you how to quickly start running workloads using
|
||||
Workload Automation 3.
|
||||
|
||||
.. contents:: Contents
|
||||
:depth: 2
|
||||
:local:
|
||||
|
||||
---------------------------------------------------------------
|
||||
|
||||
|
||||
Install
|
||||
=======
|
||||
|
||||
.. note:: This is a quick summary. For more detailed instructions, please see
|
||||
the :doc:`installation` section.
|
||||
|
||||
Make sure you have Python 2.7 and a recent Android SDK with API level 18 or above
|
||||
installed on your system. A complete install of the Android SDK is required, as
|
||||
WA uses a number of its utilities, not just adb. For the SDK, make sure that either
|
||||
``ANDROID_HOME`` environment variable is set, or that ``adb`` is in your ``PATH``.
|
||||
|
||||
.. Note:: If you plan to run Workload Automation on Linux devices only, SSH is required,
|
||||
and Android SDK is optional if you wish to run WA on Android devices at a
|
||||
later time.
|
||||
|
||||
However, you would be starting off with a limited number of workloads that
|
||||
will run on Linux devices.
|
||||
|
||||
In addition to the base Python 2.7 install, you will also need to have ``pip``
|
||||
(Python's package manager) installed as well. This is usually a separate package.
|
||||
|
||||
Once you have those, you can install WA with::
|
||||
|
||||
sudo -H pip install wa
|
||||
|
||||
This will install Workload Automation on your system, along with its mandatory
|
||||
dependencies.
|
||||
|
||||
Alternatively, we provide a Dockerfile that can be used to create a Docker
|
||||
image for running WA along with its dependencies. More information can be found
|
||||
:ref:`here <dockerfile>`.
|
||||
|
||||
(Optional) Verify installation
|
||||
-------------------------------
|
||||
|
||||
Once the installation has completed, try executing ::
|
||||
|
||||
wa -h
|
||||
|
||||
You should see a help message outlining available subcommands.
|
||||
|
||||
|
||||
(Optional) APK files
|
||||
--------------------
|
||||
|
||||
A large number of WA workloads are installed as APK files. These cannot be
|
||||
distributed with WA and so you will need to obtain those separately.
|
||||
|
||||
For more details, please see the :ref:`installation <apk_files>` section.
|
||||
|
||||
|
||||
List Command
|
||||
============
|
||||
In order to get started with using WA, we first need to find
out what is available to use. In order to do this we can use the "list"
|
||||
command followed by the type of plugin that you wish to see.
|
||||
|
||||
For example to see what workloads are available along with a short description
|
||||
of each you run::
|
||||
|
||||
wa list workloads
|
||||
|
||||
Which will give an output in the format of:
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
adobereader: The Adobe Reader workflow carries out the following typical
|
||||
productivity tasks.
|
||||
androbench: Executes storage performance benchmarks
|
||||
angrybirds_rio: Angry Birds Rio game.
|
||||
antutu: Executes Antutu 3D, UX, CPU and Memory tests
|
||||
applaunch: This workload launches and measures the launch time of applications
|
||||
for supporting workloads.
|
||||
benchmarkpi: Measures the time the target device takes to run and complete the
|
||||
Pi calculation algorithm.
|
||||
dhrystone: Runs the Dhrystone benchmark.
|
||||
exoplayer: Android ExoPlayer
|
||||
geekbench: Geekbench provides a comprehensive set of benchmarks engineered to
|
||||
quickly and accurately measure
|
||||
processor and memory performance.
|
||||
#..
|
||||
|
||||
The same syntax can be used to display ``commands``,
|
||||
``energy_instrument_backends``, ``instruments``, ``output_processors``,
|
||||
``resource_getters`` and ``targets``. Alternatively please see the
|
||||
:ref:`Plugin Reference <plugin-reference>` for an online version.
|
||||
|
||||
|
||||
Configure Your Device
|
||||
=====================
|
||||
|
||||
There are multiple options for configuring your device depending on your
|
||||
particular use case.
|
||||
|
||||
You can either add your configuration to the default configuration file
|
||||
``config.yaml``, under the ``$WA_USER_HOME/`` directory or you can specify it in
|
||||
the ``config`` section of your agenda directly.
|
||||
|
||||
Alternatively, if you are using multiple devices, you may want to create separate
config files for each of the devices you will be using. This allows you to
|
||||
specify which device you would like to use for a particular run and pass it as
|
||||
an argument when invoking with the ``-c`` flag.
|
||||
::
|
||||
|
||||
wa run dhrystone -c my_device.yaml
|
||||
|
||||
By default WA will use the “most specific” configuration available; for example,
any configuration specified inside an agenda will override a passed
configuration file, which will in turn override the default configuration file.
|
||||
|
||||
.. note:: For more information about configuring your
|
||||
device please see :ref:`Setting Up A Device <setting-up-a-device>`.
|
||||
|
||||
Android
|
||||
-------
|
||||
|
||||
By default, the device is set to 'generic_android'. WA is configured to work
|
||||
with a generic Android device through ``adb``. If you only have one device listed
|
||||
when you execute ``adb devices``, and your device has a standard Android
|
||||
configuration, then no extra configuration is required.
|
||||
|
||||
However, if your device is connected via network, you will have to manually execute
|
||||
``adb connect <device ip>`` so that it appears in the device listing.
|
||||
|
||||
If you have multiple devices connected, you will need to tell WA which one you
|
||||
want it to use. You can do that by setting ``device`` in the device_config section.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# ...
|
||||
|
||||
device_config:
|
||||
device: 'abcdef0123456789'
|
||||
# ...
|
||||
# ...
|
||||
|
||||
Linux
|
||||
-----
|
||||
|
||||
First, set the device to 'generic_linux'
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# ...
|
||||
device: 'generic_linux'
|
||||
# ...
|
||||
|
||||
Find the device_config section and add these parameters
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# ...
|
||||
|
||||
device_config:
|
||||
host: '192.168.0.100'
|
||||
username: 'root'
|
||||
password: 'password'
|
||||
# ...
|
||||
# ...
|
||||
|
||||
Parameters:
|
||||
|
||||
- Host is the IP of your target Linux device
|
||||
- Username is the user for the device
|
||||
- Password is the password for the device
|
||||
|
||||
Enabling and Disabling Augmentations
|
||||
---------------------------------------
|
||||
|
||||
Augmentations are the collective name for "instruments" and "results
|
||||
processors" in WA3.
|
||||
|
||||
Some augmentations are enabled by default after your initial install of WA,
|
||||
which are specified in the ``config.yaml`` file located in your
|
||||
``WA_USER_DIRECTORY``, typically ``~/.workload_automation``.
|
||||
|
||||
.. note:: Some Linux devices may not be able to run certain augmentations
|
||||
provided by WA (e.g. cpufreq is disabled or unsupported by the
|
||||
device).
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# ...
|
||||
|
||||
augmentations:
|
||||
# Records the time it took to run the workload
|
||||
- execution_time
|
||||
|
||||
# Collects /proc/interrupts before and after execution and does a diff.
|
||||
- interrupts
|
||||
|
||||
# Collects the contents of /sys/devices/system/cpu before and after
|
||||
# execution and does a diff.
|
||||
- cpufreq
|
||||
|
||||
# Generate a txt file containing general status information about
|
||||
# which runs failed and which were successful.
|
||||
- status
|
||||
|
||||
# ...
|
||||
|
||||
If you only wanted to keep the 'execution_time' instrument enabled, you can comment out
|
||||
the rest of the listed augmentations to disable them.
|
||||
|
||||
This should give you basic functionality. If you are working with a development
|
||||
board or you need some advanced functionality additional configuration may be required.
|
||||
Please see the :ref:`device_setup` section for more details.
|
||||
|
||||
.. note:: In WA2 'Instrumentation' and 'Result Processors' were divided up into their
|
||||
own sections in the agenda. In WA3 they now fall under the same category of
|
||||
'augmentations'. For compatibility the old naming structure is still valid
|
||||
however using the new entry names is recommended.
|
||||
|
||||
|
||||
|
||||
Running Your First Workload
|
||||
===========================
|
||||
|
||||
The simplest way to run a workload is to specify it as a parameter to WA's
:ref:`run <run-command>` sub-command::
|
||||
|
||||
wa run dhrystone
|
||||
|
||||
You will see INFO output from WA as it executes each stage of the run. A
|
||||
completed run output should look something like this::
|
||||
|
||||
INFO Creating output directory.
|
||||
INFO Initializing run
|
||||
INFO Connecting to target
|
||||
INFO Setting up target
|
||||
INFO Initializing execution context
|
||||
INFO Generating jobs
|
||||
INFO Loading job wk1 (dhrystone) [1]
|
||||
INFO Installing instruments
|
||||
INFO Installing output processors
|
||||
INFO Starting run
|
||||
INFO Initializing run
|
||||
INFO Initializing job wk1 (dhrystone) [1]
|
||||
INFO Running job wk1
|
||||
INFO Configuring augmentations
|
||||
INFO Configuring target for job wk1 (dhrystone) [1]
|
||||
INFO Setting up job wk1 (dhrystone) [1]
|
||||
INFO Running job wk1 (dhrystone) [1]
|
||||
INFO Tearing down job wk1 (dhrystone) [1]
|
||||
INFO Completing job wk1
|
||||
INFO Job completed with status OK
|
||||
INFO Finalizing run
|
||||
INFO Finalizing job wk1 (dhrystone) [1]
|
||||
INFO Done.
|
||||
INFO Run duration: 9 seconds
|
||||
INFO Ran a total of 1 iterations: 1 OK
|
||||
INFO Results can be found in wa_output
|
||||
|
||||
|
||||
Once the run has completed, you will find a directory called ``wa_output``
|
||||
in the location where you have invoked ``wa run``. Within this directory,
|
||||
you will find a "results.csv" file which will contain results obtained for
|
||||
dhrystone, as well as a "run.log" file containing detailed log output for
|
||||
the run. You will also find a sub-directory called 'wk1-dhrystone-1' that
|
||||
contains the results for that iteration. Finally, you will find various additional
|
||||
information in the ``wa_output/__meta`` subdirectory for example information
|
||||
extracted from the target and a copy of the agenda file. The contents of
|
||||
iteration-specific subdirectories will vary from workload to workload, and,
|
||||
along with the contents of the main output directory, will depend on the
|
||||
augmentations that were enabled for that run.
|
||||
|
||||
The ``run`` sub-command takes a number of options that control its behaviour,
|
||||
you can view those by executing ``wa run -h``. Please see the :ref:`invocation`
|
||||
section for details.
|
||||
|
||||
|
||||
Create an Agenda
|
||||
================
|
||||
|
||||
Simply running a single workload is normally of little use. Typically, you would
|
||||
want to specify several workloads, setup the device state and, possibly, enable
|
||||
additional augmentations. To do this, you would need to create an "agenda" for
|
||||
the run that outlines everything you want WA to do.
|
||||
|
||||
Agendas are written using YAML_ markup language. A simple agenda might look
|
||||
like this:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
augmentations:
|
||||
- ~execution_time
|
||||
- json
|
||||
iterations: 2
|
||||
workloads:
|
||||
- memcpy
|
||||
- name: dhrystone
|
||||
params:
|
||||
mloops: 5
|
||||
threads: 1
|
||||
|
||||
This agenda:
|
||||
|
||||
- Specifies two workloads: memcpy and dhrystone.
|
||||
- Specifies that dhrystone should run in one thread and execute five million loops.
|
||||
- Specifies that each of the two workloads should be run twice.
|
||||
- Enables json output processor, in addition to the output processors enabled in
|
||||
the config.yaml.
|
||||
- Disables execution_time instrument, if it is enabled in the config.yaml
|
||||
|
||||
An agenda can be created using WA's ``create`` :ref:`command <using-the-create-command>`
|
||||
or in a text editor and saved as a YAML file.
|
||||
|
||||
For more options please see the :ref:`agenda` documentation.
|
||||
|
||||
.. _YAML: http://en.wikipedia.org/wiki/YAML
|
||||
|
||||
.. _using-the-create-command:
|
||||
|
||||
Using the Create Command
|
||||
-------------------------
|
||||
The easiest way to create an agenda is to use the 'create' command. For more
|
||||
in-depth information please see the :ref:`Create Command <create-command>` documentation.
|
||||
|
||||
In order to populate the agenda with relevant information you can supply all of
|
||||
the plugins you wish to use as arguments to the command. For example, if we want
to create an agenda file for running ``dhrystone`` on a 'generic android' device and we
want to enable the ``execution_time`` and ``trace-cmd`` instruments and display the
metrics using the ``csv`` output processor, we would use the following command::
|
||||
|
||||
wa create agenda generic_android dhrystone execution_time trace-cmd csv -d my_agenda.yaml
|
||||
|
||||
This will produce a `my_agenda.yaml` file containing all the relevant
|
||||
configuration for the specified plugins along with their default values as shown
|
||||
below:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
config:
|
||||
augmentations:
|
||||
- execution_time
|
||||
- trace-cmd
|
||||
- csv
|
||||
iterations: 1
|
||||
device: generic_android
|
||||
device_config:
|
||||
adb_server: null
|
||||
big_core: null
|
||||
core_clusters: null
|
||||
core_names: null
|
||||
device: null
|
||||
disable_selinux: true
|
||||
executables_directory: null
|
||||
load_default_modules: true
|
||||
logcat_poll_period: null
|
||||
model: null
|
||||
modules: null
|
||||
package_data_directory: /data/data
|
||||
shell_prompt: !<tag:wa:regex> '8:^.*(shell|root)@.*:/\S* [#$] '
|
||||
working_directory: null
|
||||
execution_time: {}
|
||||
trace-cmd:
|
||||
buffer_size: null
|
||||
buffer_size_step: 1000
|
||||
events:
|
||||
- sched*
|
||||
- irq*
|
||||
- power*
|
||||
- thermal*
|
||||
functions: null
|
||||
no_install: false
|
||||
report: true
|
||||
report_on_target: false
|
||||
csv:
|
||||
extra_columns: null
|
||||
use_all_classifiers: false
|
||||
workloads:
|
||||
- name: dhrystone
|
||||
params:
|
||||
cleanup_assets: true
|
||||
delay: 0
|
||||
duration: 0
|
||||
mloops: 0
|
||||
taskset_mask: 0
|
||||
threads: 4
|
||||
|
||||
|
||||
Run Command
|
||||
============
|
||||
These examples show some useful options that can be used with WA's ``run`` command.
|
||||
|
||||
Once we have created an agenda, we can use it with WA by passing it as an argument to
the run command, e.g.::
|
||||
|
||||
wa run <path/to/agenda> (e.g. wa run ~/myagenda.yaml)
|
||||
|
||||
By default WA will use the "wa_output" directory to store its output; to
redirect the output to a different directory we can use::
|
||||
|
||||
wa run dhrystone -d my_output_directory
|
||||
|
||||
We can also tell WA to use a different config.yaml file by supplying it with the ``-c`` argument.
One use case for passing additional config files is if you have multiple devices
you wish to test with WA: you can store the relevant device configuration in
individual config files and then pass the file corresponding to the device you wish
to use for that particular test::
|
||||
|
||||
wa run -c myconfig.yaml dhrystone
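
Such a per-device file is just a regular WA configuration file. As a minimal
sketch (the serial number is illustrative, and this assumes the settings shown
under the ``config`` section of the generated agenda above can appear at the top
level of a config file), ``myconfig.yaml`` for one particular Android device
might contain:

.. code-block:: yaml

    # myconfig.yaml - configuration for one specific device
    device: generic_android
    device_config:
        device: 0123456789ABCDEF    # adb serial of this device (illustrative)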
|
||||
|
||||
To use the same output directory but overwrite the existing contents to
store new dhrystone results::
|
||||
|
||||
wa run -f dhrystone
|
||||
|
||||
To display verbose output while running memcpy::
|
||||
|
||||
wa run --verbose memcpy
|
||||
|
||||
|
||||
.. _output_directory:
|
||||
|
||||
Output Directory
|
||||
================
|
||||
|
||||
The exact contents of the output directory will depend on the configuration options
|
||||
used, instruments and output processors enabled, etc. Typically, the output
|
||||
directory will contain a results file at the top level that lists all
|
||||
measurements that were collected (currently, csv and json formats are
|
||||
supported), along with a subdirectory for each iteration executed with output
|
||||
for that specific iteration.
|
||||
|
||||
At the top level, there will also be a ``run.log`` file containing the complete log
|
||||
output for the execution. The contents of this file are equivalent to what you
would get in the console when using the ``--verbose`` option.
|
||||
|
||||
If a job fails to complete for some reason, then the output directory for that
|
||||
job will be moved into a new directory called ``__failed``. If the job was
|
||||
running on a platform that supports Android, then WA will take a screen capture
|
||||
and UI dump from the device.
|
||||
|
||||
Finally, there will be a ``__meta`` subdirectory. This will contain a copy of
|
||||
the agenda file used to run the workloads along with any other configuration
|
||||
files that were supplied for execution.
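
Putting this together, an output directory might look something like the
following sketch (the results file format and the names of the per-iteration
subdirectories are illustrative and will vary with your configuration):

.. code-block:: none

    wa_output/
    ├── __meta/                # copy of the agenda and any supplied config files
    ├── __failed/              # only present if one or more jobs failed
    ├── run.log                # complete log output for the run
    ├── results.csv            # top-level results file (csv and/or json)
    └── wk1-dhrystone-1/       # one subdirectory per executed iteration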
|
||||
|
||||
Uninstall
|
||||
=========
|
||||
|
||||
If you have installed Workload Automation via ``pip``, then run this command to
|
||||
uninstall it::
|
||||
|
||||
sudo pip uninstall wa
|
||||
|
||||
|
||||
.. Note:: It will *not* remove any user configuration (e.g. the ~/.workload_automation
|
||||
directory).
|
||||
|
||||
Upgrade
|
||||
=======
|
||||
|
||||
To upgrade Workload Automation to the latest version via ``pip``, run::
|
||||
|
||||
sudo pip install --upgrade --no-deps wa
|
||||
|
16
doc/source/user_reference.rst
Normal file
16
doc/source/user_reference.rst
Normal file
@ -0,0 +1,16 @@
|
||||
.. _user_reference:
|
||||
|
||||
===============
|
||||
User Reference
|
||||
===============
|
||||
|
||||
|
||||
.. contents:: Contents
|
||||
:depth: 2
|
||||
:local:
|
||||
|
||||
---------------------------------------------------------------
|
||||
|
||||
.. include:: user_reference/configuration.rst
|
||||
.. include:: user_reference/invocation.rst
|
||||
.. include:: user_reference/output_api.rst
|
54
doc/source/user_reference/configuration.rst
Normal file
54
doc/source/user_reference/configuration.rst
Normal file
@ -0,0 +1,54 @@
|
||||
.. _configuration-specification:
|
||||
|
||||
|
||||
Configuration
|
||||
=============
|
||||
|
||||
Run Configuration
|
||||
------------------
|
||||
In addition to specifying run execution parameters through an agenda, the
|
||||
behaviour of WA can be modified through configuration file(s). The default
|
||||
configuration file is ``~/.workload_automation/config.yaml`` (the location can
|
||||
be changed by setting the ``WA_USER_DIRECTORY`` environment variable, see
|
||||
:ref:`envvars` section below). This file will be created when you first run WA
|
||||
if it does not already exist. This file must always exist and will always be
|
||||
loaded. You can add to or override the contents of that file on invocation of
|
||||
Workload Automation by specifying an additional configuration file using
|
||||
the ``--config`` option. Variables with specific names will be picked up by the
framework and used to modify the behaviour of Workload Automation.
|
||||
|
||||
.. _available_settings:
|
||||
|
||||
.. include:: run_config/Run_Configuration.rst
|
||||
|
||||
Meta Configuration
|
||||
------------------
|
||||
|
||||
There are also a couple of settings that are used to provide additional metadata
|
||||
for a run. These may get picked up by instruments or output processors to
|
||||
attach context to results.
|
||||
|
||||
.. include:: run_config/Meta_Configuration.rst
|
||||
|
||||
|
||||
.. _envvars:
|
||||
|
||||
Environment Variables
|
||||
---------------------
|
||||
|
||||
In addition to standard configuration described above, WA behaviour can be
|
||||
altered through environment variables. These can determine where WA looks for
|
||||
various assets when it starts.
|
||||
|
||||
.. confval:: WA_USER_DIRECTORY
|
||||
|
||||
This is the location where WA will look for config.yaml, plugins, dependencies,
|
||||
and it will also be used for local caches, etc. If this variable is not set,
|
||||
the default location is ``~/.workload_automation`` (this is created when WA
|
||||
is installed).
|
||||
|
||||
.. note:: This location **must** be writable by the user who runs WA.
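
    For example, to keep WA state for a particular project in its own directory
    (the path is illustrative)::

        export WA_USER_DIRECTORY=~/projects/myproject/wa_home
        wa run my_agenda.yaml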
|
||||
|
||||
|
||||
.. include:: user_reference/runtime_parameters.rst
|
||||
|
317
doc/source/user_reference/invocation.rst
Normal file
317
doc/source/user_reference/invocation.rst
Normal file
@ -0,0 +1,317 @@
|
||||
.. _invocation:
|
||||
|
||||
Commands
|
||||
========
|
||||
|
||||
Installing the wa package will add the ``wa`` command to your system,
|
||||
which you can run from anywhere. This has a number of sub-commands, which can
|
||||
be viewed by executing ::
|
||||
|
||||
wa -h
|
||||
|
||||
Individual sub-commands are discussed in detail below.
|
||||
|
||||
.. _run-command:
|
||||
|
||||
Run
|
||||
---
|
||||
|
||||
The most common sub-command you will use is ``run``. This will run the specified
workload(s) and process the resulting output. This takes a single mandatory
argument that specifies what you want WA to run. This could be either a
workload name, or a path to an "agenda" file that allows you to specify multiple
workloads as well as a lot of additional configuration (see :ref:`agenda`
|
||||
section for details). Executing ::
|
||||
|
||||
wa run -h
|
||||
|
||||
will display help for this subcommand, which will look something like this::
|
||||
|
||||
usage: wa run [-h] [-c CONFIG] [-v] [--version] [-d DIR] [-f] [-i ID]
|
||||
[--disable INSTRUMENT]
|
||||
AGENDA
|
||||
|
||||
Execute automated workloads on a remote device and process the resulting
|
||||
output.
|
||||
|
||||
positional arguments:
|
||||
AGENDA Agenda for this workload automation run. This defines
|
||||
which workloads will be executed, how many times, with
|
||||
which tunables, etc. See example agendas in
|
||||
/usr/local/lib/python2.7/dist-packages/wa for an
|
||||
example of how this file should be structured.
|
||||
|
||||
optional arguments:
|
||||
-h, --help show this help message and exit
|
||||
-c CONFIG, --config CONFIG
|
||||
specify an additional config.yaml
|
||||
-v, --verbose The scripts will produce verbose output.
|
||||
--version show program's version number and exit
|
||||
-d DIR, --output-directory DIR
|
||||
Specify a directory where the output will be
|
||||
generated. If the directory already exists, the script
|
||||
will abort unless -f option (see below) is used, in
|
||||
which case the contents of the directory will be
|
||||
overwritten. If this option is not specified, then
|
||||
wa_output will be used instead.
|
||||
-f, --force Overwrite output directory if it exists. By default,
|
||||
the script will abort in this situation to prevent
|
||||
accidental data loss.
|
||||
-i ID, --id ID Specify a workload spec ID from an agenda to run. If
|
||||
this is specified, only that particular spec will be
|
||||
run, and other workloads in the agenda will be
|
||||
ignored. This option may be used to specify multiple
|
||||
IDs.
|
||||
--disable INSTRUMENT Specify an instrument or output processor to disable
|
||||
from the command line. This equivalent to adding
|
||||
"~{metavar}" to the instruments list in the
|
||||
agenda. This can be used to temporarily disable a
|
||||
troublesome instrument for a particular run without
|
||||
introducing permanent change to the config (which one
|
||||
might then forget to revert). This option may be
|
||||
specified multiple times.
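
For example, the following would run only one workload spec from the agenda,
write the output to a custom directory (overwriting it if it already exists), and
temporarily disable the ``trace-cmd`` instrument (the agenda path and spec ID are
illustrative)::

    wa run my_agenda.yaml -d dhrystone_output -f -i dhrystone-1 --disable trace-cmd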
|
||||
|
||||
.. _list-command:
|
||||
|
||||
List
|
||||
----
|
||||
|
||||
This lists all plugins of a particular type. For example ::
|
||||
|
||||
wa list instruments
|
||||
|
||||
will list all instruments currently included in WA. The list will consist of
|
||||
plugin names and short descriptions of the functionality they offer, e.g.
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
#..
|
||||
cpufreq: Collects dynamic frequency (DVFS) settings before and after
|
||||
workload execution.
|
||||
dmesg: Collected dmesg output before and during the run.
|
||||
energy_measurement: This instrument is designed to be used as an interface to
|
||||
the various energy measurement instruments located
|
||||
in devlib.
|
||||
execution_time: Measure how long it took to execute the run() methods of
|
||||
a Workload.
|
||||
file_poller: Polls the given files at a set sample interval. The values
|
||||
are output in CSV format.
|
||||
fps: Measures Frames Per Second (FPS) and associated metrics for
|
||||
a workload.
|
||||
#..
|
||||
|
||||
|
||||
You can use the same syntax to quickly display information about ``commands``,
|
||||
``energy_instrument_backends``, ``instruments``, ``output_processors``, ``resource_getters``,
|
||||
``targets`` and ``workloads``.
|
||||
|
||||
.. _show-command:
|
||||
|
||||
Show
|
||||
----
|
||||
|
||||
This will show detailed information about a plugin, including a more in-depth
description and any parameters/configuration that are available. For example
|
||||
executing ::
|
||||
|
||||
wa show benchmarkpi
|
||||
|
||||
will produce something like: ::
|
||||
|
||||
|
||||
benchmarkpi
|
||||
-----------
|
||||
|
||||
Measures the time the target device takes to run and complete the Pi
|
||||
calculation algorithm.
|
||||
|
||||
http://androidbenchmark.com/howitworks.php
|
||||
|
||||
from the website:
|
||||
|
||||
The whole idea behind this application is to use the same Pi calculation
|
||||
algorithm on every Android Device and check how fast that proccess is.
|
||||
Better calculation times, conclude to faster Android devices. This way you
|
||||
can also check how lightweight your custom made Android build is. Or not.
|
||||
|
||||
As Pi is an irrational number, Benchmark Pi does not calculate the actual Pi
|
||||
number, but an approximation near the first digits of Pi over the same
|
||||
calculation circles the algorithms needs.
|
||||
|
||||
So, the number you are getting in miliseconds is the time your mobile device
|
||||
takes to run and complete the Pi calculation algorithm resulting in a
|
||||
approximation of the first Pi digits.
|
||||
|
||||
parameters
|
||||
~~~~~~~~~~
|
||||
|
||||
cleanup_assets : boolean
|
||||
If ``True``, if assets are deployed as part of the workload they
|
||||
will be removed again from the device as part of finalize.
|
||||
|
||||
default: ``True``
|
||||
|
||||
package_name : str
|
||||
The package name that can be used to specify
|
||||
the workload apk to use.
|
||||
|
||||
install_timeout : integer
|
||||
Timeout for the installation of the apk.
|
||||
|
||||
constraint: ``value > 0``
|
||||
|
||||
default: ``300``
|
||||
|
||||
version : str
|
||||
The version of the package to be used.
|
||||
|
||||
variant : str
|
||||
The variant of the package to be used.
|
||||
|
||||
strict : boolean
|
||||
Whether to throw an error if the specified package cannot be found
|
||||
on host.
|
||||
|
||||
force_install : boolean
|
||||
Always re-install the APK, even if matching version is found already installed
|
||||
on the device.
|
||||
|
||||
uninstall : boolean
|
||||
If ``True``, will uninstall workload's APK as part of teardown.'
|
||||
|
||||
exact_abi : boolean
|
||||
If ``True``, workload will check that the APK matches the target
|
||||
device ABI, otherwise any suitable APK found will be used.
|
||||
|
||||
markers_enabled : boolean
|
||||
If set to ``True``, workloads will insert markers into logs
|
||||
at various points during execution. These markes may be used
|
||||
by other plugins or post-processing scripts to provide
|
||||
measurments or statistics for specific parts of the workload
|
||||
execution.
|
||||
|
||||
|
||||
.. _create-command:
|
||||
|
||||
Create
|
||||
------
|
||||
|
||||
This aids in the creation of new WA-related objects, for example agendas and workloads.
For more detailed information on creating workloads please see the
:ref:`adding a workload <adding-a-workload>` section.
|
||||
|
||||
agendas:
|
||||
As an example, to create an agenda that will run the dhrystone and memcpy workloads,
use the status and hwmon augmentations, run each test 3 times and save the result
into the file ``my_agenda.yaml``, the following command can be used::
|
||||
|
||||
wa create agenda dhrystone memcpy status hwmon -i 3 -o my_agenda.yaml
|
||||
|
||||
Which will produce something like::
|
||||
|
||||
config:
|
||||
augmentations:
|
||||
- status
|
||||
- hwmon
|
||||
status: {}
|
||||
hwmon: {}
|
||||
iterations: 3
|
||||
workloads:
|
||||
- name: dhrystone
|
||||
params:
|
||||
cleanup_assets: true
|
||||
delay: 0
|
||||
duration: 0
|
||||
mloops: 0
|
||||
taskset_mask: 0
|
||||
threads: 4
|
||||
- name: memcpy
|
||||
params:
|
||||
buffer_size: 5242880
|
||||
cleanup_assets: true
|
||||
cpus: null
|
||||
iterations: 1000
|
||||
|
||||
This will be populated with default values which can then be customised for the
|
||||
particular use case.
|
||||
|
||||
|
||||
.. _record_command:
|
||||
|
||||
Record
|
||||
------
|
||||
|
||||
This command simplifies the process of recording revent files. It will
automatically deploy revent and has options to automatically open apps and
record specified stages of a workload. Revent allows you to record raw inputs
such as screen swipes or button presses. This can be useful for recording inputs
for workloads such as games that don't have XML UI layouts that can be used with
UIAutomator. The drawback is that revent recordings are specific to the
device type they were recorded on. WA uses two parts in the names of revent
recordings, in the format ``{device_name}.{suffix}.revent``:

- ``device_name`` can either be specified manually with the ``-d`` argument or it
  can be determined automatically. On Android devices it will be obtained from
  ``build.prop``, on Linux devices it is obtained from ``/proc/device-tree/model``.
- ``suffix`` is used by WA to determine which part of the app execution the
  recording is for; currently these are either ``setup``, ``run``,
  ``extract_results`` or ``teardown``. All stages except ``run`` are optional for
  playback. To specify which stages should be recorded, use the ``-s``, ``-r``,
  ``-e`` or ``-t`` arguments respectively, or optionally ``-a`` to indicate that
  all stages should be recorded (see the example below).
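
For example, to record all stages for an app launched from a particular package
(the package name is illustrative)::

    wa record -a -p com.example.game

Following the naming scheme above, this would produce recordings with names such
as ``mydevice.setup.revent`` and ``mydevice.run.revent``, where ``mydevice`` is
replaced by the detected device name.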
|
||||
|
||||
|
||||
The full set of options for this command are::
|
||||
|
||||
usage: wa record [-h] [-c CONFIG] [-v] [--version] [-d DEVICE] [-o FILE] [-s]
|
||||
[-e] [-t] [-a] [-C] [-p PACKAGE | -w WORKLOAD]
|
||||
|
||||
optional arguments:
|
||||
-h, --help show this help message and exit
|
||||
-c CONFIG, --config CONFIG
|
||||
specify an additional config.yaml
|
||||
-v, --verbose The scripts will produce verbose output.
|
||||
--version show program's version number and exit
|
||||
-d DEVICE, --device DEVICE
|
||||
Specify the device on which to run. This will take
|
||||
precedence over the device (if any) specified in
|
||||
configuration.
|
||||
-o FILE, --output FILE
|
||||
Specify the output file
|
||||
-s, --setup Record a recording for setup stage
|
||||
-e, --extract_results
|
||||
Record a recording for extract_results stage
|
||||
-t, --teardown Record a recording for teardown stage
|
||||
-a, --all Record recordings for available stages
|
||||
-C, --clear Clear app cache before launching it
|
||||
-p PACKAGE, --package PACKAGE
|
||||
Android package to launch before recording
|
||||
-w WORKLOAD, --workload WORKLOAD
|
||||
Name of a revent workload (mostly games)
|
||||
|
||||
For more information please see :ref:`Revent Recording <revent-recording>`.
|
||||
|
||||
.. _replay-command:
|
||||
|
||||
Replay
|
||||
------
|
||||
|
||||
Alongside ``record``, WA also has a command to play back a single recorded revent file.
It behaves similarly to the ``record`` command, taking a subset of the same options and allowing you to automatically launch a package on the device::
|
||||
|
||||
usage: wa replay [-h] [-c CONFIG] [-v] [--debug] [--version] [-p PACKAGE] [-C]
|
||||
revent
|
||||
|
||||
positional arguments:
|
||||
revent The name of the file to replay
|
||||
|
||||
optional arguments:
|
||||
-h, --help show this help message and exit
|
||||
-c CONFIG, --config CONFIG
|
||||
specify an additional config.py
|
||||
-v, --verbose The scripts will produce verbose output.
|
||||
--debug Enable debug mode. Note: this implies --verbose.
|
||||
--version show program's version number and exit
|
||||
-p PACKAGE, --package PACKAGE
|
||||
Package to launch before recording
|
||||
-C, --clear Clear app cache before launching it
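
For example, to replay a previously recorded ``run`` stage while first launching
the package it was recorded against (file and package names are illustrative)::

    wa replay -p com.example.game mydevice.run.revent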
|
||||
|
||||
For more information please see :ref:`Revent Replaying <revent_replaying>`.
|
82
doc/source/user_reference/output_api.rst
Normal file
82
doc/source/user_reference/output_api.rst
Normal file
@ -0,0 +1,82 @@
|
||||
.. _output-api:
|
||||
|
||||
Output API
|
||||
==========
|
||||
|
||||
WA3 now has an output API that can be used to post-process a run's
:ref:`output directory structure <output_directory>`; this can be done using WA's
``RunOutput`` object.
|
||||
|
||||
Example:
|
||||
|
||||
If we have an existing WA output called ``wa_output`` in the current working
|
||||
directory we can initialize a ``RunOutput`` as follows:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
In [1]: from wa import RunOutput
|
||||
...:
|
||||
...: output_folder = 'wa_output'
|
||||
...: run_output = RunOutput(output_folder)
|
||||
|
||||
|
||||
|
||||
From here we can retrieve different information about the run. For example if we
|
||||
want to see what the status of the run was and retrieve the metrics recorded from
|
||||
the first run we can do the following:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
In [2]: run_output.status
|
||||
Out[2]: OK(7)
|
||||
|
||||
In [3]: run_output.jobs
|
||||
Out[3]:
|
||||
[<wa.framework.output.JobOutput at 0x7f70358a1f10>,
|
||||
<wa.framework.output.JobOutput at 0x7f70358a1150>,
|
||||
<wa.framework.output.JobOutput at 0x7f7035862810>,
|
||||
<wa.framework.output.JobOutput at 0x7f7035875090>]
|
||||
|
||||
In [4]: job_1 = run_output.jobs[0]
|
||||
|
||||
In [5]: job_1.label
|
||||
Out[5]: u'dhrystone'
|
||||
|
||||
In [6]: job_1.metrics
|
||||
Out[6]:
|
||||
[<thread 0 score: 14423105 (+)>,
|
||||
<thread 0 DMIPS: 8209 (+)>,
|
||||
<thread 1 score: 14423105 (+)>,
|
||||
<thread 1 DMIPS: 8209 (+)>,
|
||||
<thread 2 score: 14423105 (+)>,
|
||||
<thread 2 DMIPS: 8209 (+)>,
|
||||
<thread 3 score: 18292638 (+)>,
|
||||
<thread 3 DMIPS: 10411 (+)>,
|
||||
<thread 4 score: 17045532 (+)>,
|
||||
<thread 4 DMIPS: 9701 (+)>,
|
||||
<thread 5 score: 14150917 (+)>,
|
||||
<thread 5 DMIPS: 8054 (+)>,
|
||||
<time: 0.184497 seconds (-)>,
|
||||
<total DMIPS: 52793 (+)>,
|
||||
<total score: 92758402 (+)>]
|
||||
|
||||
|
||||
We can also retrieve information about the device that the run was performed on:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
In [7]: run_output.target_info.os
|
||||
Out[7]: u'android'
|
||||
|
||||
In [8]: run_output.target_info.os_version
|
||||
Out[8]:
|
||||
OrderedDict([(u'all_codenames', u'REL'),
|
||||
(u'incremental', u'3687331'),
|
||||
(u'preview_sdk', u'0'),
|
||||
(u'base_os', u''),
|
||||
(u'release', u'7.1.1'),
|
||||
(u'codename', u'REL'),
|
||||
(u'security_patch', u'2017-03-05'),
|
||||
(u'sdk', u'25')])
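
Building on the attributes shown above, a short script can walk over all jobs in
a run and print the metrics each one collected (a minimal sketch; only the
``status``, ``jobs``, ``label`` and ``metrics`` attributes demonstrated above are
used, and the output directory path is illustrative):

.. code-block:: python

    from wa import RunOutput

    # Load an existing output directory.
    run_output = RunOutput('wa_output')
    print('Run status: {}'.format(run_output.status))

    # Print each job's label followed by the metrics it collected.
    for job in run_output.jobs:
        print(job.label)
        for metric in job.metrics:
            print('    {}'.format(metric))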
|
||||
|
||||
|
229
doc/source/user_reference/runtime_parameters.rst
Normal file
229
doc/source/user_reference/runtime_parameters.rst
Normal file
@ -0,0 +1,229 @@
|
||||
.. _runtime-parmeters:
|
||||
|
||||
Runtime Parameters
|
||||
------------------
|
||||
|
||||
.. contents:: Contents
|
||||
:local:
|
||||
|
||||
Runtime parameters are options that can be specified to automatically configure
|
||||
the device at runtime. They can be specified at the global level in the agenda or
|
||||
for individual workloads.
|
||||
|
||||
Example
|
||||
^^^^^^^
|
||||
Say we want to perform an experiment on an Android big.LITTLE device to compare
the power consumption between the big and LITTLE clusters when running the dhrystone
workload. Assuming we have additional instrumentation active for this device
so that we can measure the power the device is consuming, to reduce external
factors we want to ensure that the device has its screen turned off and that it
has airplane mode turned on for both tests. We will then run 2 :ref:`sections
<sections>`, each of which will enable a single cluster on the device, set the cores to their
maximum frequency and disable all available idle states.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
#..
|
||||
workloads:
|
||||
- name: dhrystone
|
||||
iterations: 1
|
||||
runtime_parameters:
|
||||
airplane_mode: true
|
||||
screen_on: false
|
||||
sections:
|
||||
- id: LITTLES
|
||||
runtime_parameters:
|
||||
num_little_cores: 4
|
||||
little_governor: userspace
|
||||
little_frequency: max
|
||||
little_idle_states: none
|
||||
num_big_cores: 0
|
||||
|
||||
- id: BIGS
|
||||
runtime_parameters:
|
||||
num_big_cores: 4
|
||||
big_governor: userspace
|
||||
big_frequency: max
|
||||
big_idle_states: none
|
||||
num_little_cores: 0
|
||||
|
||||
|
||||
HotPlug
|
||||
^^^^^^^
|
||||
|
||||
Parameters:
|
||||
|
||||
:num_cores: An ``int`` that specifies the total number of cpu cores to be online.
|
||||
|
||||
:num_<core_name>_cores: An ``int`` that specifies the total number of that particular core
to be online. The target will be queried and, if the core_names can
be determined, a parameter for each of the unique core names will be
available.
|
||||
|
||||
:cpu<core_no>_online: A ``boolean`` that specifies whether that particular cpu, e.g. cpu0, will
be online.
|
||||
|
||||
If big.LITTLE is detected for the device, an additional 2 parameters are available:
|
||||
|
||||
:num_big_cores: An ``int`` that specifies the total number of `big` cpu cores to be online.
|
||||
|
||||
:num_little_cores: An ``int`` that specifies the total number of `little` cpu cores to be online.
|
||||
|
||||
|
||||
|
||||
.. Note:: Please note that if the device in question is operating its own dynamic
hotplugging then WA may be unable to set the CPU state, or its settings may be
overridden. Unfortunately the method of disabling dynamic hotplugging will vary
from device to device.
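
As a brief sketch of how these parameters might appear in an agenda (values are
illustrative), the following would run dhrystone with four cores online in total
while explicitly keeping cpu0 online:

.. code-block:: yaml

    workloads:
        - name: dhrystone
          runtime_parameters:
              num_cores: 4
              cpu0_online: true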
|
||||
|
||||
|
||||
CPUFreq
|
||||
^^^^^^^
|
||||
|
||||
:frequency: An ``int`` that can be used to specify a frequency for all cores if there are common frequencies available.
|
||||
|
||||
.. Note:: When setting the frequency, if the governor is not set to userspace then WA will attempt to set the maximum
|
||||
and minimum frequencies to mimic the desired behaviour.
|
||||
|
||||
:max_frequency: An ``int`` that can be used to specify a maximum frequency for all cores if there are common frequencies available.
|
||||
|
||||
:min_frequency: An ``int`` that can be used to specify a minimum frequency for all cores if there are common frequencies available.
|
||||
|
||||
:governor: A ``string`` that can be used to specify the governor for all cores if there are common governors available.
|
||||
|
||||
|
||||
|
||||
:governor_tunable: A ``dict`` that can be used to specify governor
|
||||
tunables for all cores, unlike the other common parameters these are not
|
||||
validated at the beginning of the run therefore incorrect values will cause
|
||||
an error during runtime.
|
||||
|
||||
:<core_name>_frequency: An ``int`` that can be used to specify a frequency for cores of a particular type e.g. 'A72'.
|
||||
|
||||
:<core_name>_max_frequency: An ``int`` that can be used to specify a maximum frequency for cores of a particular type e.g. 'A72'.
|
||||
|
||||
:<core_name>_min_frequency: An ``int`` that can be used to specify a minimum frequency for cores of a particular type e.g. 'A72'.
|
||||
|
||||
:<core_name>_governor: A ``string`` that can be used to specify the governor for cores of a particular type e.g. 'A72'.
|
||||
|
||||
|
||||
|
||||
:<core_name>_governor_tunable: A ``dict`` that can be used to specify governor
|
||||
tunables for cores of a particular type e.g. 'A72', these are not
|
||||
validated at the beginning of the run therefore incorrect values will cause
|
||||
an error during runtime.
|
||||
|
||||
|
||||
:cpu<no>_frequency: An ``int`` that can be used to specify a frequency for a particular core e.g. 'cpu0'.
|
||||
|
||||
:cpu<no>_max_frequency: An ``int`` that can be used to specify a maximum frequency for a particular core e.g. 'cpu0'.
|
||||
|
||||
:cpu<no>_min_frequency: An ``int`` that can be used to specify a minimum frequency for a particular core e.g. 'cpu0'.
|
||||
|
||||
:cpu<no>_governor: A ``string`` that can be used to specify the governor for a particular core e.g. 'cpu0'.
|
||||
|
||||
|
||||
|
||||
:cpu<no>_governor_tunable: A ``dict`` that can be used to specify governor
|
||||
tunables for a particular core e.g. 'cpu0', these are not
|
||||
validated at the beginning of the run therefore incorrect values will cause
|
||||
an error during runtime.
|
||||
|
||||
|
||||
If big.LITTLE is detected for the device, an additional set of parameters is available:
|
||||
|
||||
:big_frequency: An ``int`` that can be used to specify a frequency for the big cores.
|
||||
|
||||
:big_max_frequency: An ``int`` that can be used to specify a maximum frequency for the big cores.
|
||||
|
||||
:big_min_frequency: An ``int`` that can be used to specify a minimum frequency for the big cores.
|
||||
|
||||
:big_governor: A ``string`` that can be used to specify the governor for the big cores.
|
||||
|
||||
|
||||
|
||||
:big_governor_tunable: A ``dict`` that can be used to specify governor
|
||||
tunables for the big cores, these are not
|
||||
validated at the beginning of the run therefore incorrect values will cause
|
||||
an error during runtime.
|
||||
|
||||
:little_frequency: An ``int`` that can be used to specify a frequency for the little cores.
|
||||
|
||||
:little_max_frequency: An ``int`` that can be used to specify a maximum frequency for the little cores.
|
||||
|
||||
:little_min_frequency: An ``int`` that can be used to specify a minimum frequency for the little cores.
|
||||
|
||||
:little_governor: A ``string`` that can be used to specify the governor for the little cores.
|
||||
|
||||
|
||||
|
||||
:little_governor_tunable: A ``dict`` that can be used to specify governor
|
||||
tunables for the little cores, these are not
|
||||
validated at the beginning of the run therefore incorrect values will cause
|
||||
an error during runtime.
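
As a sketch of how the per-cpu variants above might be used (values are
illustrative), the following would set cpu0 to the userspace governor at its
maximum available frequency for a single workload:

.. code-block:: yaml

    workloads:
        - name: dhrystone
          runtime_parameters:
              cpu0_governor: userspace
              cpu0_frequency: max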
|
||||
|
||||
|
||||
CPUIdle
|
||||
^^^^^^^
|
||||
|
||||
:idle_states: A ``string`` or list of strings which can be used to specify what
idle states should be enabled for all cores if there are common idle states
available. 'all' and 'none' are also valid entries as a shorthand.
|
||||
|
||||
:<core_name>_idle_states: A ``string`` or list of strings which can be used to
|
||||
specify what idle states should be enabled for cores of a particular type
e.g. 'A72'. 'all' and 'none' are also valid entries as a shorthand.
|
||||
:cpu<no>_idle_states: A ``string`` or list of strings which can be used to
|
||||
specify what idle states should be enabled for a particular core e.g.
'cpu0'. 'all' and 'none' are also valid entries as a shorthand.
|
||||
|
||||
If big.LITTLE is detected for the device, an additional set of parameters is available:
|
||||
|
||||
:big_idle_states: A ``string`` or list of strings which can be used to specify
|
||||
what idle states should be enabled for the big cores. 'all' and 'none' are
also valid entries as a shorthand.
|
||||
:little_idle_states: A ``string`` or list of strings which can be used to
|
||||
specify what idle states should be enabled for the little cores. 'all' and
|
||||
'none' are also valid entries as a shorthand.
|
||||
|
||||
|
||||
Android Specific Runtime Parameters
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
:brightness: An ``int`` between 0 and 255 (inclusive) to specify the brightness
|
||||
the screen should be set to. Defaults to ``127``.
|
||||
|
||||
:airplane_mode: A ``boolean`` to specify whether airplane mode should be
|
||||
enabled for the device.
|
||||
|
||||
:rotation: A ``string`` to specify the screen orientation for the device. Valid
|
||||
entries are ``NATURAL``, ``LEFT``, ``INVERTED``, ``RIGHT``.
|
||||
|
||||
:screen_on: A ``boolean`` to specify whether the device's screen should be
|
||||
turned on. Defaults to ``true``.
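
As a short sketch combining these parameters (values are illustrative), the
following would dim the screen, lock the orientation and ensure airplane mode is
off while a workload runs:

.. code-block:: yaml

    workloads:
        - name: dhrystone
          runtime_parameters:
              brightness: 50
              rotation: NATURAL
              screen_on: true
              airplane_mode: false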
|
||||
|
||||
|
||||
Setting Sysfiles
|
||||
^^^^^^^^^^^^^^^^
|
||||
In order to perform additional configuration of a target the ``sysfile_values``
|
||||
runtime parameter can be used. The value for this parameter is a mapping (an
|
||||
associative array, in YAML) of file paths onto values that should be written
|
||||
into those files. ``sysfile_values`` is the only runtime parameter that is available
|
||||
for any (Linux) device. Other runtime parameters will depend on the specifics of
|
||||
the device used (e.g. its CPU cores configuration) as detailed above.
|
||||
|
||||
.. note:: By default WA will attempt to verify that the sysfile value was
|
||||
written correctly by reading the node back and comparing the two values. If
|
||||
you do not wish this check to happen, for example if the node you are writing to
is write-only, you can append an ``!`` to the file path to disable this
|
||||
verification.
|
||||
|
||||
For example the following configuration could be used to enable and verify that cpu0
is online, however it will not attempt to check that its governor has been set to
userspace::
|
||||
|
||||
- name: dhrystone
|
||||
runtime_params:
|
||||
sysfile_values:
|
||||
/sys/devices/system/cpu/cpu0/online: 1
|
||||
/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor!: userspace
|
Binary file not shown.
Before Width: | Height: | Size: 102 KiB |
File diff suppressed because it is too large
12
setup.py
12
setup.py
@ -24,9 +24,9 @@ except ImportError:
|
||||
from distutils.core import setup
|
||||
|
||||
|
||||
wlauto_dir = os.path.join(os.path.dirname(__file__), 'wa')
|
||||
wa_dir = os.path.join(os.path.dirname(__file__), 'wa')
|
||||
|
||||
sys.path.insert(0, os.path.join(wlauto_dir, 'framework'))
|
||||
sys.path.insert(0, os.path.join(wa_dir, 'framework'))
|
||||
from version import get_wa_version
|
||||
|
||||
# happends if falling back to distutils
|
||||
@ -41,7 +41,7 @@ except OSError:
|
||||
packages = []
|
||||
data_files = {}
|
||||
source_dir = os.path.dirname(__file__)
|
||||
for root, dirs, files in os.walk(wlauto_dir):
|
||||
for root, dirs, files in os.walk(wa_dir):
|
||||
rel_dir = os.path.relpath(root, source_dir)
|
||||
data = []
|
||||
if '__init__.py' in files:
|
||||
@ -80,11 +80,13 @@ params = dict(
|
||||
'colorama', # Printing with colors
|
||||
'pyYAML', # YAML-formatted agenda parsing
|
||||
'requests', # Fetch assets over HTTP
|
||||
'devlib', # Interacting with devices
|
||||
'devlib>=0.0.4', # Interacting with devices
|
||||
'louie', # callbacks dispatch
|
||||
'wrapt', # better decorators
|
||||
'wrapt', # better decorators
|
||||
'pandas>=0.13.1', # Data analysis and manipulation
|
||||
],
|
||||
dependency_links=['https://github.com/ARM-software/devlib/tarball/master#egg=devlib-0.0.4'],
|
||||
|
||||
extras_require={
|
||||
'other': ['jinja2'],
|
||||
'test': ['nose', 'mock'],
|
||||
|