Mirror of https://github.com/ARM-software/workload-automation.git
Synced 2025-09-04 04:12:42 +01:00

Compare commits: v2.7.0...next-worki (11 commits)

SHA1:
0b9d8f1c5e
a4a428c9ae
d89a52584b
41a3877640
0b1b9d304c
a3962b6323
001239dfe4
6f0de17201
1599c1e0ed
4fc93a8a3c
cd0186d14e
.gitignore (vendored): 17 lines changed
@@ -3,7 +3,6 @@
*.bak
*.o
*.cmd
*.iml
Module.symvers
modules.order
*~
@@ -15,12 +14,9 @@ wa_output/
doc/source/api/
doc/source/extensions/
MANIFEST
wlauto/external/uiauto/**/build/
wlauto/external/uiauto/*.properties
wlauto/external/uiauto/app/libs/
wlauto/external/uiauto/app/proguard-rules.pro
wlauto/external/uiauto/**/.gradle
wlauto/external/uiauto/**/.idea
wlauto/external/uiautomator/bin/
wlauto/external/uiautomator/*.properties
wlauto/external/uiautomator/build.xml
*.orig
local.properties
wlauto/external/revent/libs/
@@ -31,9 +27,4 @@ pmu_logger.mod.c
.tmp_versions
obj/
libs/armeabi
wlauto/workloads/*/uiauto/**/build/
wlauto/workloads/*/uiauto/*.properties
wlauto/workloads/*/uiauto/app/libs/
wlauto/workloads/*/uiauto/app/proguard-rules.pro
wlauto/workloads/*/uiauto/**/.gradle
wlauto/workloads/*/uiauto/**/.idea
wlauto/workloads/*/uiauto/bin/
README.rst: 16 lines changed

@@ -1,19 +1,6 @@
Workload Automation
+++++++++++++++++++

.. raw:: html

    <span style="border: solid red 3px">
    <p align="center">
    <font color="red">DO NOT USE "MASTER" BRANCH</font>. WA2 has been
    deprecated in favor of WA3, which is currently on "next" branch.
    </p>
    <p align="center">
    The master branch will be aligned with next shortly. In the mean time,
    please <font style="bold">USE "NEXT" BRANCH</font> instead.
    </p>
    </span>

Workload Automation (WA) is a framework for executing workloads and collecting
measurements on Android and Linux devices. WA includes automation for nearly 50
workloads (mostly Android), some common instrumentation (ftrace, ARM
@@ -59,8 +46,7 @@ documentation.
Documentation
=============

You can view pre-built HTML documentation
`here <http://workload-automation.readthedocs.io/en/latest/>`_.
You can view pre-built HTML documentation `here <http://pythonhosted.org/wlauto/>`_.

Documentation in reStructuredText format may be found under ``doc/source``. To
compile it into cross-linked HTML, make sure you have `Sphinx
@@ -6,11 +6,6 @@ distributed as part of WA releases.
Scripts
-------

:check_apk_versions: Compares WA workload versions with the versions listed in APK
                     files; if there are any inconsistencies it will highlight them. This
                     requires all APK files to be present for workloads with
                     versions.

:clean_install: Performs a clean install of WA from source. This will remove any
                existing WA install (regardless of whether it was made from
                source or through a tarball with pip).
@@ -1,66 +0,0 @@
#!/usr/bin/env python
import os
from distutils.version import StrictVersion

from wlauto.core.extension_loader import ExtensionLoader
from wlauto.common.android.workload import ApkWorkload
from wlauto.utils.android import ApkInfo

el = ExtensionLoader()


class fake_config(object):
    def __init__(self, ext_loader):
        self.ext_loader = ext_loader
        self.get_extension = ext_loader.get_extension


class fake_device(object):
    platform = "android"


config = fake_config(el)
device = fake_device()

if "WA_USER_DIRECTORY" in os.environ:
    base_path = os.environ["WA_USER_DIRECTORY"]
else:
    base_path = "~/.workload_automation/dependencies/"

apk_workloads = [e for e in el.list_workloads()
                 if issubclass(el.get_extension_class(e.name), ApkWorkload)]

for wl in apk_workloads:
    # Get versions from workloads
    workload_versions = []
    for p in wl.parameters:
        if p.name == "version" and p.allowed_values:
            workload_versions = p.allowed_values
            break
    else:
        continue

    dep_path = os.path.join(os.path.expanduser(base_path), wl.name)
    apks = [apk for apk in os.listdir(dep_path) if apk.endswith(".apk")]

    # Get versions from APK files
    apk_versions = []
    for apk in apks:
        # skip antutu 3d benchmark apk
        if apk == "com.antutu.benchmark.full-1.apk":
            continue
        apk_versions.append(ApkInfo(os.path.join(dep_path, apk)).version_name)

    # Output workload info
    print "Workload: {}".format(wl.name)
    print "Workload Versions: {}".format(sorted(workload_versions, key=StrictVersion))
    print "APK versions: {}".format(sorted(apk_versions, key=StrictVersion))

    # Check for bad/missing versions
    error = False
    for v in apk_versions:
        if v not in workload_versions:
            msg = "APK version '{}' not present in workload list of versions"
            print msg.format(v)
            error = True
    if not error:
        print "OK"
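Note the use of ``StrictVersion`` as the sort key above: it orders version strings
numerically rather than lexicographically. A quick standalone illustration (Python 3
syntax, not part of the repository)::

    from distutils.version import StrictVersion

    versions = ['2.10', '2.4.1', '2.4']
    print(sorted(versions))                     # ['2.10', '2.4', '2.4.1'] (lexicographic)
    print(sorted(versions, key=StrictVersion))  # ['2.4', '2.4.1', '2.10'] (numeric)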
@@ -10,29 +10,22 @@ sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

from wlauto.exceptions import WAError, ToolError
from wlauto.utils.doc import format_simple_table
from wlauto.utils.misc import check_output, get_null, which


def get_aapt_path():
    """Return the full path to aapt tool."""
    sdk_path = os.getenv('ANDROID_HOME')
    if sdk_path:
        build_tools_directory = os.path.join(sdk_path, 'build-tools')
        versions = os.listdir(build_tools_directory)
        for version in reversed(sorted(versions)):
            aapt_path = os.path.join(build_tools_directory, version, 'aapt')
            if os.path.isfile(aapt_path):
                logging.debug('Found aapt for version {}'.format(version))
                return aapt_path
        else:
            raise ToolError('aapt not found. Please make sure at least one Android platform is installed.')
    else:
        logging.debug("ANDROID_HOME is not set, try to find in $PATH.")
        aapt_path = which('aapt')
        if aapt_path:
            logging.debug('Using aapt from {}'.format(aapt_path))
            return aapt_path
    if not sdk_path:
        raise ToolError('Please make sure you have Android SDK installed and have ANDROID_HOME set.')
    build_tools_directory = os.path.join(sdk_path, 'build-tools')
    versions = os.listdir(build_tools_directory)
    for version in reversed(sorted(versions)):
        aapt_path = os.path.join(build_tools_directory, version, 'aapt')
        if os.path.isfile(aapt_path):
            logging.debug('Found aapt for version {}'.format(version))
            return aapt_path
    else:
        raise ToolError('aapt not found. Please make sure at least one Android platform is installed.')


def get_apks(path):
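For context, ``aapt`` is what these scripts rely on to read APK metadata such as the
package name and version. A minimal, self-contained sketch of that usage (the helper
name ``read_apk_version`` and the parsing regex are illustrative, not part of WA)::

    import re
    import subprocess

    def read_apk_version(aapt_path, apk_file):
        """Return the versionName reported by ``aapt dump badging``, or None."""
        output = subprocess.check_output([aapt_path, 'dump', 'badging', apk_file],
                                         universal_newlines=True)
        match = re.search(r"versionName='([^']*)'", output)
        return match.group(1) if match else None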
doc/Makefile: 59 lines changed
@@ -7,11 +7,18 @@ SPHINXBUILD = sphinx-build
|
||||
PAPER =
|
||||
BUILDDIR = build
|
||||
|
||||
SPHINXAPI = sphinx-apidoc
|
||||
SPHINXAPIOPTS =
|
||||
|
||||
WAEXT = ./build_extension_docs.py
|
||||
WAEXTOPTS = source/extensions ../wlauto ../wlauto/external ../wlauto/tests
|
||||
|
||||
|
||||
# Internal variables.
|
||||
PAPEROPT_a4 = -D latex_paper_size=a4
|
||||
PAPEROPT_letter = -D latex_paper_size=letter
|
||||
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
|
||||
ALLSPHINXAPIOPTS = -f $(SPHINXAPIOPTS) -o source/api ../wlauto
|
||||
# the i18n builder cannot share the environment and doctrees with the others
|
||||
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
|
||||
|
||||
@@ -51,38 +58,52 @@ coverage:
|
||||
@echo
|
||||
@echo "Build finished. The coverage reports are in $(BUILDDIR)/coverage."
|
||||
|
||||
html:
|
||||
api: ../wlauto
|
||||
rm -rf source/api/*
|
||||
$(SPHINXAPI) $(ALLSPHINXAPIOPTS)
|
||||
|
||||
waext: ../wlauto
|
||||
rm -rf source/extensions
|
||||
mkdir -p source/extensions
|
||||
$(WAEXT) $(WAEXTOPTS)
|
||||
|
||||
|
||||
sigtab: ../wlauto/core/instrumentation.py source/instrumentation_method_map.template
|
||||
rm -rf source/instrumentation_method_map.rst
|
||||
./build_instrumentation_method_map.py source/instrumentation_method_map.rst
|
||||
|
||||
html: api waext sigtab
|
||||
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
|
||||
@echo
|
||||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
|
||||
|
||||
dirhtml:
|
||||
dirhtml: api waext sigtab
|
||||
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
|
||||
@echo
|
||||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
|
||||
|
||||
singlehtml:
|
||||
singlehtml: api waext sigtab
|
||||
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
|
||||
@echo
|
||||
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
|
||||
|
||||
pickle:
|
||||
pickle: api waext sigtab
|
||||
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
|
||||
@echo
|
||||
@echo "Build finished; now you can process the pickle files."
|
||||
|
||||
json:
|
||||
json: api waext sigtab
|
||||
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
|
||||
@echo
|
||||
@echo "Build finished; now you can process the JSON files."
|
||||
|
||||
htmlhelp:
|
||||
htmlhelp: api waext sigtab
|
||||
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
|
||||
@echo
|
||||
@echo "Build finished; now you can run HTML Help Workshop with the" \
|
||||
".hhp project file in $(BUILDDIR)/htmlhelp."
|
||||
|
||||
qthelp:
|
||||
qthelp: api waext sigtab
|
||||
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
|
||||
@echo
|
||||
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
|
||||
@@ -91,7 +112,7 @@ qthelp:
|
||||
@echo "To view the help file:"
|
||||
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/WorkloadAutomation2.qhc"
|
||||
|
||||
devhelp:
|
||||
devhelp: api
|
||||
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
|
||||
@echo
|
||||
@echo "Build finished."
|
||||
@@ -100,64 +121,64 @@ devhelp:
|
||||
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/WorkloadAutomation2"
|
||||
@echo "# devhelp"
|
||||
|
||||
epub:
|
||||
epub: api waext sigtab
|
||||
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
|
||||
@echo
|
||||
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
|
||||
|
||||
latex:
|
||||
latex: api waext sigtab
|
||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
||||
@echo
|
||||
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
|
||||
@echo "Run \`make' in that directory to run these through (pdf)latex" \
|
||||
"(use \`make latexpdf' here to do that automatically)."
|
||||
|
||||
latexpdf:
|
||||
latexpdf: api waext sigtab
|
||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
||||
@echo "Running LaTeX files through pdflatex..."
|
||||
$(MAKE) -C $(BUILDDIR)/latex all-pdf
|
||||
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
|
||||
|
||||
text:
|
||||
text: api waext sigtab
|
||||
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
|
||||
@echo
|
||||
@echo "Build finished. The text files are in $(BUILDDIR)/text."
|
||||
|
||||
man:
|
||||
man: api waext sigtab
|
||||
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
|
||||
@echo
|
||||
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
|
||||
|
||||
texinfo:
|
||||
texinfo: api waext sigtab
|
||||
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
|
||||
@echo
|
||||
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
|
||||
@echo "Run \`make' in that directory to run these through makeinfo" \
|
||||
"(use \`make info' here to do that automatically)."
|
||||
|
||||
info:
|
||||
info: api waext sigtab
|
||||
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
|
||||
@echo "Running Texinfo files through makeinfo..."
|
||||
make -C $(BUILDDIR)/texinfo info
|
||||
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
|
||||
|
||||
gettext:
|
||||
gettext: api waext sigtab
|
||||
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
|
||||
@echo
|
||||
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
|
||||
|
||||
changes:
|
||||
changes: api waext sigtab
|
||||
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
|
||||
@echo
|
||||
@echo "The overview file is in $(BUILDDIR)/changes."
|
||||
|
||||
linkcheck:
|
||||
linkcheck: api waext sigtab
|
||||
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
|
||||
@echo
|
||||
@echo "Link check complete; look for any errors in the above output " \
|
||||
"or in $(BUILDDIR)/linkcheck/output.txt."
|
||||
|
||||
doctest:
|
||||
doctest: api waext sigtab
|
||||
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
|
||||
@echo "Testing of doctests in the sources finished, look at the " \
|
||||
"results in $(BUILDDIR)/doctest/output.txt."
|
||||
|
@@ -17,7 +17,6 @@

import os
import sys
import shutil

from wlauto import ExtensionLoader
from wlauto.utils.doc import get_rst_from_extension, underline
@@ -31,9 +30,6 @@ def generate_extension_documentation(source_dir, outdir, ignore_paths):
    loader = ExtensionLoader(keep_going=True)
    loader.clear()
    loader.update(paths=[source_dir], ignore_paths=ignore_paths)
    if os.path.exists(outdir):
        shutil.rmtree(outdir)
    os.makedirs(outdir)
    for ext_type in loader.extension_kinds:
        if not ext_type in GENERATE_FOR:
            continue
@@ -16,6 +16,7 @@
import os
import sys
import string
from copy import copy

from wlauto.core.instrumentation import SIGNAL_MAP, PRIORITY_MAP
from wlauto.utils.doc import format_simple_table
@@ -24,7 +25,7 @@ from wlauto.utils.doc import format_simple_table
CONVINIENCE_ALIASES = ['initialize', 'setup', 'start', 'stop', 'process_workload_result',
                       'update_result', 'teardown', 'finalize']

OUTPUT_TEMPLATE_FILE = os.path.join(os.path.dirname(__file__), 'source', 'instrumentation_method_map.template')
OUTPUT_TEMPLATE_FILE = os.path.join(os.path.dirname(__file__), 'source', 'instrumentation_method_map.template')


def escape_trailing_underscore(value):
@@ -36,9 +37,7 @@ def generate_instrumentation_method_map(outfile):
    signal_table = format_simple_table([(k, v) for k, v in SIGNAL_MAP.iteritems()],
                                       headers=['method name', 'signal'], align='<<')
    priority_table = format_simple_table([(escape_trailing_underscore(k), v) for k, v in PRIORITY_MAP.iteritems()],
                                         headers=['prefix', 'priority'], align='<>')
    if os.path.isfile(outfile):
        os.unlink(outfile)
                                         headers=['prefix', 'priority'], align='<>')
    with open(OUTPUT_TEMPLATE_FILE) as fh:
        template = string.Template(fh.read())
    with open(outfile, 'w') as wfh:
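The script above renders the generated tables into an RST page via the standard-library
``string.Template``. A minimal illustration of that mechanism (the placeholder name and
table content below are made up for the example)::

    import string

    template = string.Template("Signal mapping:\n\n$signal_table\n")
    rendered = template.substitute(signal_table='====  ======\nname  signal\n====  ======')
    print(rendered)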
@@ -1,2 +0,0 @@
nose
sphinx==1.6.5
@@ -147,7 +147,7 @@ to 15 million. You can specify this using dhrystone's parameters:

    wa show dhrystone

see the :ref:`Invocation <invocation>` section for details.
see the :ref:`Invocation` section for details.

In addition to configuring the workload itself, we can also specify
configuration for the underlying device. This can be done by setting runtime
@@ -1,57 +0,0 @@
.. _apk_workload_settings:

APK Workloads
=============

APK resolution
--------------

WA has various resource getters that can be configured to locate APK files, but for most people APK files
should be kept in the ``$WA_USER_DIRECTORY/dependencies/SOME_WORKLOAD/`` directory (by default
``~/.workload_automation/dependencies/SOME_WORKLOAD/``). The ``WA_USER_DIRECTORY`` environment variable can be used
to change the location of this folder. The APK files need to be put into the corresponding directories
for the workload they belong to. The name of the file can be anything, but as explained below it may need
to contain certain pieces of information.

All ApkWorkloads have parameters that affect the way in which APK files are resolved: ``exact_abi``,
``force_install`` and ``check_apk``. Their exact behaviours are outlined below.

.. confval:: exact_abi

    If this setting is enabled, WA's resource resolvers will look for the device's ABI in any native
    code present in the APK. By default this setting is disabled, since most APKs will work across all
    devices. You may wish to enable this feature when working with devices that support multiple ABIs (like
    64-bit devices that can run 32-bit APK files) and you are specifically trying to test one or the other.

.. confval:: force_install

    If this setting is enabled, WA will *always* use the APK file on the host, and re-install it on every
    iteration. If there is no APK on the host that is a suitable version and/or ABI for the workload, WA
    will error when ``force_install`` is enabled.

.. confval:: check_apk

    This parameter is used to specify a preference between host and target versions of the app. When set to
    ``True``, WA will prefer the host-side version of the APK. It will check whether the host has the APK and
    whether the host APK meets the version requirements of the workload. If it does and the target already has
    the same version, nothing will be done; otherwise the target's app will be overwritten with the host version.
    If the host is missing the APK, or it does not meet version requirements, WA will fall back to the app
    on the target, if it has the app and it is of a suitable version. When this parameter is set to
    ``False``, WA will prefer to use the version already on the target if it meets the workload's version
    requirements. If it does not, WA will fall back to searching the host for the correct version. In both modes,
    if neither the host nor the target has a suitable version, WA will error and not run the workload.
|
||||
|
||||
.. confval:: version
|
||||
|
||||
This parameter is used to specify which version of uiautomation for the workload is used. In some workloads
|
||||
e.g. ``geekbench`` multiple versions with drastically different UI's are supported. When a workload uses a
|
||||
version it is required for the APK file to contain the uiautomation version in the file name. In the case
|
||||
of antutu the file names could be: ``geekbench_2.apk`` or ``geekbench_3.apk``.
|
||||
|
||||
.. confval:: variant_name
|
||||
|
||||
Some workloads use variants of APK files, this is usually the case with web browser APK files, these work
|
||||
in exactly the same way as the version, the variant of the apk
|
||||
|
@@ -1,454 +1,6 @@
|
||||
=================================
|
||||
What's New in Workload Automation
|
||||
=================================
|
||||
-------------
|
||||
Version 2.6.0
|
||||
-------------
|
||||
|
||||
.. note:: Users who are currently using the GitHub master version of WA should
|
||||
uninstall the existing version before upgrading to avoid potential issues.
|
||||
|
||||
Additions:
|
||||
##########
|
||||
|
||||
Workloads
|
||||
~~~~~~~~~
|
||||
- ``AdobeReader``: A workload that carries out following typical productivity
|
||||
tasks. These include opening a file, performing various gestures and
|
||||
zooms on screen and searching for a predefined set of strings.
|
||||
- ``octaned8``: A workload to run the binary (non-browser) version of the JS
|
||||
benchmark Octane.
|
||||
- ``GooglePlayBooks``: A workload to perform standard productivity tasks with
|
||||
Google Play Books. This workload performs various tasks, such as searching
|
||||
for a book title online, browsing through a book, adding and removing notes,
|
||||
word searching, and querying information about the book.
|
||||
- ``GooglePhotos``: A workload to perform standard productivity tasks with
|
||||
Google Photos. Carries out various tasks, such as browsing images,
|
||||
performing zooms, and post-processing the image.
|
||||
- ``GoogleSlides``: Carries out various tasks, such as creating a new
|
||||
presentation, adding text, images, and shapes, as well as basic editing and
|
||||
playing a slideshow.
|
||||
- ``Youtube``: The workload plays a video, determined by the ``video_source``
|
||||
parameter. While the video is playing, some common actions such as video
|
||||
seeking, pausing playback and navigating the comments section are performed.
|
||||
- ``Skype``: Replacement for the ``skypevideo`` workload. Logs into Skype
|
||||
and initiates a voice or video call with a contact.
|
||||
|
||||
Framework
|
||||
~~~~~~~~~
|
||||
- ``AndroidUxPerfWorkload``: Added a new workload class to encapsulate
|
||||
functionality common to all uxperf workloads.
|
||||
- ``UxPerfUiAutomation``: Added class which contains methods specific to
|
||||
UX performance
|
||||
testing.
|
||||
- ``get-assets``: Added new script and command to retrieve external assets
|
||||
for workloads
|
||||
|
||||
Results Processors
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
- ``uxperf``: Parses device logcat for `UX_PERF` markers to produce performance
|
||||
metrics for workload actions using specified instrumentation.
|
||||
|
||||
Other
|
||||
~~~~~
|
||||
- ``State Detection``: Added feature to use visual state detection to
|
||||
verify the state of a workload after setup and run.
|
||||
|
||||
|
||||
Fixes/Improvements:
|
||||
###################
|
||||
|
||||
Documentation
|
||||
~~~~~~~~~~~~~~
|
||||
- ``Revent``: Added file structure to the documentation.
|
||||
- Clarified documentation regarding binary dependencies.
|
||||
- Updated documentation with ``create`` and ``get-assets`` commands.
|
||||
|
||||
Instruments
|
||||
~~~~~~~~~~~~
|
||||
- ``sysfs_extractor``: Fixed error when `tar.gz` file already existed on device,
|
||||
now overwrites.
|
||||
- ``cpufreq``: Fixed error when `tar.gz` file already existed on device, now
|
||||
overwrites.
|
||||
- ``file-poller``:
|
||||
- Improved csv output.
|
||||
- Added error checking and reporting.
|
||||
- Changed ``files`` to be a mandatory parameter.
|
||||
- ``fps``:
|
||||
- Added a new parameter to fps instrument to specify the time period between
|
||||
calls to ``dumpsys SurfaceFlinger --latency`` when collecting frame data.
|
||||
- Added gfxinfo methods to obtain fps stats. Auto detects and uses appropriate
|
||||
method via android version of device.
|
||||
- Fixed issue with regex.
|
||||
- Now handles empty frames correctly.
|
||||
- ``energy_model``: Ensures that the ``ui`` runtime parameter is only set for
|
||||
ChromeOS devices.
|
||||
- ``ftrace``: Added support to handle traces collected by both WA and devlib.
|
||||
- ``Perf``: Updated 32bit binary file for little endian devices.
|
||||
|
||||
Resource Getters
|
||||
~~~~~~~~~~~~~~~~
|
||||
- ``http_getter``: Now used to try and find executables files from a
|
||||
provided ``remove_assets_url``.
|
||||
|
||||
Result Processors
|
||||
~~~~~~~~~~~~~~~~~
|
||||
- ``cpu_states``: Fixes using stand-alone script with timeline option.
|
||||
|
||||
Workloads
|
||||
~~~~~~~~~
|
||||
- ``antutu``: Fixed setting permissions of ``FINE_LOCATION`` on some devices.
|
||||
- ``bbench`` Fixed handling of missing results.
|
||||
- ``camerarecord``:
|
||||
- Added frame stats collection through dumpsys gfxinfo.
|
||||
- Added possibility to select slow_motion recording mode.
|
||||
- ``Geekbench``:
|
||||
- Fixed output file listing causing pull failure.
|
||||
- Added support for Geekbench 4.
|
||||
- ``recentfling``:
|
||||
- Fixed issue when binaries were not uninstalled correctly.
|
||||
- Scripts are now deployed via ``install()`` to ensure they are executable.
|
||||
- Fixed handling of when a PID file is deleted before reaching processing
|
||||
results stage.
|
||||
- Added parameter to not start any apps before flinging.
|
||||
- ``rt-app``: Added camera recorder simulation.
|
||||
- ``sysbench``: Added arm64 binary.
|
||||
- ``Vellamo``: Fixed capitalization in part of UIAutomation to prevent
|
||||
potential issues.
|
||||
- ``Spec2000``: Now uses WA deployed version of busybox.
|
||||
- ``NetStat``: Updated to support new default logcat format in Android 6.
|
||||
- ``Dex2oat``: Now uses root if available.
|
||||
|
||||
Framework
|
||||
~~~~~~~~~
|
||||
- ``adb_shell``:
|
||||
- Fixed issue when using single quoted command with ``adb_shell``.
|
||||
- Correctly forward stderror to the caller for newer version of adb.
|
||||
- ``revent``
|
||||
- Added ``-S`` argument to "record" command to automatically record a
|
||||
screen capture after a recording is completed.
|
||||
- Fixed issue with multiple iterations of a revent workload.
|
||||
- Added ``-s`` option to executable to allow waiting on stdin.
|
||||
- Removed timeout in command as ``-s`` is specified.
|
||||
- Revent recordings can now be parsed and used within WA.
|
||||
- Fixed issue when some recordings wouldn't be retrieved correctly.
|
||||
- Timeout is now based on recording duration.
|
||||
- Added `magic` and file version to revent files. Revent files should now
|
||||
start with ``REVENT`` followed by the file format version.
|
||||
- Added support for gamepad recording. This type of recording contains
|
||||
only the events from a gamepad device (which is automatically
|
||||
identified).
|
||||
- A ``mode`` field has been added to the recording format to help
|
||||
distinguish between the normal and gamepad recording types.
|
||||
- Added ``-g`` option to ``record`` command to expose the gamepad recording
|
||||
mode.
|
||||
- The structure of revent code has undergone a major overhaul to improve
|
||||
maintainability and robustness.
|
||||
- More detailed ``info`` command output.
|
||||
- Updated Makefile to support debug/production builds.
|
||||
- ``Android API``: Upgraded Android API level from 17 to 18.
|
||||
- ``uiautomator``: The window hierarchy is now dumped to a file when WA fails
|
||||
on android devices.
|
||||
- ``AndroidDevice``:
|
||||
- Added support for downgrading when installing an APK.
|
||||
- Added a ``broadcast_media_mounted`` method to force a re-index of the
|
||||
mediaserver cache for a specified directory.
|
||||
- Now correctly handles ``None`` output for ``get_pids_of()`` when there are no
|
||||
running processes with the specified name.
|
||||
- Renamed the capture method from ``capture_view_hierachy`` to
|
||||
``capture_ui_hierarchy``.
|
||||
- Changed the file extension of the capture file to ``.uix``
|
||||
- Added ``-rf`` to delete_files to be consistent with ``LinuxDevice``.
|
||||
- ``LinuxDevice``: Now ensures output from both stdout and stderr is propagated in
|
||||
the event of a DeviceError.
|
||||
- ``APKWorkload``:
|
||||
- Now ensure APKs are replaced properly when reinstalling.
|
||||
- Now checks APK version and ABI when installing.
|
||||
- Fixed error on some devices when trying to grant permissions that were
|
||||
already granted.
|
||||
- Fixed some permissions not being granted.
|
||||
- Now allows disabling the main activity launch in setup (required for some
|
||||
apps).
|
||||
- Added parameter to clear data on reset (default behaviour unchanged).
|
||||
- Ignores exception for non-fatal permission grant failure.
|
||||
- Fixed issue of multiple versions of the same workload failing to find their APK.
|
||||
- Added method to ensure a valid apk version is used within a workload.
|
||||
- Updated how APK resolution is performed to maximise likelihood of
|
||||
a workload running.
|
||||
- When ``check_apk`` is ``True`` will prefer host APK and if no suitable APK
|
||||
is found, will use target APK if the correct version is present. When ``False``
|
||||
will prefer target apk if it is a valid version otherwise will fallback to
|
||||
host APK.
|
||||
- ``RunConfiguration``: Fixed disabling of instruments in workload specs.
|
||||
- ``Devices``:
|
||||
- Added network connectivity check for devices.
|
||||
- Subclasses can now set ``requires_network`` to ``True`` and network
|
||||
connectivity check will be performed during ``setup()``.
|
||||
- ``Workloads``:
|
||||
- Added network check methods.
|
||||
- Fixed versions to be backwards compatible.
|
||||
- Updated workload versions to match APK files.
|
||||
- Fixed issues with calling super.
|
||||
- ``Assets``: Added script to retrieve external assets for workloads.
|
||||
- ``Execution``: Added a ``clean_up`` global config option to delete WA files from
|
||||
devices.
|
||||
- ``Runner``: No longer takes a screenshot or dump of UI hierarchy for some errors when
|
||||
unnecessary, e.g. host errors.
|
||||
- ``core``: Constraints and allowed values are now checked when set instead of
|
||||
when validating.
|
||||
- ``FpsProcessor``:
|
||||
- Added requirement on ``filtered_vsyncs_to_compose`` for ``total_vsync metric``.
|
||||
- Removed misleading comment in class description.
|
||||
- ``BaseUiAutomation``: Added new Marker API so workloads generate start and end
|
||||
markers with a string name.
|
||||
- ``AndroidUiAutoBenchmark``: Automatically checks for known package versions
|
||||
that don't work well with AndroidUiAutoBenchmark workloads.
|
||||
|
||||
Other
|
||||
~~~~~
|
||||
- Updated setup.py url to be a valid URI.
|
||||
- Fixed workload name in big.Little sample agenda.
|
||||
|
||||
Incompatible changes
|
||||
####################
|
||||
|
||||
Framework
|
||||
~~~~~~~~~
|
||||
- ``check_abi``: Now renamed to ``exact_abi``, is used to ensure that if enabled,
|
||||
only an apk containing no native code or code designed for the devices primary
|
||||
abi is used.
|
||||
- ``AndroidDevice``: Renamed ``supported_eabis`` property to ``supported_abis``
|
||||
to be consistent with linux devices.
|
||||
|
||||
Workloads
|
||||
~~~~~~~~~~
|
||||
- ``skypevideo``: Workload removed and replaced with ``skype`` workload.
|
||||
|
||||
-------------
|
||||
Version 2.5.0
|
||||
-------------
|
||||
|
||||
Additions:
|
||||
##########
|
||||
|
||||
Instruments
|
||||
~~~~~~~~~~~
|
||||
- ``servo_power``: Added support for chromebook servo boards.
|
||||
- ``file_poller``: polls files and outputs a CSV of their values over time.
|
||||
- ``systrace``: The Systrace tool helps analyze the performance of your
|
||||
application by capturing and displaying execution times of your applications
|
||||
processes and other Android system processes.
|
||||
|
||||
Workloads
|
||||
~~~~~~~~~
|
||||
- ``blogbench``: Blogbench is a portable filesystem benchmark that tries to
|
||||
reproduce the load of a real-world busy file server.
|
||||
- ``stress-ng``: Designed to exercise various physical subsystems of a computer
|
||||
as well as the various operating system kernel interfaces.
|
||||
- ``hwuitest``: Uses hwuitest from AOSP to test rendering latency on Android
|
||||
devices.
|
||||
- ``recentfling``: Tests UI jank on android devices.
|
||||
- ``apklaunch``: installs and runs an arbitrary apk file.
|
||||
- ``googlemap``: Launches Google Maps and replays previously recorded
|
||||
interactions.
|
||||
|
||||
Framework
|
||||
~~~~~~~~~
|
||||
- ``wlauto.utils.misc``: Added ``memoised`` function decorator that allows
|
||||
caching of previous function/method call results.
|
||||
- Added new ``Device`` APIs:
|
||||
- ``lsmod``: lists kernel modules
|
||||
- ``insmod``: inserts a kernel module from a ``.ko`` file on the host.
|
||||
- ``get_binary_path``: Checks ``binary_directory`` for the wanted binary,
|
||||
if it is not found there it will try to use ``which``
|
||||
- ``install_if_needed``: Will only install a binary if it is not already
|
||||
on the target.
|
||||
- ``get_device_model``: Gets the model of the device.
|
||||
- ``wlauto.core.execution.ExecutionContext``:
|
||||
- ``add_classfiers``: Allows adding a classfier to all metrics for the
|
||||
current result.
|
||||
|
||||
Other
|
||||
~~~~~
|
||||
- Commands:
|
||||
- ``record``: Simplifies recording revent files.
|
||||
- ``replay``: Plays back revent files.
|
||||
|
||||
Fixes/Improvements:
|
||||
###################
|
||||
|
||||
Devices
|
||||
~~~~~~~
|
||||
- ``juno``:
|
||||
- Fixed ``bootargs`` parameter not being passed to ``_boot_via_uboot``.
|
||||
- Removed default ``bootargs``
|
||||
- ``gem5_linux``:
|
||||
- Added ``login_prompt`` and ``login_password_prompt`` parameters.
|
||||
- ``generic_linux``: ABI is now read from the target device.
|
||||
|
||||
Instruments
|
||||
~~~~~~~~~~~
|
||||
- ``trace-cmd``:
|
||||
- Added the ability to report the binary trace on the target device,
|
||||
removing the need for ``trace-cmd`` binary to be present on the host.
|
||||
- Updated to handle messages that the trace for a CPU is empty.
|
||||
- Made timeout for pulling trace 1 minute at minimum.
|
||||
- ``perf``: per-cpu statistics now get added as metrics to the results (with a
|
||||
classifier used to identify the cpu).
|
||||
- ``daq``:
|
||||
- Fixed bug where an exception would be raised if ``merge_channels=False``
|
||||
- No longer allows duplicate channel labels
|
||||
- ``juno_energy``:
|
||||
- Summary metrics are now calculated from the contents of ``energy.csv`` and
|
||||
added to the overall results.
|
||||
- Added a ``strict`` parameter. When this is set to ``False`` the device
|
||||
check during validation is omitted.
|
||||
- ``sysfs_extractor``: tar and gzip are now performed separately to solve
|
||||
permission issues.
|
||||
- ``fps``:
|
||||
- Now only checks for crashed content if ``crash_check`` is ``True``.
|
||||
- Can now process multiple ``view`` attributes.
|
||||
- ``hwmon``: Sensor naming fixed, they are also now added as result classifiers
|
||||
|
||||
Resource Getters
|
||||
~~~~~~~~~~~~~~~~
|
||||
- ``extension_asset``: Now picks up the path to the mounted filer from the
|
||||
``remote_assets_path`` global setting.
|
||||
|
||||
Result Processors
|
||||
~~~~~~~~~~~~~~~~~
|
||||
- ``cpustates``:
|
||||
- Added the ability to configure how a missing ``START`` marker in the trace
|
||||
is handled.
|
||||
- Now raises a warning when there is a ``START`` marker in the trace but no
|
||||
``STOP`` marker.
|
||||
- Exceptions in PowerStateProcessor no longer stop the processing of the
|
||||
rest of the trace.
|
||||
- Now ensures a known initial state by nudging each CPU to bring it out of
|
||||
idle and writing starting CPU frequencies to the trace.
|
||||
- Added the ability to create a CPU utilisation timeline.
|
||||
- Fixed issues with getting frequencies of hotplugged CPUs
|
||||
- ``csv``: Zero-value classifiers are no longer converted to an empty entry.
|
||||
- ``ipynb_exporter``: Default template no longer shows a blank plot for
|
||||
workloads without ``summary_metrics``
|
||||
|
||||
Workloads
|
||||
~~~~~~~~~
|
||||
- ``vellamo``:
|
||||
- Added support for v3.2.4.
|
||||
- Fixed getting values from logcat.
|
||||
- ``cameracapture``: Updated to work with Android M+.
|
||||
- ``camerarecord``: Updated to work with Android M+.
|
||||
- ``lmbench``:
|
||||
- Added the output file as an artifact.
|
||||
- Added taskset support
|
||||
- ``antutu`` - Added support for v6.0.1
|
||||
- ``ebizzy``: Fixed use of ``os.path`` to ``self.device.path``.
|
||||
- ``bbench``: Fixed browser crashes & permissions issues on android M+.
|
||||
- ``geekbench``:
|
||||
- Added check whether device is rooted.
|
||||
- ``manual``: Now only uses logcat on Android devices.
|
||||
- ``applaunch``:
|
||||
- Fixed ``cleanup`` not getting forwarded to script.
|
||||
- Added the ability to stress IO during app launch.
|
||||
- ``dhrystone``: Now uses WA's resource resolution to find it's binary so it
|
||||
uses the correct ABI.
|
||||
- ``glbench``: Updated for new logcat formatting.
|
||||
|
||||
Framework
|
||||
~~~~~~~~~
|
||||
- ``ReventWorkload``:
|
||||
- Now kills all revent instances on teardown.
|
||||
- Device model name is now used when searching for revent files, falling back
|
||||
to WA device name.
|
||||
- ``BaseLinuxDevice``:
|
||||
- ``killall`` will now run as root by default if the device
|
||||
is rooted.
|
||||
- ``list_file_systems`` now handles blank lines.
|
||||
- All binaries are now installed into ``binaries_directory`` this allows..
|
||||
- Busybox is now deployed on non-root devices.
|
||||
- gzipped property files are no zcat'ed
|
||||
- ``LinuxDevice``:
|
||||
- ``kick_off`` no longer requires root.
|
||||
- ``kick_off`` will now run as root by default if the device is rooted.
|
||||
- No longer raises an exception if a connection was dropped during a reboot.
|
||||
- Added a delay before polling for a connection to avoid re-connecting to a
|
||||
device that is still in the process of rebooting.
|
||||
- ``wlauto.utils.types``: ``list_or_string`` now ensures that elements of a list
|
||||
are strings.
|
||||
- ``AndroidDevice``:
|
||||
- ``kick_off`` no longer requires root.
|
||||
- Build props are now gathered via ``getprop`` rather than trying to parse
|
||||
build.prop directly.
|
||||
- WA now pushes its own ``sqlite3`` binary.
|
||||
- Now uses ``content`` instead of ``settings`` to get ``ANDROID_ID``
|
||||
- ``swipe_to_unlock`` parameter is now actually used. It has been changed to
|
||||
take a direction to accommodate various devices.
|
||||
- ``ensure_screen_is_on`` will now also unlock the screen if swipe_to_unlock
|
||||
is set.
|
||||
- Fixed use of variables in as_root=True commands.
|
||||
- ``get_pids_of`` now uses ``busybox grep`` since as of Android M+ ps cannot
|
||||
filter by process name anymore.
|
||||
- Fixed installing APK files with whitespace in their path/name.
|
||||
- ``adb_shell``:
|
||||
- Fixed handling of line breaks at the end of command output.
|
||||
- Newline separator is now detected from the target.
|
||||
- As of ADB v1.0.35, ADB returns the return code of the command run. WA now
|
||||
handles this correctly.
|
||||
- ``ApkWorkload``:
|
||||
- Now attempts to grant all runtime permissions for devices on Android M+.
|
||||
- Can now launch packages that don't have a launch activity defined.
|
||||
- Package version is now added to results as a classifier.
|
||||
- Now clears app data if an uninstall failed to ensure it starts from a known
|
||||
state.
|
||||
- ``wlauto.utils.ipython``: Updated to work with ipython v5.
|
||||
- ``Gem5Device``:
|
||||
- Added support for deploying the ``m5`` binary.
|
||||
- No longer waits for the boot animation to finish if it has been disabled.
|
||||
- Fixed runtime error caused by lack of kwargs.
|
||||
- No longer depends on ``busybox``.
|
||||
- Split out commands to resize shell to ``resize_shell``.
|
||||
- Now tries to connect to the shell up to 10 times.
|
||||
- No longer renames gzipped files.
|
||||
- Agendas:
|
||||
- Now errors when an agenda key is empty.
|
||||
- ``wlauto.core.execution.RunInfo``: ``run_name`` will now default to
|
||||
``{output_folder}_{date}_{time}``.
|
||||
- Extensions:
|
||||
- Two different parameters can now have the same global alias as long as
|
||||
their types match.
|
||||
- You can no longer ``override`` parameters that are defined at the same
|
||||
level.
|
||||
- ``wlauto.core.entry_point``: Now gives a better error when a config file
|
||||
doesn't exist.
|
||||
- ``wlauto.utils.misc``: Added ``aarch64`` to list for arm64 ABI.
|
||||
- ``wlauto.core.resolver``: Now shows what version was being searched for when a
|
||||
resource is not found.
|
||||
- Will no longer start instruments etc. if a run has no workload specs.
|
||||
- ``wlauto.utils.uboot``: Now detects uboot version to use correct line endings.
|
||||
- ``wlauto.utils.trace_cmd``: Added a parser for sched_switch events.
|
||||
|
||||
Other
|
||||
~~~~~
|
||||
- Updated to pylint v1.5.1
|
||||
- Rebuilt ``busybox`` binaries to prefer built-in applets over system binaries.
|
||||
- ``BaseUiAutomation``: Added functions for checking version strings.
|
||||
|
||||
Incompatible changes
|
||||
####################
|
||||
|
||||
Instruments
|
||||
~~~~~~~~~~~
|
||||
- ``apk_version``: Removed, use result classifiers instead.
|
||||
|
||||
Framework
|
||||
~~~~~~~~~
|
||||
- ``BaseLinuxDevice``: Removed ``is_installed`` use ``install_if_needed`` and
|
||||
``get_binary_path`` instead.
|
||||
- ``LinuxDevice``: Removed ``has_root`` method, use ``is_rooted`` instead.
|
||||
- ``AndroidDevice``: ``swipe_to_unlock`` method replaced with
|
||||
``perform_unlock_swipe``.
|
||||
|
||||
-------------
|
||||
Version 2.4.0
|
||||
-------------
|
||||
|
@@ -28,16 +28,12 @@

import sys, os
import warnings
from sphinx.apidoc import main

warnings.filterwarnings('ignore', "Module louie was already imported")

this_dir = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(this_dir, '..'))
sys.path.insert(0, os.path.join(this_dir, '../..'))
import wlauto
from build_extension_docs import generate_extension_documentation
from build_instrumentation_method_map import generate_instrumentation_method_map

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
@@ -268,21 +264,7 @@ texinfo_documents = [
#texinfo_show_urls = 'footnote'


def run_apidoc(_):
    sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
    cur_dir = os.path.abspath(os.path.dirname(__file__))
    api_output = os.path.join(cur_dir, 'api')
    module = os.path.join(cur_dir, '..', '..', 'wlauto')
    main(['-f', '-o', api_output, module, '--force'])


def setup(app):
    module_dir = os.path.join('..', '..', 'wlauto')
    excluded_extensions = [os.path.join(module_dir, 'external'),
                           os.path.join(module_dir, 'tests')]
    os.chdir(os.path.dirname(__file__))
    app.connect('builder-inited', run_apidoc)
    generate_instrumentation_method_map('instrumentation_method_map.rst')
    generate_extension_documentation(module_dir, 'extensions', excluded_extensions)
    app.add_object_type('confval', 'confval',
                        objname='configuration value',
                        indextemplate='pair: %s; configuration value')
@@ -118,7 +118,6 @@ and detailed descriptions of how WA functions under the hood.
   additional_topics
   daq_device_setup
   revent
   apk_workloads
   contributing

API Reference
@@ -59,11 +59,6 @@ usually the best bet.

Optionally (but recommended), you should also set ``ANDROID_HOME`` to point to
the install location of the SDK (i.e. ``<path_to_android_sdk>/sdk``).

.. note:: You may need to install 32-bit compatibility libraries for the SDK
          to work properly. On Ubuntu you need to run::

              sudo apt-get install lib32stdc++6 lib32z1


Python
------
@@ -92,7 +87,7 @@ similar distributions, this may be done with APT::
If you do run into this issue after already installing some packages,
you can resolve it by running ::

    sudo chmod -R a+r /usr/local/lib/python2.7/dist-packagessudo
    sudo chmod -R a+r /usr/local/lib/python2.7/dist-packagessudo
    find /usr/local/lib/python2.7/dist-packages -type d -exec chmod a+x {} \;

(The paths above will work for Ubuntu; they may need to be adjusted
@@ -192,28 +187,10 @@ version $version".
Some WA extensions have additional dependencies that need to be
satisfied before they can be used. Not all of these can be provided with WA and
so will need to be supplied by the user. They should be placed into
``~/.workload_automation/dependencies/<extension name>`` so that WA can find
``~/.workload_uatomation/dependencies/<extension name>`` so that WA can find
them (you may need to create the directory if it doesn't already exist). You
only need to provide the dependencies for workloads you want to use.

Binary Files
------------

Some workloads require native binaries to work. Different binaries will be required
for different ABIs. WA may not include the required binary for a workload due to
licensing/distribution issues, or may not have a binary compiled for your device's
ABI. In such cases, you will have to supply the missing binaries.

Executable binaries for a workload should be placed inside the
``~/.workload_automation/dependencies/<extension name>/bin/<ABI>`` directory.
This directory may not already exist, in which case you would have to create it.

Binaries placed in that location will take precedence over any already included with
WA. For example, if you have your own ``dhrystone`` binary compiled for ``arm64``,
and you want WA to pick it up, you can do the following on the WA host machine ::

    mkdir -p ~/.workload_automation/dependencies/dhrystone/bin/arm64/
    cp /path/to/your/dhrystone ~/.workload_automation/dependencies/dhrystone/bin/arm64/

APK Files
---------
@@ -330,7 +307,7 @@ that location.

If you have installed Workload Automation via ``pip`` and wish to remove it, run this command to
uninstall it::


    sudo -H pip uninstall wlauto

.. Note:: This will *not* remove any user configuration (e.g. the ~/.workload_automation directory)
@@ -340,5 +317,5 @@ uninstall it::
====================

To upgrade Workload Automation to the latest version via ``pip``, run::


    sudo -H pip install --upgrade --no-deps wlauto
@@ -1,12 +1,11 @@
.. _invocation:
.. highlight:: none

========
Commands
========

Installing the wlauto package will add ``wa`` command to your system,
which you can run from anywhere. This has a number of sub-commands, which can
which you can run from anywhere. This has a number of sub-commands, which can
be viewed by executing ::

    wa -h
@@ -16,7 +15,7 @@ Individual sub-commands are discussed in detail below.
run
---

The most common sub-command you will use is ``run``. This will run specified
The most common sub-command you will use is ``run``. This will run specfied
workload(s) and process resulting output. This takes a single mandatory
argument that specifies what you want WA to run. This could be either a
workload name, or a path to an "agenda" file that allows you to specify multiple
@@ -25,7 +24,7 @@ section for details). Executing ::

    wa run -h

Will display help for this subcommand that will look something like this::
Will display help for this subcommand that will look somehtign like this::

    usage: run [-d DIR] [-f] AGENDA

@@ -48,13 +47,13 @@ Will display help for this subcommand that will look something like this::
      --debug               Enable debug mode. Note: this implies --verbose.
      -d DIR, --output-directory DIR
                            Specify a directory where the output will be
                            generated. If the directory already exists, the script
                            generated. If the directoryalready exists, the script
                            will abort unless -f option (see below) is used, in
                            which case the contents of the directory will be
                            overwritten. If this option is not specified, then
                            overwritten. If this optionis not specified, then
                            wa_output will be used instead.
      -f, --force           Overwrite output directory if it exists. By default,
                            the script will abort in this situation to prevent
                            the script will abort in thissituation to prevent
                            accidental data loss.
      -i ID, --id ID        Specify a workload spec ID from an agenda to run. If
                            this is specified, only that particular spec will be
@@ -82,74 +81,10 @@ agenda file used to run the workloads along with any other device-specific
configuration files used during execution.


create
------

This can be used to create various WA-related objects, currently workloads, packages and agendas.
The full set of options for this command are::

    usage: wa create [-h] [-c CONFIG] [-v] [--debug] [--version]
                     {workload,package,agenda} ...

    positional arguments:
      {workload,package,agenda}
        workload            Create a new workload. By default, a basic workload
                            template will be used but you can use options to
                            specify a different template.
        package             Create a new empty Python package for WA extensions.
                            On installation, this package will "advertise" itself
                            to WA so that Extensions within it will be loaded by
                            WA when it runs.
        agenda              Create an agenda with the specified extensions
                            enabled and parameters set to their default values.

    optional arguments:
      -h, --help            show this help message and exit
      -c CONFIG, --config CONFIG
                            specify an additional config.py
      -v, --verbose         The scripts will produce verbose output.
      --debug               Enable debug mode. Note: this implies --verbose.
      --version             show program's version number and exit

Use "wa create <object> -h" to see all the object-specific arguments. For example::

    wa create agenda -h

will display the relevant options that can be used to create an agenda.

get-assets
----------

This command can download external extension dependencies used by Workload Automation.
It can be used to download assets for all available extensions or those specifically listed.
The full set of options for this command are::

    usage: wa get-assets [-h] [-c CONFIG] [-v] [--debug] [--version] [-f]
                         [--url URL] (-a | -e EXT [EXT ...])

    optional arguments:
      -h, --help            show this help message and exit
      -c CONFIG, --config CONFIG
                            specify an additional config.py
      -v, --verbose         The scripts will produce verbose output.
      --debug               Enable debug mode. Note: this implies --verbose.
      --version             show program's version number and exit
      -f, --force           Always fetch the assets, even if matching versions
                            exist in local cache.
      --url URL             The location from which to download the files. If not
                            provided, config setting ``remote_assets_url`` will be
                            used if available, else uses the default
                            REMOTE_ASSETS_URL parameter in the script.
      -a, --all             Download assets for all extensions found in the index.
                            Cannot be used with -e.
      -e EXT [EXT ...]      One or more extensions whose assets to download.
                            Cannot be used with --all.


list
----

This lists all extensions of a particular type. For example::
This lists all extensions of a particular type. For example ::

    wa list workloads

@@ -162,11 +97,11 @@ show

This will show detailed information about an extension, including more in-depth
description and any parameters/configuration that are available. For example
executing::
executing ::

    wa show andebench

will produce something like::
will produce something like ::


    andebench

@@ -196,64 +131,5 @@ will produce something like::
    - Results displayed in Iterations per second
    - Detailed log file for comprehensive engineering analysis

.. _record-command:

record
------

This command simplifies the process of recording a revent file. It
will automatically deploy revent and even has the option of automatically
opening apps. WA uses two parts for the names of revent recordings, in the
format ``{device_name}.{suffix}.revent`` (a short sketch of this naming scheme
follows the option listing below):

- ``device_name`` can either be specified manually with the ``-d`` argument or it
  can be automatically determined. On Android devices it will be obtained from
  ``build.prop``; on Linux devices it is obtained from ``/proc/device-tree/model``.
- ``suffix`` is used by WA to determine which part of the app execution the
  recording is for; currently these are either ``setup`` or ``run``. This should
  be specified with the ``-s`` argument.

The full set of options for this command are::

    usage: wa record [-h] [-c CONFIG] [-v] [--debug] [--version] [-d DEVICE]
                     [-s SUFFIX] [-o OUTPUT] [-p PACKAGE] [-g] [-C]

    optional arguments:
      -h, --help            show this help message and exit
      -c CONFIG, --config CONFIG
                            specify an additional config.py
      -v, --verbose         The scripts will produce verbose output.
      --debug               Enable debug mode. Note: this implies --verbose.
      --version             show program's version number and exit
      -d DEVICE, --device DEVICE
                            The name of the device
      -s SUFFIX, --suffix SUFFIX
                            The suffix of the revent file, e.g. ``setup``
      -o OUTPUT, --output OUTPUT
                            Directory to save the recording in
      -p PACKAGE, --package PACKAGE
                            Package to launch before recording
      -g, --gamepad         Record from a gamepad rather than all devices.
      -C, --clear           Clear app cache before launching it
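As an illustration of the naming scheme described above, the recording file name is
simply the device name and the suffix joined with dots. This is a sketch only; in
practice ``device_name`` and ``suffix`` come from the ``-d``/``-s`` arguments or are
auto-detected as described earlier::

    def revent_file_name(device_name, suffix):
        """revent_file_name('juno', 'setup') -> 'juno.setup.revent'"""
        return '{}.{}.revent'.format(device_name, suffix)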
.. _replay-command:

replay
------

Alongside ``record``, WA also has a command to play back recorded revent files.
It behaves very similarly to the ``record`` command, taking many of the same options::

    usage: wa replay [-h] [-c CONFIG] [-v] [--debug] [--version] [-p PACKAGE] [-C]
                     revent

    positional arguments:
      revent                The name of the file to replay

    optional arguments:
      -h, --help            show this help message and exit
      -c CONFIG, --config CONFIG
                            specify an additional config.py
      -v, --verbose         The scripts will produce verbose output.
      --debug               Enable debug mode. Note: this implies --verbose.
      --version             show program's version number and exit
      -p PACKAGE, --package PACKAGE
                            Package to launch before recording
      -C, --clear           Clear app cache before launching it
@@ -1,10 +1,7 @@
|
||||
.. _revent_files_creation:
|
||||
|
||||
revent
|
||||
++++++
|
||||
|
||||
Overview and Usage
|
||||
==================
|
||||
======
|
||||
|
||||
revent utility can be used to record and later play back a sequence of user
|
||||
input events, such as key presses and touch screen taps. This is an alternative
|
||||
@@ -20,47 +17,36 @@ to Android UI Automator for providing automation for workloads. ::
|
||||
info:shows info about each event char device
|
||||
any additional parameters make it verbose
|
||||
|
||||
|
||||
.. note:: There are now also WA commands that perform the below steps.
|
||||
Please see ``wa show record/replay`` and ``wa record/replay --help``
|
||||
for details.
|
||||
|
||||
Recording
|
||||
---------
|
||||
|
||||
WA features a ``record`` command that will automatically deploy and start
|
||||
revent on the target device::
|
||||
To record, transfer the revent binary to the device, then invoke ``revent
|
||||
record``, giving it the time (in seconds) you want to record for, and the
|
||||
file you want to record to (WA expects these files to have .revent
|
||||
extension)::
|
||||
|
||||
wa record
|
||||
INFO Connecting to device...
|
||||
INFO Press Enter when you are ready to record...
|
||||
[Pressed Enter]
|
||||
INFO Press Enter when you have finished recording...
|
||||
[Pressed Enter]
|
||||
INFO Pulling files from device
|
||||
|
||||
Once started, you will need to get the target device ready to record (e.g.
unlock the screen, navigate menus and launch an app), then press ``ENTER``.
The recording has now started, and button presses, taps, etc. you perform on
the device will go into the .revent file. To stop the recording, simply press
``ENTER`` again.

Once you have finished recording, the revent file will be pulled from the device
to the current directory. It will be named ``{device_model}.revent``. When
recording revent files for a ``GameWorkload``, you can use the ``-s`` option to
add ``run`` or ``setup`` suffixes.
|
||||
|
||||
From version 2.6 of WA onwards, a "gamepad" recording mode is also supported.
This mode requires a gamepad to be connected to the device when recording, but
the recordings produced in this mode should be portable across devices.
|
||||
|
||||
For more information please read :ref:`record-command`.
|
||||
host$ adb push revent /data/local/revent
|
||||
host$ adb shell
|
||||
device# cd /data/local
|
||||
device# ./revent record 1000 my_recording.revent
|
||||
|
||||
The recording has now started and button presses, taps, etc you perform on the
|
||||
device will go into the .revent file. The recording will stop after the
|
||||
specified time period, and you can also stop it by hitting return in the adb
|
||||
shell.
|
||||
|
||||
Replaying
|
||||
---------
|
||||
|
||||
To replay a recorded file, run ``wa replay``, giving it the file you want to
|
||||
replay::
|
||||
To replay a recorded file, run ``revent replay`` on the device, giving it the
|
||||
file you want to replay::
|
||||
|
||||
wa replay my_recording.revent
|
||||
|
||||
For more information please read :ref:`replay-command`.
|
||||
device# ./revent replay my_recording.revent
|
||||
|
||||
|
||||
Using revent With Workloads
|
||||
@@ -114,359 +100,3 @@ whereas UI Automator only works for Android UI elements (such as text boxes or
radio buttons), which makes the latter useless for things like games. Recording
a revent sequence is also faster than writing automation code (on the other hand,
one would need to maintain a different revent log for each screen resolution).
|
||||
|
||||
|
||||
Using state detection with revent
|
||||
=================================
|
||||
|
||||
State detection can be used to verify that a workload is executing as expected.
This utility, if enabled, and if state definitions are available for the
particular workload, takes a screenshot after the setup and the run revent
sequences, matches the screenshot to a known state and compares it with the
expected state. A ``WorkloadError`` is raised if an unexpected state is
encountered.

To enable state detection, make sure a valid state definition file and
templates exist for your workload and set the ``check_states`` parameter to
``True``.
|
||||
|
||||
State definition directory
|
||||
--------------------------
|
||||
|
||||
State and phase definitions should be placed in a directory of the following
|
||||
structure inside the dependencies directory of each workload (along with
|
||||
revent files etc):
|
||||
|
||||
::
|
||||
|
||||
dependencies/
|
||||
<workload_name>/
|
||||
state_definitions/
|
||||
definition.yaml
|
||||
templates/
|
||||
<oneTemplate>.png
|
||||
<anotherTemplate>.png
|
||||
...
|
||||
|
||||
definition.yaml file
|
||||
--------------------
|
||||
|
||||
This defines each state of the workload and lists which templates are expected
|
||||
to be found and how many are required to be detected for a conclusive match. It
|
||||
also defines the expected state in each workload phase where a state detection
|
||||
is run (currently those are setup_complete and run_complete).
|
||||
|
||||
Templates are picture elements to be matched in a screenshot. Each template
|
||||
mentioned in the definition file should be placed as a file with the same name
|
||||
and a .png extension inside the templates folder. Creating template png files
|
||||
is as simple as taking a screenshot of the workload in a given state, cropping
|
||||
out the relevant templates (eg. a button, label or other unique element that is
|
||||
present in that state) and storing them in PNG format.
|
||||
|
||||
Please see the definition file for Angry Birds below as an example to
understand the format. Note that more than just the two expected states (for
the setup_complete and run_complete phases) can be defined; this helps track
down the cause of errors in case an unexpected state is encountered.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
workload_name: angrybirds
|
||||
|
||||
workload_states:
|
||||
- state_name: titleScreen
|
||||
templates:
|
||||
- play_button
|
||||
- logo
|
||||
matches: 2
|
||||
- state_name: worldSelection
|
||||
templates:
|
||||
- first_world_thumb
|
||||
- second_world_thumb
|
||||
- third_world_thumb
|
||||
- fourth_world_thumb
|
||||
matches: 3
|
||||
- state_name: level_selection
|
||||
templates:
|
||||
- locked_level
|
||||
- first_level
|
||||
matches: 2
|
||||
- state_name: gameplay
|
||||
templates:
|
||||
- pause_button
|
||||
- score_label_text
|
||||
matches: 2
|
||||
- state_name: pause_screen
|
||||
templates:
|
||||
- replay_button
|
||||
- menu_button
|
||||
- resume_button
|
||||
- help_button
|
||||
matches: 4
|
||||
- state_name: level_cleared_screen
|
||||
templates:
|
||||
- level_cleared_text
|
||||
- menu_button
|
||||
- replay_button
|
||||
- fast_forward_button
|
||||
matches: 4
|
||||
|
||||
workload_phases:
|
||||
- phase_name: setup_complete
|
||||
expected_state: gameplay
|
||||
- phase_name: run_complete
|
||||
expected_state: level_cleared_screen
|
||||
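
To illustrate how templates of this kind can be matched against a screenshot,
below is a minimal sketch using OpenCV (installed by the ``statedetect``
extra). It is only an illustration of the template-matching technique, not WA's
actual state detection code; the file paths and the ``0.8`` threshold are
placeholder assumptions.

.. code:: python

    import cv2

    # Load a screenshot and one of the state templates (paths are placeholders).
    screenshot = cv2.imread('screenshot.png', cv2.IMREAD_GRAYSCALE)
    template = cv2.imread('templates/play_button.png', cv2.IMREAD_GRAYSCALE)

    # Slide the template over the screenshot and score how well it matches.
    result = cv2.matchTemplate(screenshot, template, cv2.TM_CCOEFF_NORMED)
    _, max_val, _, max_loc = cv2.minMaxLoc(result)

    # Treat a sufficiently high correlation as "template present in this state".
    if max_val > 0.8:
        print "matched at {} with score {:.2f}".format(max_loc, max_val)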
|
||||
|
||||
File format of revent recordings
|
||||
================================
|
||||
|
||||
You do not need to understand the recording format in order to use revent. This
|
||||
section is intended for those looking to extend revent in some way, or to
|
||||
utilize revent recordings for other purposes.
|
||||
|
||||
Format Overview
|
||||
---------------
|
||||
|
||||
Recordings are stored in a binary format. A recording consists of three
|
||||
sections::
|
||||
|
||||
+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Header |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
| Device Description |
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
| |
|
||||
| Event Stream |
|
||||
| |
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
The header contains metadata describing the recording. The device description
|
||||
contains information about input devices involved in this recording. Finally,
|
||||
the event stream contains the recorded input events.
|
||||
|
||||
All fields are either fixed size or prefixed with their length or the number of
|
||||
(fixed-size) elements.
|
||||
|
||||
.. note:: All values below are little endian
|
||||
|
||||
|
||||
Recording Header
|
||||
----------------
|
||||
|
||||
A revent recording header has the following structure:

* It starts with the "magic" string ``REVENT`` to indicate that this is a
  revent recording.
|
||||
* The magic is followed by a 16 bit version number. This indicates the format
|
||||
version of the recording that follows. Current version is ``2``.
|
||||
* The next 16 bits indicate the type of the recording. This dictates the
|
||||
structure of the Device Description section. Valid values are:
|
||||
|
||||
``0``
|
||||
This is a general input event recording. The device description
|
||||
contains a list of paths from which the events were recorded.
``1``
This is a gamepad recording. The device description contains the
|
||||
description of the gamepad used to create the recording.
|
||||
|
||||
* The header is zero-padded to 128 bits.
|
||||
|
||||
::
|
||||
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| 'R' | 'E' | 'V' | 'E' |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| 'N' | 'T' | Version |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Mode | PADDING |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| PADDING |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
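
As an illustration of the layout above, the header can be decoded with Python's
``struct`` module. This is a sketch based purely on the description in this
section, not code taken from WA:

.. code:: python

    import struct

    with open('recording.revent', 'rb') as fh:
        # 6-byte magic, 16-bit version, 16-bit mode, zero-padded to 128 bits (16 bytes).
        magic, version, mode = struct.unpack('<6sHH6x', fh.read(16))
        assert magic == 'REVENT'
        print "format version: {}, recording mode: {}".format(version, mode)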
|
||||
|
||||
Device Description
|
||||
------------------
|
||||
|
||||
This section describes the input devices used in the recording. Its structure is
|
||||
determined by the value of the ``Mode`` field in the header.
|
||||
|
||||
general recording
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. note:: This is the only format supported prior to version ``2``.
|
||||
|
||||
The recording has been made from all available input devices. This section
|
||||
contains the list of ``/dev/input`` paths for the devices, prefixed with the
total number of devices recorded.
|
||||
|
||||
::
|
||||
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Number of devices |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
| Device paths +-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
|
||||
Similarly, each device path is a length-prefixed string. Unlike C strings, the
|
||||
path is *not* NULL-terminated.
|
||||
|
||||
::
|
||||
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of device path |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
| Device path |
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
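
Continuing the sketch from the previous section, the device description of a
general recording could be read as follows (again, an illustration of the
documented layout rather than WA's implementation):

.. code:: python

    import struct

    def read_device_paths(fh):
        # A 32-bit device count, followed by one length-prefixed
        # (not NULL-terminated) path per device.
        num_devices, = struct.unpack('<I', fh.read(4))
        paths = []
        for _ in range(num_devices):
            path_length, = struct.unpack('<I', fh.read(4))
            paths.append(fh.read(path_length))
        return paths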
|
||||
|
||||
gamepad recording
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
The recording has been made from a specific gamepad. All events in the stream
|
||||
will be for that device only. The section describes the device properties that
|
||||
will be used to create a virtual input device using ``/dev/uinput``. Please
|
||||
see the ``linux/input.h`` header in the Linux kernel source for more information
|
||||
about the fields in this section.
|
||||
|
||||
::
|
||||
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| bustype | vendor |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| product | version |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| name_length |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
| name |
|
||||
| |
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| ev_bits |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
| |
|
||||
| key_bits (96 bytes) |
|
||||
| |
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
| |
|
||||
| rel_bits (96 bytes) |
|
||||
| |
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
| |
|
||||
| abs_bits (96 bytes) |
|
||||
| |
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| num_absinfo |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
| |
|
||||
| |
|
||||
| |
|
||||
| absinfo entries |
|
||||
| |
|
||||
| |
|
||||
| |
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
|
||||
Each ``absinfo`` entry consists of six 32 bit values. The number of entries is
|
||||
determined by the ``abs_bits`` field.
|
||||
|
||||
|
||||
::
|
||||
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| value |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| minimum |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| maximum |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| fuzz |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| flat |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| resolution |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
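
Each entry therefore occupies 24 bytes; assuming the values are signed (as in
the kernel's ``struct input_absinfo``), it can be unpacked in one call:

.. code:: python

    import struct

    def read_absinfo_entry(fh):
        # value, minimum, maximum, fuzz, flat, resolution -- six 32-bit values.
        return struct.unpack('<6i', fh.read(24))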
|
||||
|
||||
Event structure
|
||||
---------------
|
||||
|
||||
The majority of a revent recording will be made up of the input events that were
recorded. The event stream is prefixed with the number of events in the stream.

Each event entry is structured as follows:
|
||||
|
||||
* An unsigned integer representing which device from the list of device paths
|
||||
this event is for (zero indexed). E.g. Device ID = 3 would be the 4th
|
||||
device in the list of device paths.
|
||||
* A signed integer representing the number of seconds since "epoch" when the
|
||||
event was recorded.
|
||||
* A signed integer representing the microseconds part of the timestamp.
|
||||
* An unsigned integer representing the event type
|
||||
* An unsigned integer representing the event code
|
||||
* An unsigned integer representing the event value
|
||||
|
||||
For more information about the event type, code and value please read:
|
||||
https://www.kernel.org/doc/Documentation/input/event-codes.txt
|
||||
|
||||
::
|
||||
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Device ID | Timestamp Seconds |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Timestamp Seconds (cont.) |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Timestamp Seconds (cont.) | stamp Microseconds |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Timestamp Microseconds (cont.) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Timestamp Microseconds (cont.) | Event Type |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Event Code | Event Value |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Event Value (cont.) |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
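
Putting the field descriptions and the diagram together, each event entry
occupies 26 bytes and could be decoded as below. This is only a sketch of the
documented layout; WA's own parser is described in the next section.

.. code:: python

    import struct

    EVENT_FORMAT = '<HqqHHI'  # device id, seconds, microseconds, type, code, value
    EVENT_SIZE = struct.calcsize(EVENT_FORMAT)  # 26 bytes

    def read_event(fh):
        device_id, sec, usec, ev_type, ev_code, ev_value = \
            struct.unpack(EVENT_FORMAT, fh.read(EVENT_SIZE))
        return device_id, sec + usec / 1e6, ev_type, ev_code, ev_value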
|
||||
|
||||
Parser
|
||||
------
|
||||
|
||||
WA has a parser for revent recordings. This can be used to work with revent
|
||||
recordings in scripts. Here is an example:
|
||||
|
||||
.. code:: python
|
||||
|
||||
from wlauto.utils.revent import ReventRecording
|
||||
|
||||
with ReventRecording('/path/to/recording.revent') as recording:
|
||||
print "Recording: {}".format(recording.filepath)
|
||||
print "There are {} input events".format(recording.num_events)
|
||||
print "Over a total of {} seconds".format(recording.duration)
|
||||
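
The recording's ``mode`` attribute can also be used to tell general and gamepad
recordings apart, in the same way the ``replay`` command does; a short sketch:

.. code:: python

    from wlauto.utils.revent import ReventRecording, GAMEPAD_MODE

    with ReventRecording('/path/to/recording.revent') as recording:
        if recording.mode == GAMEPAD_MODE:
            print "this is a gamepad recording"
        else:
            print "this is a general input recording"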
|
@@ -161,8 +161,8 @@ In order for the binary to be obtained in this way, it must be stored in one of
|
||||
the locations scanned by the resource resolver in a directory structure
``<root>/bin/<abi>/<binary>`` (where ``root`` is the base resource location to
be searched, e.g. ``~/.workload_automation/dependencies/<extension name>``, and
``<abi>`` is the ABI for which the executable has been compiled, as returned by
``self.device.abi``).
|
||||
|
||||
Once the path to the host-side binary has been obtained, it may be deployed using
|
||||
one of two methods of a ``Device`` instance -- ``install`` or ``install_if_needed``.
|
||||
@@ -182,8 +182,8 @@ WA and will not try to re-install.
|
||||
|
||||
Both of the above methods will return the path to the installed binary on the
device. The executable should be invoked *only* via that path; do **not** assume
that it will be in ``PATH`` on the target (or that the executable with the same
name in ``PATH`` is the version deployed by WA).
|
||||
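
A minimal sketch of how an extension might do this in its ``setup`` method is
given below; the workload and binary names are placeholders rather than an
actual WA extension.

.. code:: python

    from wlauto import Workload
    from wlauto.common.resources import Executable
    from wlauto.core.resource import NO_ONE


    class ExampleBinaryWorkload(Workload):  # hypothetical workload

        name = 'example_binary_workload'

        def setup(self, context):
            # Resolve the host-side binary for the target's ABI...
            host_binary = context.resolver.get(
                Executable(NO_ONE, self.device.abi, 'example_bin'))
            # ...and deploy it only if a matching binary is not already on the target.
            self.target_binary = self.device.install_if_needed(host_binary)

        def run(self, context):
            # Always invoke via the returned path; do not rely on PATH on the target.
            self.device.execute('{} --help'.format(self.target_binary))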
|
||||
.. code:: python
|
||||
|
||||
@@ -964,7 +964,7 @@ that accompanies them, in addition to what has been outlined here, should
|
||||
provide enough guidance.
|
||||
|
||||
:commands: This allows extending WA with additional sub-commands (to supplement
|
||||
existing ones outlined in the :ref:`invocation <invocation>` section).
existing ones outlined in the :ref:`invocation` section).
|
||||
:modules: Modules are "extensions for extensions". They can be loaded by other
|
||||
extensions to expand their functionality (for example, a flashing
|
||||
module may be loaded by a device in order to support flashing).
|
||||
|
6
setup.py
@@ -66,7 +66,7 @@ params = dict(
|
||||
packages=packages,
|
||||
package_data=data_files,
|
||||
scripts=scripts,
|
||||
url='http://github.com/arm-sowftware/workload-automation',
|
||||
url='N/A',
|
||||
license='Apache v2',
|
||||
maintainer='ARM Architecture & Technology Device Lab',
|
||||
maintainer_email='workload-automation@arm.com',
|
||||
@@ -76,11 +76,11 @@ params = dict(
|
||||
'pyserial', # Serial port interface
|
||||
'colorama', # Printing with colors
|
||||
'pyYAML', # YAML-formatted agenda parsing
|
||||
'requests', # Fetch assets over HTTP
|
||||
'requests', # Fetch assets over HTTP
|
||||
'devlib', # Interacting with devices
|
||||
],
|
||||
extras_require={
|
||||
'other': ['jinja2', 'pandas>=0.13.1'],
|
||||
'statedetect': ['numpy', 'imutils', 'opencv-python'],
|
||||
'test': ['nose'],
|
||||
'mongodb': ['pymongo'],
|
||||
'notify': ['notify2'],
|
||||
|
@@ -14,7 +14,7 @@
|
||||
#
|
||||
|
||||
from wlauto.core.bootstrap import settings # NOQA
|
||||
from wlauto.core.device import Device, RuntimeParameter, CoreParameter # NOQA
|
||||
from wlauto.core.device_manager import DeviceManager, RuntimeParameter, CoreParameter # NOQA
|
||||
from wlauto.core.command import Command # NOQA
|
||||
from wlauto.core.workload import Workload # NOQA
|
||||
from wlauto.core.extension import Module, Parameter, Artifact, Alias # NOQA
|
||||
@@ -25,11 +25,9 @@ from wlauto.core.resource import ResourceGetter, Resource, GetterPriority, NO_ON
|
||||
from wlauto.core.exttype import get_extension_type # NOQA Note: MUST be imported after other core imports.
|
||||
|
||||
from wlauto.common.resources import File, ExtensionAsset, Executable
|
||||
from wlauto.common.linux.device import LinuxDevice # NOQA
|
||||
from wlauto.common.android.device import AndroidDevice, BigLittleDevice # NOQA
|
||||
from wlauto.common.android.resources import ApkFile, JarFile
|
||||
from wlauto.common.android.workload import (UiAutomatorWorkload, ApkWorkload, AndroidBenchmark, # NOQA
|
||||
AndroidUiAutoBenchmark, AndroidUxPerfWorkload, GameWorkload) # NOQA
|
||||
AndroidUiAutoBenchmark, GameWorkload) # NOQA
|
||||
|
||||
from wlauto.core.version import get_wa_version
|
||||
|
||||
|
@@ -1,6 +1,6 @@
|
||||
# This agenda specifies configuration that may be used for regression runs
|
||||
# on big.LITTLE systems. This agenda will work with a TC2 device configured
|
||||
# as described in the documentation.
|
||||
# on big.LITTLE systems. This agenda will with a TC2 device configured as
|
||||
# described in the documentation.
|
||||
config:
|
||||
device: tc2
|
||||
run_name: big.LITTLE_regression
|
||||
@@ -69,7 +69,7 @@ workloads:
|
||||
- id: b10
|
||||
name: smartbench
|
||||
- id: b11
|
||||
name: sqlitebm
|
||||
name: sqlite
|
||||
- id: b12
|
||||
name: vellamo
|
||||
|
||||
|
@@ -12,3 +12,5 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
|
@@ -22,6 +22,7 @@ import textwrap
|
||||
import argparse
|
||||
import shutil
|
||||
import getpass
|
||||
import subprocess
|
||||
from collections import OrderedDict
|
||||
|
||||
import yaml
|
||||
@@ -29,9 +30,8 @@ import yaml
|
||||
from wlauto import ExtensionLoader, Command, settings
|
||||
from wlauto.exceptions import CommandError, ConfigError
|
||||
from wlauto.utils.cli import init_argument_parser
|
||||
from wlauto.utils.misc import (capitalize,
|
||||
ensure_file_directory_exists as _f,
|
||||
ensure_directory_exists as _d)
|
||||
from wlauto.utils.misc import (capitalize, check_output,
|
||||
ensure_file_directory_exists as _f, ensure_directory_exists as _d)
|
||||
from wlauto.utils.types import identifier
|
||||
from wlauto.utils.doc import format_body
|
||||
|
||||
@@ -41,6 +41,20 @@ __all__ = ['create_workload']
|
||||
|
||||
TEMPLATES_DIR = os.path.join(os.path.dirname(__file__), 'templates')
|
||||
|
||||
UIAUTO_BUILD_SCRIPT = """#!/bin/bash
|
||||
|
||||
class_dir=bin/classes/com/arm/wlauto/uiauto
|
||||
base_class=`python -c "import os, wlauto; print os.path.join(os.path.dirname(wlauto.__file__), 'common', 'android', 'BaseUiAutomation.class')"`
|
||||
mkdir -p $$class_dir
|
||||
cp $$base_class $$class_dir
|
||||
|
||||
ant build
|
||||
|
||||
if [[ -f bin/${package_name}.jar ]]; then
|
||||
cp bin/${package_name}.jar ..
|
||||
fi
|
||||
"""
|
||||
|
||||
|
||||
class CreateSubcommand(object):
|
||||
|
||||
@@ -307,7 +321,7 @@ def create_basic_workload(path, name, class_name):
|
||||
|
||||
|
||||
def create_uiautomator_workload(path, name, class_name):
|
||||
uiauto_path = os.path.join(path, 'uiauto')
|
||||
uiauto_path = _d(os.path.join(path, 'uiauto'))
|
||||
create_uiauto_project(uiauto_path, name)
|
||||
source_file = os.path.join(path, '__init__.py')
|
||||
with open(source_file, 'w') as wfh:
|
||||
@@ -321,34 +335,37 @@ def create_android_benchmark(path, name, class_name):
|
||||
|
||||
|
||||
def create_android_uiauto_benchmark(path, name, class_name):
|
||||
uiauto_path = os.path.join(path, 'uiauto')
|
||||
uiauto_path = _d(os.path.join(path, 'uiauto'))
|
||||
create_uiauto_project(uiauto_path, name)
|
||||
source_file = os.path.join(path, '__init__.py')
|
||||
with open(source_file, 'w') as wfh:
|
||||
wfh.write(render_template('android_uiauto_benchmark', {'name': name, 'class_name': class_name}))
|
||||
|
||||
|
||||
def create_uiauto_project(path, name):
|
||||
def create_uiauto_project(path, name, target='1'):
|
||||
sdk_path = get_sdk_path()
|
||||
android_path = os.path.join(sdk_path, 'tools', 'android')
|
||||
package_name = 'com.arm.wlauto.uiauto.' + name.lower()
|
||||
|
||||
shutil.copytree(os.path.join(TEMPLATES_DIR, 'uiauto_template'), path)
|
||||
|
||||
manifest_path = os.path.join(path, 'app', 'src', 'main')
|
||||
mainifest = os.path.join(_d(manifest_path), 'AndroidManifest.xml')
|
||||
with open(mainifest, 'w') as wfh:
|
||||
wfh.write(render_template('uiauto_AndroidManifest.xml', {'package_name': package_name}))
|
||||
|
||||
build_gradle_path = os.path.join(path, 'app')
|
||||
build_gradle = os.path.join(_d(build_gradle_path), 'build.gradle')
|
||||
with open(build_gradle, 'w') as wfh:
|
||||
wfh.write(render_template('uiauto_build.gradle', {'package_name': package_name}))
|
||||
# ${ANDROID_HOME}/tools/android create uitest-project -n com.arm.wlauto.uiauto.linpack -t 1 -p ../test2
|
||||
command = '{} create uitest-project --name {} --target {} --path {}'.format(android_path,
|
||||
package_name,
|
||||
target,
|
||||
path)
|
||||
try:
|
||||
check_output(command, shell=True)
|
||||
except subprocess.CalledProcessError as e:
|
||||
if 'is is not valid' in e.output:
|
||||
message = 'No Android SDK target found; have you run "{} update sdk" and download a platform?'
|
||||
raise CommandError(message.format(android_path))
|
||||
|
||||
build_script = os.path.join(path, 'build.sh')
|
||||
with open(build_script, 'w') as wfh:
|
||||
wfh.write(render_template('uiauto_build_script', {'package_name': package_name}))
|
||||
template = string.Template(UIAUTO_BUILD_SCRIPT)
|
||||
wfh.write(template.substitute({'package_name': package_name}))
|
||||
os.chmod(build_script, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
|
||||
|
||||
source_file = _f(os.path.join(path, 'app', 'src', 'main', 'java',
|
||||
source_file = _f(os.path.join(path, 'src',
|
||||
os.sep.join(package_name.split('.')[:-1]),
|
||||
'UiAutomation.java'))
|
||||
with open(source_file, 'w') as wfh:
|
||||
|
@@ -1,122 +0,0 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import sys
|
||||
import argparse
|
||||
|
||||
from requests import ConnectionError, RequestException
|
||||
|
||||
from wlauto import File, ExtensionLoader, Command, settings
|
||||
from wlauto.core.extension import Extension
|
||||
|
||||
|
||||
REMOTE_ASSETS_URL = 'https://github.com/ARM-software/workload-automation-assets/raw/master/dependencies'
|
||||
|
||||
|
||||
class GetAssetsCommand(Command):
|
||||
name = 'get-assets'
|
||||
description = '''
|
||||
This command downloads external extension dependencies used by Workload Automation.
|
||||
Works by first downloading a directory index of the assets, then iterating through
|
||||
it to get assets for the specified extensions.
|
||||
'''
|
||||
|
||||
# Uses config setting if available otherwise defaults to ARM-software repo
|
||||
# Can be overriden with the --url argument
|
||||
assets_url = settings.remote_assets_url or REMOTE_ASSETS_URL
|
||||
|
||||
def initialize(self, context):
|
||||
self.parser.add_argument('-f', '--force', action='store_true',
|
||||
help='Always fetch the assets, even if matching versions exist in local cache.')
|
||||
self.parser.add_argument('--url', metavar='URL', type=not_empty, default=self.assets_url,
|
||||
help='''The location from which to download the files. If not provided,
|
||||
config setting ``remote_assets_url`` will be used if available, else
|
||||
uses the default REMOTE_ASSETS_URL parameter in the script.''')
|
||||
group = self.parser.add_mutually_exclusive_group(required=True)
|
||||
group.add_argument('-a', '--all', action='store_true',
|
||||
help='Download assets for all extensions found in the index. Cannot be used with -e.')
|
||||
group.add_argument('-e', dest='exts', metavar='EXT', nargs='+', type=not_empty,
|
||||
help='One or more extensions whose assets to download. Cannot be used with --all.')
|
||||
|
||||
def execute(self, args):
|
||||
self.logger.debug('Program arguments: {}'.format(vars(args)))
|
||||
if args.force:
|
||||
self.logger.info('Force-download of assets requested')
|
||||
if not args.url:
|
||||
self.logger.debug('URL not provided, falling back to default setting in config')
|
||||
self.logger.info('Downloading external assets from {}'.format(args.url))
|
||||
|
||||
# Get file index of assets
|
||||
ext_loader = ExtensionLoader(packages=settings.extension_packages, paths=settings.extension_paths)
|
||||
getter = ext_loader.get_resource_getter('http_assets', None, url=args.url, always_fetch=args.force)
|
||||
try:
|
||||
getter.index = getter.fetch_index()
|
||||
except (ConnectionError, RequestException) as e:
|
||||
self.exit_with_error(str(e))
|
||||
all_assets = dict()
|
||||
for k, v in getter.index.iteritems():
|
||||
all_assets[str(k)] = [str(asset['path']) for asset in v]
|
||||
|
||||
# Here we get a list of all extensions present in the current WA installation,
|
||||
# and cross-check that against the list of extensions whose assets are requested.
|
||||
# The aim is to avoid downloading assets for extensions that do not exist, since
|
||||
# WA extensions and asset index can be updated independently and go out of sync.
|
||||
all_extensions = [ext.name for ext in ext_loader.list_extensions()]
|
||||
assets_to_get = set(all_assets).intersection(all_extensions)
|
||||
if args.exts:
|
||||
assets_to_get = assets_to_get.intersection(args.exts)
|
||||
# Check list is not empty
|
||||
if not assets_to_get:
|
||||
if args.all:
|
||||
self.exit_with_error('Could not find extensions: {}'.format(', '.join(all_assets.keys())))
|
||||
else: # args.exts
|
||||
self.exit_with_error('Asset index has no entries for: {}'.format(', '.join(args.exts)))
|
||||
|
||||
# Check out of sync extensions i.e. do not exist in both WA and assets index
|
||||
missing = set(all_assets).difference(all_extensions) | set(args.exts or []).difference(all_assets)
|
||||
if missing:
|
||||
self.logger.warning('Not getting assets for missing extensions: {}'.format(', '.join(missing)))
|
||||
|
||||
# Ideally the extension loader would be used to instantiate, but it does full
|
||||
# validation of the extension, like checking connected devices or supported
|
||||
# platform(s). This info might be unavailable and is not required to download
|
||||
# assets, since they are classified by extension name alone. So instead we use
|
||||
# a simple subclass of ``Extension`` providing a valid ``name`` attribute.
|
||||
for ext_name in assets_to_get:
|
||||
owner = _instantiate(NamedExtension, ext_name)
|
||||
self.logger.info('Getting assets for: {}'.format(ext_name))
|
||||
for asset in all_assets[ext_name]:
|
||||
getter.get(File(owner, asset)) # Download the files
|
||||
|
||||
def exit_with_error(self, message, code=1):
|
||||
self.logger.error(message)
|
||||
sys.exit(code)
|
||||
|
||||
|
||||
class NamedExtension(Extension):
|
||||
def __init__(self, name, **kwargs):
|
||||
super(NamedExtension, self).__init__(**kwargs)
|
||||
self.name = name
|
||||
|
||||
|
||||
def not_empty(val):
|
||||
if val:
|
||||
return val
|
||||
else:
|
||||
raise argparse.ArgumentTypeError('Extension name cannot be blank')
|
||||
|
||||
|
||||
def _instantiate(cls, *args, **kwargs):
|
||||
return cls(*args, **kwargs)
|
@@ -15,8 +15,6 @@
|
||||
|
||||
import os
|
||||
import sys
|
||||
import signal
|
||||
from math import ceil
|
||||
|
||||
from wlauto import ExtensionLoader, Command, settings
|
||||
from wlauto.common.resources import Executable
|
||||
@@ -24,10 +22,40 @@ from wlauto.core.resource import NO_ONE
|
||||
from wlauto.core.resolver import ResourceResolver
|
||||
from wlauto.core.configuration import RunConfiguration
|
||||
from wlauto.core.agenda import Agenda
|
||||
from wlauto.utils.revent import ReventRecording, GAMEPAD_MODE
|
||||
|
||||
|
||||
class ReventCommand(Command):
|
||||
class RecordCommand(Command):
|
||||
|
||||
name = 'record'
|
||||
description = '''Performs a revent recording
|
||||
|
||||
This command helps making revent recordings. It will automatically
|
||||
deploy revent and even has the option of automatically opening apps.
|
||||
|
||||
Revent allows you to record raw inputs such as screen swipes or button presses.
|
||||
This can be useful for recording inputs for workloads such as games that don't
|
||||
have XML UI layouts that can be used with UIAutomator. As a drawback from this,
|
||||
revent recordings are specific to the device type they were recorded on.
|
||||
|
||||
WA uses two parts to the names of revent recordings in the format,
|
||||
{device_name}.{suffix}.revent.
|
||||
|
||||
- device_name can either be specified manually with the ``-d`` argument or
|
||||
it can be automatically determined. On Android device it will be obtained
|
||||
from ``build.prop``, on Linux devices it is obtained from ``/proc/device-tree/model``.
|
||||
- suffix is used by WA to determine which part of the app execution the
|
||||
recording is for, currently these are either ``setup`` or ``run``. This
|
||||
should be specified with the ``-s`` argument.
|
||||
'''
|
||||
|
||||
def initialize(self, context):
|
||||
self.context = context
|
||||
self.parser.add_argument('-d', '--device', help='The name of the device')
|
||||
self.parser.add_argument('-s', '--suffix', help='The suffix of the revent file, e.g. ``setup``')
|
||||
self.parser.add_argument('-o', '--output', help='Directory to save the recording in')
|
||||
self.parser.add_argument('-p', '--package', help='Package to launch before recording')
|
||||
self.parser.add_argument('-C', '--clear', help='Clear app cache before launching it',
|
||||
action="store_true")
|
||||
|
||||
# Validate command options
|
||||
def validate_args(self, args):
|
||||
@@ -61,83 +89,21 @@ class ReventCommand(Command):
|
||||
self.device.initialize(context)
|
||||
|
||||
host_binary = context.resolver.get(Executable(NO_ONE, self.device.abi, 'revent'))
|
||||
self.target_binary = self.device.install_executable(host_binary)
|
||||
self.target_binary = self.device.install_if_needed(host_binary)
|
||||
|
||||
self.run(args)
|
||||
|
||||
def run(self, args):
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class RecordCommand(ReventCommand):
|
||||
|
||||
name = 'record'
|
||||
description = '''Performs a revent recording
|
||||
|
||||
This command helps create revent recordings. It will automatically
|
||||
deploy revent and even has the option of automatically opening apps.
|
||||
|
||||
Revent allows you to record raw inputs such as screen swipes or button presses.
|
||||
This can be useful for recording inputs for workloads such as games that don't
|
||||
have XML UI layouts that can be used with UIAutomator. As a drawback from this,
|
||||
revent recordings are specific to the device type they were recorded on.
|
||||
|
||||
WA uses two parts to form the names of revent recordings in the format,
|
||||
{device_name}.{suffix}.revent
|
||||
|
||||
- device_name can either be specified manually with the ``-d`` argument or
|
||||
else the name of the device will be automatically determined. On an Android device it is obtained
|
||||
from ``build.prop``, on Linux devices it is obtained from ``/proc/device-tree/model``.
|
||||
- suffix is used by WA to determine which part of the app execution the
|
||||
recording is for, currently either ``setup``, ``run`` or ``teardown``. This
|
||||
should be specified with the ``-s`` argument.
|
||||
|
||||
|
||||
**gamepad recording**
|
||||
|
||||
revent supports an alternative recording mode, where it will record events
|
||||
from a single gamepad device. In this mode, revent will store the
|
||||
description of this device as a part of the recording. When replaying such
|
||||
a recording, revent will first create a virtual gamepad using the
|
||||
description, and will replay the events into it, so a physical controller
|
||||
does not need to be connected on replay. Unlike standard revent recordings,
|
||||
recordings generated in this mode should be (to an extent) portable across
|
||||
different devices.
|
||||
|
||||
note:
|
||||
|
||||
- The device on which a recording is being made in gamepad mode, must have
|
||||
exactly one gamepad connected to it.
|
||||
- The device on which a gamepad recording is being replayed must have
|
||||
/dev/uinput enabled in the kernel (this interface is necessary to create
|
||||
virtual gamepad).
|
||||
|
||||
'''
|
||||
|
||||
def initialize(self, context):
|
||||
self.context = context
|
||||
self.parser.add_argument('-d', '--device', help='The name of the device')
|
||||
self.parser.add_argument('-s', '--suffix', help='The suffix of the revent file, e.g. ``setup``')
|
||||
self.parser.add_argument('-o', '--output', help='Directory to save the recording in')
|
||||
self.parser.add_argument('-p', '--package', help='Package to launch before recording')
|
||||
self.parser.add_argument('-g', '--gamepad', help='Record from a gamepad rather than all devices.',
|
||||
action="store_true")
|
||||
self.parser.add_argument('-C', '--clear', help='Clear app cache before launching it',
|
||||
action="store_true")
|
||||
self.parser.add_argument('-S', '--capture-screen', help='Record a screen capture after recording',
|
||||
action="store_true")
|
||||
|
||||
def run(self, args):
|
||||
if args.device:
|
||||
device_name = args.device
|
||||
self.device_name = args.device
|
||||
else:
|
||||
device_name = self.device.get_device_model()
|
||||
self.device_name = self.device.get_device_model()
|
||||
|
||||
if args.suffix:
|
||||
args.suffix += "."
|
||||
|
||||
revent_file = self.device.path.join(self.device.working_directory,
|
||||
'{}.{}revent'.format(device_name, args.suffix or ""))
|
||||
'{}.{}revent'.format(self.device_name, args.suffix or ""))
|
||||
|
||||
if args.clear:
|
||||
self.device.execute("pm clear {}".format(args.package))
|
||||
@@ -148,24 +114,18 @@ class RecordCommand(ReventCommand):
|
||||
|
||||
self.logger.info("Press Enter when you are ready to record...")
|
||||
raw_input("")
|
||||
gamepad_flag = '-g ' if args.gamepad else ''
|
||||
command = "{} record {}-s {}".format(self.target_binary, gamepad_flag, revent_file)
|
||||
command = "{} record -t 100000 -s {}".format(self.target_binary, revent_file)
|
||||
self.device.kick_off(command)
|
||||
|
||||
self.logger.info("Press Enter when you have finished recording...")
|
||||
raw_input("")
|
||||
if args.capture_screen:
|
||||
self.logger.info("Recording screen capture")
|
||||
self.device.capture_screen(args.output or os.getcwdu())
|
||||
self.device.killall("revent", signal.SIGINT)
|
||||
self.logger.info("Waiting for revent to finish")
|
||||
while self.device.get_pids_of("revent"):
|
||||
pass
|
||||
self.device.killall("revent")
|
||||
|
||||
self.logger.info("Pulling files from device")
|
||||
self.device.pull_file(revent_file, args.output or os.getcwdu())
|
||||
self.device.pull(revent_file, args.output or os.getcwdu())
|
||||
|
||||
|
||||
class ReplayCommand(ReventCommand):
|
||||
class ReplayCommand(RecordCommand):
|
||||
|
||||
name = 'replay'
|
||||
description = '''Replay a revent recording
|
||||
@@ -184,7 +144,7 @@ class ReplayCommand(ReventCommand):
|
||||
# pylint: disable=W0201
|
||||
def run(self, args):
|
||||
self.logger.info("Pushing file to device")
|
||||
self.device.push_file(args.revent, self.device.working_directory)
|
||||
self.device.push(args.revent, self.device.working_directory)
|
||||
revent_file = self.device.path.join(self.device.working_directory, os.path.split(args.revent)[1])
|
||||
|
||||
if args.clear:
|
||||
@@ -194,13 +154,8 @@ class ReplayCommand(ReventCommand):
|
||||
self.logger.info("Starting {}".format(args.package))
|
||||
self.device.execute('monkey -p {} -c android.intent.category.LAUNCHER 1'.format(args.package))
|
||||
|
||||
self.logger.info("Replaying recording")
|
||||
command = "{} replay {}".format(self.target_binary, revent_file)
|
||||
recording = ReventRecording(args.revent)
|
||||
timeout = ceil(recording.duration) + 30
|
||||
recording.close()
|
||||
self.device.execute(command, timeout=timeout,
|
||||
as_root=(recording.mode == GAMEPAD_MODE))
|
||||
self.device.execute(command)
|
||||
self.logger.info("Finished replay")
|
||||
|
||||
|
||||
|
@@ -20,7 +20,6 @@ import shutil
|
||||
|
||||
import wlauto
|
||||
from wlauto import Command, settings
|
||||
from wlauto.exceptions import ConfigError
|
||||
from wlauto.core.agenda import Agenda
|
||||
from wlauto.core.execution import Executor
|
||||
from wlauto.utils.log import add_log_file
|
||||
@@ -77,11 +76,6 @@ class RunCommand(Command):
|
||||
agenda = Agenda(args.agenda)
|
||||
settings.agenda = args.agenda
|
||||
shutil.copy(args.agenda, settings.meta_directory)
|
||||
|
||||
if len(agenda.workloads) == 0:
|
||||
raise ConfigError("No workloads specified")
|
||||
elif '.' in args.agenda or os.sep in args.agenda:
|
||||
raise ConfigError('Agenda "{}" does not exist.'.format(args.agenda))
|
||||
else:
|
||||
self.logger.debug('{} is not a file; assuming workload name.'.format(args.agenda))
|
||||
agenda = Agenda()
|
||||
|
@@ -111,3 +111,4 @@ def format_extension_parameters(extension, out, width, shift=4):
|
||||
param_texts.append(indent(param_text, shift))
|
||||
|
||||
out.write(format_column('\n'.join(param_texts), width))
|
||||
|
||||
|
@@ -2,30 +2,23 @@ package ${package_name};
|
||||
|
||||
import android.app.Activity;
|
||||
import android.os.Bundle;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import android.support.test.runner.AndroidJUnit4;
|
||||
|
||||
import android.util.Log;
|
||||
import android.view.KeyEvent;
|
||||
|
||||
// Import the uiautomator libraries
|
||||
import android.support.test.uiautomator.UiObject;
|
||||
import android.support.test.uiautomator.UiObjectNotFoundException;
|
||||
import android.support.test.uiautomator.UiScrollable;
|
||||
import android.support.test.uiautomator.UiSelector;
|
||||
|
||||
import com.android.uiautomator.core.UiObject;
|
||||
import com.android.uiautomator.core.UiObjectNotFoundException;
|
||||
import com.android.uiautomator.core.UiScrollable;
|
||||
import com.android.uiautomator.core.UiSelector;
|
||||
import com.android.uiautomator.testrunner.UiAutomatorTestCase;
|
||||
|
||||
import com.arm.wlauto.uiauto.BaseUiAutomation;
|
||||
|
||||
@RunWith(AndroidJUnit4.class)
|
||||
public class UiAutomation extends BaseUiAutomation {
|
||||
public class UiAutomation extends BaseUiAutomation {
|
||||
|
||||
public static String TAG = "${name}";
|
||||
|
||||
@Test
|
||||
public void runUiAutomation() throws Exception {
|
||||
initialize_instrumentation();
|
||||
public void runUiAutomation() throws Exception {
|
||||
// UI Automation code goes here
|
||||
}
|
||||
|
||||
|
@@ -14,7 +14,7 @@ class ${class_name}(AndroidBenchmark):
|
||||
|
||||
parameters = [
|
||||
# Workload parameters go here e.g.
|
||||
Parameter('example_parameter', kind=int, allowed_values=[1,2,3], default=1, override=True, mandatory=False,
|
||||
Parameter('Example parameter', kind=int, allowed_values=[1,2,3], default=1, override=True, mandatory=False,
|
||||
description='This is an example parameter')
|
||||
]
|
||||
|
||||
|
@@ -14,7 +14,7 @@ class ${class_name}(AndroidUiAutoBenchmark):
|
||||
|
||||
parameters = [
|
||||
# Workload parameters go here e.g.
|
||||
Parameter('example_parameter', kind=int, allowed_values=[1,2,3], default=1, override=True, mandatory=False,
|
||||
Parameter('Example parameter', kind=int, allowed_values=[1,2,3], default=1, override=True, mandatory=False,
|
||||
description='This is an example parameter')
|
||||
]
|
||||
|
||||
|
@@ -8,7 +8,7 @@ class ${class_name}(Workload):
|
||||
|
||||
parameters = [
|
||||
# Workload parameters go here e.g.
|
||||
Parameter('example_parameter', kind=int, allowed_values=[1,2,3], default=1, override=True, mandatory=False,
|
||||
Parameter('Example parameter', kind=int, allowed_values=[1,2,3], default=1, override=True, mandatory=False,
|
||||
description='This is an example parameter')
|
||||
]
|
||||
|
||||
|
@@ -1,12 +0,0 @@
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
|
||||
package="${package_name}"
|
||||
android:versionCode="1"
|
||||
android:versionName="1.0">
|
||||
|
||||
|
||||
<instrumentation
|
||||
android:name="android.support.test.runner.AndroidJUnitRunner"
|
||||
android:targetPackage="${package_name}"/>
|
||||
|
||||
</manifest>
|
@@ -1,33 +0,0 @@
|
||||
apply plugin: 'com.android.application'
|
||||
|
||||
android {
|
||||
compileSdkVersion 18
|
||||
buildToolsVersion '25.0.0'
|
||||
defaultConfig {
|
||||
applicationId "${package_name}"
|
||||
minSdkVersion 18
|
||||
targetSdkVersion 25
|
||||
testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner"
|
||||
}
|
||||
buildTypes {
|
||||
applicationVariants.all { variant ->
|
||||
variant.outputs.each { output ->
|
||||
output.outputFile = file("$$project.buildDir/apk/${package_name}.apk")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dependencies {
|
||||
compile fileTree(include: ['*.jar'], dir: 'libs')
|
||||
compile 'com.android.support.test:runner:0.5'
|
||||
compile 'com.android.support.test:rules:0.5'
|
||||
compile 'com.android.support.test.uiautomator:uiautomator-v18:2.1.2'
|
||||
compile(name: 'uiauto', ext: 'aar')
|
||||
}
|
||||
|
||||
repositories {
|
||||
flatDir {
|
||||
dirs 'libs'
|
||||
}
|
||||
}
|
@@ -1,39 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# CD into build dir if possible - allows building from any directory
|
||||
script_path='.'
|
||||
if `readlink -f $$0 &>/dev/null`; then
|
||||
script_path=`readlink -f $$0 2>/dev/null`
|
||||
fi
|
||||
script_dir=`dirname $$script_path`
|
||||
cd $$script_dir
|
||||
|
||||
# Ensure gradelw exists before starting
|
||||
if [[ ! -f gradlew ]]; then
|
||||
echo 'gradlew file not found! Check that you are in the right directory.'
|
||||
exit 9
|
||||
fi
|
||||
|
||||
# Copy base class library from wlauto dist
|
||||
libs_dir=app/libs
|
||||
base_classes=`python -c "import os, wlauto; print os.path.join(os.path.dirname(wlauto.__file__), 'common', 'android', '*.aar')"`
|
||||
mkdir -p $$libs_dir
|
||||
cp $$base_classes $$libs_dir
|
||||
|
||||
# Build and return appropriate exit code if failed
|
||||
# gradle build
|
||||
./gradlew clean :app:assembleDebug
|
||||
exit_code=$$?
|
||||
if [[ $$exit_code -ne 0 ]]; then
|
||||
echo "ERROR: 'gradle build' exited with code $$exit_code"
|
||||
exit $$exit_code
|
||||
fi
|
||||
|
||||
# If successful move APK file to workload folder (overwrite previous)
|
||||
rm -f ../$package_name
|
||||
if [[ -f app/build/apk/$package_name.apk ]]; then
|
||||
cp app/build/apk/$package_name.apk ../$package_name.apk
|
||||
else
|
||||
echo 'ERROR: UiAutomator apk could not be found!'
|
||||
exit 9
|
||||
fi
|
@@ -1,23 +0,0 @@
|
||||
// Top-level build file where you can add configuration options common to all sub-projects/modules.
|
||||
|
||||
buildscript {
|
||||
repositories {
|
||||
jcenter()
|
||||
}
|
||||
dependencies {
|
||||
classpath 'com.android.tools.build:gradle:2.3.1'
|
||||
|
||||
// NOTE: Do not place your application dependencies here; they belong
|
||||
// in the individual module build.gradle files
|
||||
}
|
||||
}
|
||||
|
||||
allprojects {
|
||||
repositories {
|
||||
jcenter()
|
||||
}
|
||||
}
|
||||
|
||||
task clean(type: Delete) {
|
||||
delete rootProject.buildDir
|
||||
}
|
Binary file not shown.
@@ -1,6 +0,0 @@
|
||||
#Wed May 03 15:42:44 BST 2017
|
||||
distributionBase=GRADLE_USER_HOME
|
||||
distributionPath=wrapper/dists
|
||||
zipStoreBase=GRADLE_USER_HOME
|
||||
zipStorePath=wrapper/dists
|
||||
distributionUrl=https\://services.gradle.org/distributions/gradle-3.3-all.zip
|
160
wlauto/commands/templates/uiauto_template/gradlew
vendored
@@ -1,160 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
##############################################################################
|
||||
##
|
||||
## Gradle start up script for UN*X
|
||||
##
|
||||
##############################################################################
|
||||
|
||||
# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
|
||||
DEFAULT_JVM_OPTS=""
|
||||
|
||||
APP_NAME="Gradle"
|
||||
APP_BASE_NAME=`basename "$0"`
|
||||
|
||||
# Use the maximum available, or set MAX_FD != -1 to use that value.
|
||||
MAX_FD="maximum"
|
||||
|
||||
warn ( ) {
|
||||
echo "$*"
|
||||
}
|
||||
|
||||
die ( ) {
|
||||
echo
|
||||
echo "$*"
|
||||
echo
|
||||
exit 1
|
||||
}
|
||||
|
||||
# OS specific support (must be 'true' or 'false').
|
||||
cygwin=false
|
||||
msys=false
|
||||
darwin=false
|
||||
case "`uname`" in
|
||||
CYGWIN* )
|
||||
cygwin=true
|
||||
;;
|
||||
Darwin* )
|
||||
darwin=true
|
||||
;;
|
||||
MINGW* )
|
||||
msys=true
|
||||
;;
|
||||
esac
|
||||
|
||||
# Attempt to set APP_HOME
|
||||
# Resolve links: $0 may be a link
|
||||
PRG="$0"
|
||||
# Need this for relative symlinks.
|
||||
while [ -h "$PRG" ] ; do
|
||||
ls=`ls -ld "$PRG"`
|
||||
link=`expr "$ls" : '.*-> \(.*\)$'`
|
||||
if expr "$link" : '/.*' > /dev/null; then
|
||||
PRG="$link"
|
||||
else
|
||||
PRG=`dirname "$PRG"`"/$link"
|
||||
fi
|
||||
done
|
||||
SAVED="`pwd`"
|
||||
cd "`dirname \"$PRG\"`/" >/dev/null
|
||||
APP_HOME="`pwd -P`"
|
||||
cd "$SAVED" >/dev/null
|
||||
|
||||
CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
|
||||
|
||||
# Determine the Java command to use to start the JVM.
|
||||
if [ -n "$JAVA_HOME" ] ; then
|
||||
if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
|
||||
# IBM's JDK on AIX uses strange locations for the executables
|
||||
JAVACMD="$JAVA_HOME/jre/sh/java"
|
||||
else
|
||||
JAVACMD="$JAVA_HOME/bin/java"
|
||||
fi
|
||||
if [ ! -x "$JAVACMD" ] ; then
|
||||
die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
|
||||
|
||||
Please set the JAVA_HOME variable in your environment to match the
|
||||
location of your Java installation."
|
||||
fi
|
||||
else
|
||||
JAVACMD="java"
|
||||
which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
|
||||
|
||||
Please set the JAVA_HOME variable in your environment to match the
|
||||
location of your Java installation."
|
||||
fi
|
||||
|
||||
# Increase the maximum file descriptors if we can.
|
||||
if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then
|
||||
MAX_FD_LIMIT=`ulimit -H -n`
|
||||
if [ $? -eq 0 ] ; then
|
||||
if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
|
||||
MAX_FD="$MAX_FD_LIMIT"
|
||||
fi
|
||||
ulimit -n $MAX_FD
|
||||
if [ $? -ne 0 ] ; then
|
||||
warn "Could not set maximum file descriptor limit: $MAX_FD"
|
||||
fi
|
||||
else
|
||||
warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
|
||||
fi
|
||||
fi
|
||||
|
||||
# For Darwin, add options to specify how the application appears in the dock
|
||||
if $darwin; then
|
||||
GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
|
||||
fi
|
||||
|
||||
# For Cygwin, switch paths to Windows format before running java
|
||||
if $cygwin ; then
|
||||
APP_HOME=`cygpath --path --mixed "$APP_HOME"`
|
||||
CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
|
||||
JAVACMD=`cygpath --unix "$JAVACMD"`
|
||||
|
||||
# We build the pattern for arguments to be converted via cygpath
|
||||
ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
|
||||
SEP=""
|
||||
for dir in $ROOTDIRSRAW ; do
|
||||
ROOTDIRS="$ROOTDIRS$SEP$dir"
|
||||
SEP="|"
|
||||
done
|
||||
OURCYGPATTERN="(^($ROOTDIRS))"
|
||||
# Add a user-defined pattern to the cygpath arguments
|
||||
if [ "$GRADLE_CYGPATTERN" != "" ] ; then
|
||||
OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
|
||||
fi
|
||||
# Now convert the arguments - kludge to limit ourselves to /bin/sh
|
||||
i=0
|
||||
for arg in "$@" ; do
|
||||
CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
|
||||
CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
|
||||
|
||||
if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
|
||||
eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
|
||||
else
|
||||
eval `echo args$i`="\"$arg\""
|
||||
fi
|
||||
i=$((i+1))
|
||||
done
|
||||
case $i in
|
||||
(0) set -- ;;
|
||||
(1) set -- "$args0" ;;
|
||||
(2) set -- "$args0" "$args1" ;;
|
||||
(3) set -- "$args0" "$args1" "$args2" ;;
|
||||
(4) set -- "$args0" "$args1" "$args2" "$args3" ;;
|
||||
(5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
|
||||
(6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
|
||||
(7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
|
||||
(8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
|
||||
(9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
|
||||
esac
|
||||
fi
|
||||
|
||||
# Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules
|
||||
function splitJvmOpts() {
|
||||
JVM_OPTS=("$@")
|
||||
}
|
||||
eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS
|
||||
JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME"
|
||||
|
||||
exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@"
|
@@ -1,90 +0,0 @@
|
||||
@if "%DEBUG%" == "" @echo off
|
||||
@rem ##########################################################################
|
||||
@rem
|
||||
@rem Gradle startup script for Windows
|
||||
@rem
|
||||
@rem ##########################################################################
|
||||
|
||||
@rem Set local scope for the variables with windows NT shell
|
||||
if "%OS%"=="Windows_NT" setlocal
|
||||
|
||||
@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
|
||||
set DEFAULT_JVM_OPTS=
|
||||
|
||||
set DIRNAME=%~dp0
|
||||
if "%DIRNAME%" == "" set DIRNAME=.
|
||||
set APP_BASE_NAME=%~n0
|
||||
set APP_HOME=%DIRNAME%
|
||||
|
||||
@rem Find java.exe
|
||||
if defined JAVA_HOME goto findJavaFromJavaHome
|
||||
|
||||
set JAVA_EXE=java.exe
|
||||
%JAVA_EXE% -version >NUL 2>&1
|
||||
if "%ERRORLEVEL%" == "0" goto init
|
||||
|
||||
echo.
|
||||
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
|
||||
echo.
|
||||
echo Please set the JAVA_HOME variable in your environment to match the
|
||||
echo location of your Java installation.
|
||||
|
||||
goto fail
|
||||
|
||||
:findJavaFromJavaHome
|
||||
set JAVA_HOME=%JAVA_HOME:"=%
|
||||
set JAVA_EXE=%JAVA_HOME%/bin/java.exe
|
||||
|
||||
if exist "%JAVA_EXE%" goto init
|
||||
|
||||
echo.
|
||||
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
|
||||
echo.
|
||||
echo Please set the JAVA_HOME variable in your environment to match the
|
||||
echo location of your Java installation.
|
||||
|
||||
goto fail
|
||||
|
||||
:init
|
||||
@rem Get command-line arguments, handling Windowz variants
|
||||
|
||||
if not "%OS%" == "Windows_NT" goto win9xME_args
|
||||
if "%@eval[2+2]" == "4" goto 4NT_args
|
||||
|
||||
:win9xME_args
|
||||
@rem Slurp the command line arguments.
|
||||
set CMD_LINE_ARGS=
|
||||
set _SKIP=2
|
||||
|
||||
:win9xME_args_slurp
|
||||
if "x%~1" == "x" goto execute
|
||||
|
||||
set CMD_LINE_ARGS=%*
|
||||
goto execute
|
||||
|
||||
:4NT_args
|
||||
@rem Get arguments from the 4NT Shell from JP Software
|
||||
set CMD_LINE_ARGS=%$
|
||||
|
||||
:execute
|
||||
@rem Setup the command line
|
||||
|
||||
set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
|
||||
|
||||
@rem Execute Gradle
|
||||
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
|
||||
|
||||
:end
|
||||
@rem End local scope for the variables with windows NT shell
|
||||
if "%ERRORLEVEL%"=="0" goto mainEnd
|
||||
|
||||
:fail
|
||||
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
|
||||
rem the _cmd.exe /c_ return code!
|
||||
if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
|
||||
exit /b 1
|
||||
|
||||
:mainEnd
|
||||
if "%OS%"=="Windows_NT" endlocal
|
||||
|
||||
:omega
|
@@ -1 +0,0 @@
|
||||
include ':app'
|
@@ -8,7 +8,7 @@ class ${class_name}(UiAutomatorWorkload):
|
||||
|
||||
parameters = [
|
||||
# Workload parameters go here e.g.
|
||||
Parameter('example_parameter', kind=int, allowed_values=[1,2,3], default=1, override=True, mandatory=False,
|
||||
Parameter('Example parameter', kind=int, allowed_values=[1,2,3], default=1, override=True, mandatory=False,
|
||||
description='This is an example parameter')
|
||||
]
|
||||
|
||||
|
@@ -12,3 +12,5 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
|
BIN
wlauto/common/android/BaseUiAutomation.class
Normal file
Binary file not shown.
@@ -12,3 +12,5 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
|
@@ -1,856 +0,0 @@
|
||||
# Copyright 2013-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# pylint: disable=E1101
|
||||
import os
|
||||
import sys
|
||||
import re
|
||||
import time
|
||||
import tempfile
|
||||
import shutil
|
||||
import threading
|
||||
import json
|
||||
import xml.dom.minidom
|
||||
from subprocess import CalledProcessError
|
||||
|
||||
from wlauto.core.extension import Parameter
|
||||
from wlauto.common.resources import Executable
|
||||
from wlauto.core.resource import NO_ONE
|
||||
from wlauto.common.linux.device import BaseLinuxDevice, PsEntry
|
||||
from wlauto.exceptions import DeviceError, WorkerThreadError, TimeoutError, DeviceNotRespondingError
|
||||
from wlauto.utils.misc import convert_new_lines, ABI_MAP, commonprefix
|
||||
from wlauto.utils.types import boolean, regex
|
||||
from wlauto.utils.android import (adb_shell, adb_background_shell, adb_list_devices,
|
||||
adb_command, AndroidProperties, ANDROID_VERSION_MAP)
|
||||
|
||||
|
||||
SCREEN_STATE_REGEX = re.compile('(?:mPowerState|mScreenOn|Display Power: state)=([0-9]+|true|false|ON|OFF)', re.I)
|
||||
SCREEN_SIZE_REGEX = re.compile(r'mUnrestrictedScreen=\(\d+,\d+\)\s+(?P<width>\d+)x(?P<height>\d+)')
|
||||
|
||||
|
||||
class AndroidDevice(BaseLinuxDevice): # pylint: disable=W0223
|
||||
"""
|
||||
Device running Android OS.
|
||||
|
||||
"""
|
||||
|
||||
platform = 'android'
|
||||
|
||||
parameters = [
|
||||
Parameter('adb_name',
|
||||
description='The unique ID of the device as output by "adb devices".'),
|
||||
Parameter('android_prompt', kind=regex, default=re.compile('^.*(shell|root)@.*:/\S* [#$] ', re.MULTILINE),
|
||||
description='The regular expression used to match the shell prompt on Android.'),
|
||||
Parameter('working_directory', default='/sdcard/wa-working', override=True),
|
||||
Parameter('binaries_directory', default='/data/local/tmp/wa-bin', override=True,
|
||||
description='Location of binaries on the device.'),
|
||||
Parameter('package_data_directory', default='/data/data',
|
||||
description='Location of data for an installed package (APK).'),
|
||||
Parameter('external_storage_directory', default='/sdcard',
|
||||
description='Mount point for external storage.'),
|
||||
Parameter('connection', default='usb', allowed_values=['usb', 'ethernet'],
|
||||
description='Specifies the nature of the adb connection.'),
|
||||
Parameter('logcat_poll_period', kind=int,
|
||||
description="""
|
||||
If specified and is not ``0``, logcat will be polled every
|
||||
``logcat_poll_period`` seconds, and buffered on the host. This
|
||||
can be used if a lot of output is expected in logcat and the fixed
|
||||
logcat buffer on the device is not big enough. The trade off is that
|
||||
this introduces some minor runtime overhead. Not set by default.
|
||||
"""),
|
||||
Parameter('enable_screen_check', kind=boolean, default=False,
|
||||
description="""
|
||||
Specifies whether the device should make sure that the screen is on
|
||||
during initialization.
|
||||
"""),
|
||||
Parameter('swipe_to_unlock', kind=str, default=None,
|
||||
allowed_values=[None, "horizontal", "vertical"],
|
||||
description="""
|
||||
If set a swipe of the specified direction will be performed.
|
||||
This should unlock the screen.
|
||||
"""),
|
||||
]
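# A hedged configuration sketch (not part of this class): in a WA config.py or
# agenda, these parameters are typically supplied via ``device_config``; all
# values below are illustrative.
#
#     device = 'generic_android'
#     device_config = dict(
#         adb_name='0123456789ABCDEF',    # serial reported by "adb devices"
#         logcat_poll_period=30,          # buffer logcat on the host every 30 seconds
#         swipe_to_unlock='horizontal',
#     )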
|
||||
|
||||
default_timeout = 30
|
||||
delay = 2
|
||||
long_delay = 3 * delay
|
||||
ready_timeout = 60
|
||||
|
||||
# Overwritten from Device. For documentation, see corresponding method in
|
||||
# Device.
|
||||
|
||||
@property
|
||||
def is_rooted(self):
|
||||
if self._is_rooted is None:
|
||||
try:
|
||||
result = adb_shell(self.adb_name, 'su', timeout=1)
|
||||
if 'not found' in result:
|
||||
self._is_rooted = False
|
||||
else:
|
||||
self._is_rooted = True
|
||||
except TimeoutError:
|
||||
self._is_rooted = True
|
||||
except DeviceError:
|
||||
self._is_rooted = False
|
||||
return self._is_rooted
|
||||
|
||||
@property
|
||||
def abi(self):
|
||||
val = self.getprop()['ro.product.cpu.abi'].split('-')[0]
|
||||
for abi, architectures in ABI_MAP.iteritems():
|
||||
if val in architectures:
|
||||
return abi
|
||||
return val
|
||||
|
||||
@property
|
||||
def supported_abi(self):
|
||||
props = self.getprop()
|
||||
result = [props['ro.product.cpu.abi']]
|
||||
if 'ro.product.cpu.abi2' in props:
|
||||
result.append(props['ro.product.cpu.abi2'])
|
||||
if 'ro.product.cpu.abilist' in props:
|
||||
for abi in props['ro.product.cpu.abilist'].split(','):
|
||||
if abi not in result:
|
||||
result.append(abi)
|
||||
|
||||
mapped_result = []
|
||||
for supported_abi in result:
|
||||
for abi, architectures in ABI_MAP.iteritems():
|
||||
found = False
|
||||
if supported_abi in architectures and abi not in mapped_result:
|
||||
mapped_result.append(abi)
|
||||
found = True
|
||||
break
|
||||
if not found and supported_abi not in mapped_result:
|
||||
mapped_result.append(supported_abi)
|
||||
return mapped_result
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(AndroidDevice, self).__init__(**kwargs)
|
||||
self._logcat_poller = None
|
||||
|
||||
def reset(self):
|
||||
self._is_ready = False
|
||||
self._just_rebooted = True
|
||||
adb_command(self.adb_name, 'reboot', timeout=self.default_timeout)
|
||||
|
||||
def hard_reset(self):
|
||||
super(AndroidDevice, self).hard_reset()
|
||||
self._is_ready = False
|
||||
self._just_rebooted = True
|
||||
|
||||
def boot(self, hard=False, **kwargs):
|
||||
if hard:
|
||||
self.hard_reset()
|
||||
else:
|
||||
self.reset()
|
||||
|
||||
def connect(self): # NOQA pylint: disable=R0912
|
||||
iteration_number = 0
|
||||
max_iterations = self.ready_timeout / self.delay
|
||||
available = False
|
||||
self.logger.debug('Polling for device {}...'.format(self.adb_name))
|
||||
while iteration_number < max_iterations:
|
||||
devices = adb_list_devices()
|
||||
if self.adb_name:
|
||||
for device in devices:
|
||||
if device.name == self.adb_name and device.status != 'offline':
|
||||
available = True
|
||||
else: # adb_name not set
|
||||
if len(devices) == 1:
|
||||
available = True
|
||||
elif len(devices) > 1:
|
||||
raise DeviceError('More than one device is connected and adb_name is not set.')
|
||||
|
||||
if available:
|
||||
break
|
||||
else:
|
||||
time.sleep(self.delay)
|
||||
iteration_number += 1
|
||||
else:
|
||||
raise DeviceError('Could not boot {} ({}).'.format(self.name, self.adb_name))
|
||||
|
||||
while iteration_number < max_iterations:
|
||||
available = (int('0' + (adb_shell(self.adb_name, 'getprop sys.boot_completed', timeout=self.default_timeout))) == 1)
|
||||
if available:
|
||||
break
|
||||
else:
|
||||
time.sleep(self.delay)
|
||||
iteration_number += 1
|
||||
else:
|
||||
raise DeviceError('Could not boot {} ({}).'.format(self.name, self.adb_name))
|
||||
|
||||
if self._just_rebooted:
|
||||
self.logger.debug('Waiting for boot to complete...')
|
||||
# On some devices, adb connection gets reset some time after booting.
|
||||
# This causes errors during execution. To prevent this, open a shell
|
||||
# session and wait for it to be killed. Once it's killed, give adb
|
||||
# enough time to restart, and then the device should be ready.
|
||||
# TODO: This is more of a work-around rather than an actual solution.
|
||||
# Need to figure out what is going on and the "proper" way of handling it.
|
||||
try:
|
||||
adb_shell(self.adb_name, '', timeout=20)
|
||||
time.sleep(5) # give adb time to re-initialize
|
||||
except TimeoutError:
|
||||
pass # timed out waiting for the session to be killed -- assume not going to be.
|
||||
|
||||
self.logger.debug('Boot completed.')
|
||||
self._just_rebooted = False
|
||||
self._is_ready = True
|
||||
|
||||
def initialize(self, context):
|
||||
self.sqlite = self.deploy_sqlite3(context) # pylint: disable=attribute-defined-outside-init
|
||||
if self.is_rooted:
|
||||
self.disable_screen_lock()
|
||||
self.disable_selinux()
|
||||
if self.enable_screen_check:
|
||||
self.ensure_screen_is_on()
|
||||
|
||||
def disconnect(self):
|
||||
if self._logcat_poller:
|
||||
self._logcat_poller.close()
|
||||
|
||||
def ping(self):
|
||||
try:
|
||||
# May be triggered inside initialize()
|
||||
adb_shell(self.adb_name, 'ls /', timeout=10)
|
||||
except (TimeoutError, CalledProcessError):
|
||||
raise DeviceNotRespondingError(self.adb_name or self.name)
|
||||
|
||||
def start(self):
|
||||
if self.logcat_poll_period:
|
||||
if self._logcat_poller:
|
||||
self._logcat_poller.close()
|
||||
self._logcat_poller = _LogcatPoller(self, self.logcat_poll_period, timeout=self.default_timeout)
|
||||
self._logcat_poller.start()
|
||||
|
||||
def stop(self):
|
||||
if self._logcat_poller:
|
||||
self._logcat_poller.stop()
|
||||
|
||||
def get_android_version(self):
|
||||
return ANDROID_VERSION_MAP.get(self.get_sdk_version(), None)
|
||||
|
||||
def get_android_id(self):
|
||||
"""
|
||||
Get the device's ANDROID_ID, which is
|
||||
|
||||
"A 64-bit number (as a hex string) that is randomly generated when the user
|
||||
first sets up the device and should remain constant for the lifetime of the
|
||||
user's device."
|
||||
|
||||
.. note:: This will get reset on userdata erasure.
|
||||
|
||||
"""
|
||||
output = self.execute('content query --uri content://settings/secure --projection value --where "name=\'android_id\'"').strip()
|
||||
return output.split('value=')[-1]
|
||||
|
||||
def get_sdk_version(self):
|
||||
try:
|
||||
return int(self.getprop('ro.build.version.sdk'))
|
||||
except (ValueError, TypeError):
|
||||
return None
|
||||
|
||||
def get_installed_package_version(self, package):
|
||||
"""
|
||||
Returns the version (versionName) of the specified package if it is installed
|
||||
on the device, or ``None`` otherwise.
|
||||
|
||||
Added in version 2.1.4
|
||||
|
||||
"""
|
||||
output = self.execute('dumpsys package {}'.format(package))
|
||||
for line in convert_new_lines(output).split('\n'):
|
||||
if 'versionName' in line:
|
||||
return line.split('=', 1)[1]
|
||||
return None
|
||||
|
||||
def get_installed_package_abi(self, package):
|
||||
"""
|
||||
Returns the primary abi of the specified package if it is installed
|
||||
on the device, or ``None`` otherwise.
|
||||
"""
|
||||
output = self.execute('dumpsys package {}'.format(package))
|
||||
val = None
|
||||
for line in convert_new_lines(output).split('\n'):
|
||||
if 'primaryCpuAbi' in line:
|
||||
val = line.split('=', 1)[1]
|
||||
break
|
||||
if val == 'null':
|
||||
return None
|
||||
for abi, architectures in ABI_MAP.iteritems():
|
||||
if val in architectures:
|
||||
return abi
|
||||
return val
|
||||
|
||||
def list_packages(self):
|
||||
"""
|
||||
List packages installed on the device.
|
||||
|
||||
Added in version 2.1.4
|
||||
|
||||
"""
|
||||
output = self.execute('pm list packages')
|
||||
output = output.replace('package:', '')
|
||||
return output.split()
|
||||
|
||||
def package_is_installed(self, package_name):
|
||||
"""
|
||||
Returns ``True`` if a package with the specified name is installed on
|
||||
the device, and ``False`` otherwise.
|
||||
|
||||
Added in version 2.1.4
|
||||
|
||||
"""
|
||||
return package_name in self.list_packages()
|
||||
|
||||
def executable_is_installed(self, executable_name): # pylint: disable=unused-argument,no-self-use
|
||||
raise AttributeError("""Instead of using is_installed, please use
|
||||
``get_binary_path`` or ``install_if_needed`` instead. You should
|
||||
use the path returned by these functions to then invoke the binary
|
||||
|
||||
please see: https://pythonhosted.org/wlauto/writing_extensions.html""")
|
||||
|
||||
def is_installed(self, name):
|
||||
if self.package_is_installed(name):
|
||||
return True
|
||||
elif "." in name:  # assumes android packages have a . in their name and binaries do not
|
||||
return False
|
||||
else:
|
||||
raise AttributeError("""Instead of using is_installed, please use
|
||||
``get_binary_path`` or ``install_if_needed`` instead. You should
|
||||
use the path returned by these functions to then invoke the binary
|
||||
|
||||
please see: https://pythonhosted.org/wlauto/writing_extensions.html""")
|
||||
|
||||
def listdir(self, path, as_root=False, **kwargs):
|
||||
contents = self.execute('ls {}'.format(path), as_root=as_root)
|
||||
return [x.strip() for x in contents.split()]
|
||||
|
||||
def push_file(self, source, dest, as_root=False, timeout=default_timeout): # pylint: disable=W0221
|
||||
"""
|
||||
Modified in version 2.1.4: added ``as_root`` parameter.
|
||||
|
||||
"""
|
||||
self._check_ready()
|
||||
try:
|
||||
if not as_root:
|
||||
adb_command(self.adb_name, "push '{}' '{}'".format(source, dest), timeout=timeout)
|
||||
else:
|
||||
device_tempfile = self.path.join(self.file_transfer_cache, source.lstrip(self.path.sep))
|
||||
self.execute('mkdir -p {}'.format(self.path.dirname(device_tempfile)))
|
||||
adb_command(self.adb_name, "push '{}' '{}'".format(source, device_tempfile), timeout=timeout)
|
||||
self.execute('cp {} {}'.format(device_tempfile, dest), as_root=True)
|
||||
except CalledProcessError as e:
|
||||
raise DeviceError(e)
|
||||
|
||||
def pull_file(self, source, dest, as_root=False, timeout=default_timeout): # pylint: disable=W0221
|
||||
"""
|
||||
Modified in version 2.1.4: added ``as_root`` parameter.
|
||||
|
||||
"""
|
||||
self._check_ready()
|
||||
try:
|
||||
if not as_root:
|
||||
adb_command(self.adb_name, "pull '{}' '{}'".format(source, dest), timeout=timeout)
|
||||
else:
|
||||
device_tempfile = self.path.join(self.file_transfer_cache, source.lstrip(self.path.sep))
|
||||
self.execute('mkdir -p {}'.format(self.path.dirname(device_tempfile)))
|
||||
self.execute('cp {} {}'.format(source, device_tempfile), as_root=True)
|
||||
adb_command(self.adb_name, "pull '{}' '{}'".format(device_tempfile, dest), timeout=timeout)
|
||||
except CalledProcessError as e:
|
||||
raise DeviceError(e)
|
||||
|
||||
def delete_file(self, filepath, as_root=False): # pylint: disable=W0221
|
||||
self._check_ready()
|
||||
adb_shell(self.adb_name, "rm -rf '{}'".format(filepath), as_root=as_root, timeout=self.default_timeout)
|
||||
|
||||
def file_exists(self, filepath):
|
||||
self._check_ready()
|
||||
output = adb_shell(self.adb_name, 'if [ -e \'{}\' ]; then echo 1; else echo 0; fi'.format(filepath),
|
||||
timeout=self.default_timeout)
|
||||
return bool(int(output))
|
||||
|
||||
def install(self, filepath, timeout=default_timeout, with_name=None, replace=False): # pylint: disable=W0221
|
||||
ext = os.path.splitext(filepath)[1].lower()
|
||||
if ext == '.apk':
|
||||
return self.install_apk(filepath, timeout, replace)
|
||||
else:
|
||||
return self.install_executable(filepath, with_name)
|
||||
|
||||
def install_apk(self, filepath, timeout=300, replace=False, allow_downgrade=False): # pylint: disable=W0221
|
||||
self._check_ready()
|
||||
ext = os.path.splitext(filepath)[1].lower()
|
||||
if ext == '.apk':
|
||||
flags = []
|
||||
if replace:
|
||||
flags.append('-r') # Replace existing APK
|
||||
if allow_downgrade:
|
||||
flags.append('-d') # Install the APK even if a newer version is already installed
|
||||
if self.get_sdk_version() >= 23:
|
||||
flags.append('-g') # Grant all runtime permissions
|
||||
self.logger.debug("Replace APK = {}, ADB flags = '{}'".format(replace, ' '.join(flags)))
|
||||
return adb_command(self.adb_name, "install {} '{}'".format(' '.join(flags), filepath), timeout=timeout)
|
||||
else:
|
||||
raise DeviceError('Can\'t install {}: unsupported format.'.format(filepath))
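# For reference, the host-side command generated above is of the form (sketch;
# serial and path are illustrative, and '-d'/'-g' only appear when applicable):
#
#     adb -s 0123456789ABCDEF install -r -d -g '/path/to/workload.apk'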
|
||||
|
||||
def install_executable(self, filepath, with_name=None):
|
||||
"""
|
||||
Installs a binary executable on device. Returns
|
||||
the path to the installed binary, or ``None`` if the installation has failed.
|
||||
Optionally, ``with_name`` parameter may be used to specify a different name under
|
||||
which the executable will be installed.
|
||||
|
||||
Added in version 2.1.3.
|
||||
Updated in version 2.1.5 with ``with_name`` parameter.
|
||||
|
||||
"""
|
||||
self._ensure_binaries_directory_is_writable()
|
||||
executable_name = with_name or os.path.basename(filepath)
|
||||
on_device_file = self.path.join(self.working_directory, executable_name)
|
||||
on_device_executable = self.path.join(self.binaries_directory, executable_name)
|
||||
self.push_file(filepath, on_device_file)
|
||||
self.execute('cp {} {}'.format(on_device_file, on_device_executable), as_root=self.is_rooted)
|
||||
self.execute('chmod 0777 {}'.format(on_device_executable), as_root=self.is_rooted)
|
||||
return on_device_executable
|
||||
|
||||
def uninstall(self, package):
|
||||
self._check_ready()
|
||||
adb_command(self.adb_name, "uninstall {}".format(package), timeout=self.default_timeout)
|
||||
|
||||
def uninstall_executable(self, executable_name):
|
||||
"""
|
||||
|
||||
Added in version 2.1.3.
|
||||
|
||||
"""
|
||||
on_device_executable = self.get_binary_path(executable_name, search_system_binaries=False)
|
||||
if not on_device_executable:
|
||||
raise DeviceError("Could not uninstall {}, binary not found".format(executable_name))
|
||||
self._ensure_binaries_directory_is_writable()
|
||||
self.delete_file(on_device_executable, as_root=self.is_rooted)
|
||||
|
||||
def execute(self, command, timeout=default_timeout, check_exit_code=True, background=False,
|
||||
as_root=False, busybox=False, **kwargs):
|
||||
"""
|
||||
Execute the specified command on the device using adb.
|
||||
|
||||
Parameters:
|
||||
|
||||
:param command: The command to be executed. It should appear exactly
|
||||
as if you were typing it into a shell.
|
||||
:param timeout: Time, in seconds, to wait for adb to return before aborting
|
||||
and raising an error. Defaults to ``AndroidDevice.default_timeout``.
|
||||
:param check_exit_code: If ``True``, the return code of the command on the Device will
|
||||
be checked and an exception will be raised if it is not 0.
|
||||
Defaults to ``True``.
|
||||
:param background: If ``True``, will execute adb in a subprocess, and will return
|
||||
immediately, not waiting for adb to return. Defaults to ``False``
|
||||
:param busybox: If ``True``, will use busybox to execute the command. Defaults to ``False``.
|
||||
|
||||
Added in version 2.1.3
|
||||
|
||||
.. note:: The device must be rooted to be able to use some busybox features.
|
||||
|
||||
:param as_root: If ``True``, will attempt to execute command in privileged mode. The device
|
||||
must be rooted, otherwise an error will be raised. Defaults to ``False``.
|
||||
|
||||
Added in version 2.1.3
|
||||
|
||||
:returns: If ``background`` parameter is set to ``True``, the subprocess object will
|
||||
be returned; otherwise, the contents of STDOUT from the device will be returned.
|
||||
|
||||
:raises: DeviceError if adb timed out or if the command returned non-zero exit
|
||||
code on the device, or if attempting to execute a command in privileged mode on an
|
||||
unrooted device.
|
||||
|
||||
"""
|
||||
self._check_ready()
|
||||
if as_root and not self.is_rooted:
|
||||
raise DeviceError('Attempting to execute "{}" as root on unrooted device.'.format(command))
|
||||
if busybox:
|
||||
command = ' '.join([self.busybox, command])
|
||||
if background:
|
||||
return adb_background_shell(self.adb_name, command, as_root=as_root)
|
||||
else:
|
||||
return adb_shell(self.adb_name, command, timeout, check_exit_code, as_root)
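# Usage sketch (illustrative; ``device`` is a connected AndroidDevice instance):
#
#     out = device.execute('dumpsys battery')            # blocking, returns stdout
#     proc = device.execute('logcat', background=True)   # returns a subprocess handle
#     device.execute('setenforce 0', as_root=True)       # raises DeviceError if not rooted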
|
||||
|
||||
def kick_off(self, command, as_root=None):
|
||||
"""
|
||||
Like execute but closes adb session and returns immediately, leaving the command running on the
|
||||
device (this is different from execute(background=True) which keeps adb connection open and returns
|
||||
a subprocess object).
|
||||
|
||||
Added in version 2.1.4
|
||||
|
||||
"""
|
||||
if as_root is None:
|
||||
as_root = self.is_rooted
|
||||
try:
|
||||
command = 'cd {} && {} nohup {}'.format(self.working_directory, self.busybox, command)
|
||||
output = self.execute(command, timeout=1, as_root=as_root)
|
||||
except TimeoutError:
|
||||
pass
|
||||
else:
|
||||
raise ValueError('Background command exited before timeout; got "{}"'.format(output))
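# Usage sketch (script name is hypothetical): unlike execute(background=True),
# the launched process survives the adb session used to start it.
#
#     device.kick_off('sh /sdcard/wa-working/run_stress.sh')
#     # returns immediately; the script keeps running on the device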
|
||||
|
||||
def get_pids_of(self, process_name):
|
||||
"""Returns a list of PIDs of all processes with the specified name."""
|
||||
result = (self.execute('ps | {} grep {}'.format(self.busybox, process_name),
|
||||
check_exit_code=False) or '').strip()
|
||||
if result and 'not found' not in result:
|
||||
return [int(x.split()[1]) for x in result.split('\n')]
|
||||
else:
|
||||
return []
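# Usage sketch:
#
#     pids = device.get_pids_of('surfaceflinger')   # e.g. [1234], or [] if not running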
|
||||
|
||||
def ps(self, **kwargs):
|
||||
"""
|
||||
Returns the list of running processes on the device. Keyword arguments may
|
||||
be used to specify simple filters for columns.
|
||||
|
||||
Added in version 2.1.4
|
||||
|
||||
"""
|
||||
lines = iter(convert_new_lines(self.execute('ps')).split('\n'))
|
||||
lines.next() # header
|
||||
result = []
|
||||
for line in lines:
|
||||
parts = line.split()
|
||||
if parts:
|
||||
result.append(PsEntry(*(parts[0:1] + map(int, parts[1:5]) + parts[5:])))
|
||||
if not kwargs:
|
||||
return result
|
||||
else:
|
||||
filtered_result = []
|
||||
for entry in result:
|
||||
if all(getattr(entry, k) == v for k, v in kwargs.iteritems()):
|
||||
filtered_result.append(entry)
|
||||
return filtered_result
|
||||
|
||||
def get_properties(self, context):
|
||||
"""Captures and saves the information from /system/build.prop and /proc/version"""
|
||||
props = super(AndroidDevice, self).get_properties(context)
|
||||
props.update(self._get_android_properties(context))
|
||||
return props
|
||||
|
||||
def _get_android_properties(self, context):
|
||||
props = {}
|
||||
props['android_id'] = self.get_android_id()
|
||||
self._update_build_properties(props)
|
||||
|
||||
dumpsys_host_file = os.path.join(context.host_working_directory, 'window.dumpsys')
|
||||
with open(dumpsys_host_file, 'w') as wfh:
|
||||
wfh.write(self.execute('dumpsys window'))
|
||||
context.add_run_artifact('dumpsys_window', dumpsys_host_file, 'meta')
|
||||
|
||||
prop_file = os.path.join(context.host_working_directory, 'android-props.json')
|
||||
with open(prop_file, 'w') as wfh:
|
||||
json.dump(props, wfh)
|
||||
context.add_run_artifact('android_properties', prop_file, 'export')
|
||||
return props
|
||||
|
||||
def getprop(self, prop=None):
|
||||
"""Returns parsed output of Android getprop command. If a property is
|
||||
specified, only the value for that property will be returned (with
|
||||
``None`` returned if the property doesn't exist). Otherwise,
|
||||
``wlauto.utils.android.AndroidProperties`` will be returned, which is
|
||||
a dict-like object."""
|
||||
props = AndroidProperties(self.execute('getprop'))
|
||||
if prop:
|
||||
return props[prop]
|
||||
return props
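# Usage sketch:
#
#     sdk = device.getprop('ro.build.version.sdk')    # single value (or None if unset)
#     model = device.getprop()['ro.product.model']    # via the dict-like AndroidProperties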
|
||||
|
||||
def deploy_sqlite3(self, context):
|
||||
host_file = context.resolver.get(Executable(NO_ONE, self.abi, 'sqlite3'))
|
||||
target_file = self.install_if_needed(host_file)
|
||||
return target_file
|
||||
|
||||
# Android-specific methods. These either rely on specifics of adb or other
|
||||
# Android-only concepts in their interface and/or implementation.
|
||||
|
||||
def forward_port(self, from_port, to_port):
|
||||
"""
|
||||
Forward a port on the device to a port on localhost.
|
||||
|
||||
:param from_port: Port on the device which to forward.
|
||||
:param to_port: Port on the localhost to which the device port will be forwarded.
|
||||
|
||||
Ports should be specified using adb spec. See the "adb forward" section in "adb help".
|
||||
|
||||
"""
|
||||
adb_command(self.adb_name, 'forward {} {}'.format(from_port, to_port), timeout=self.default_timeout)
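# Usage sketch, using adb port specs (port numbers are illustrative):
#
#     device.forward_port('tcp:5000', 'tcp:5000')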
|
||||
|
||||
def dump_logcat(self, outfile, filter_spec=None):
|
||||
"""
|
||||
Dump the contents of logcat, for the specified filter spec to the
|
||||
specified output file.
|
||||
See http://developer.android.com/tools/help/logcat.html
|
||||
|
||||
:param outfile: Output file on the host into which the contents of the
|
||||
log will be written.
|
||||
:param filter_spec: Logcat filter specification.
|
||||
see http://developer.android.com/tools/debugging/debugging-log.html#filteringOutput
|
||||
|
||||
"""
|
||||
if self._logcat_poller:
|
||||
return self._logcat_poller.write_log(outfile)
|
||||
else:
|
||||
if filter_spec:
|
||||
command = 'logcat -d -s {} > {}'.format(filter_spec, outfile)
|
||||
else:
|
||||
command = 'logcat -d > {}'.format(outfile)
|
||||
return adb_command(self.adb_name, command, timeout=self.default_timeout)
|
||||
|
||||
def clear_logcat(self):
|
||||
"""Clear (flush) logcat log."""
|
||||
if self._logcat_poller:
|
||||
return self._logcat_poller.clear_buffer()
|
||||
else:
|
||||
return adb_shell(self.adb_name, 'logcat -c', timeout=self.default_timeout)
|
||||
|
||||
def get_screen_size(self):
|
||||
output = self.execute('dumpsys window')
|
||||
match = SCREEN_SIZE_REGEX.search(output)
|
||||
if match:
|
||||
return (int(match.group('width')),
|
||||
int(match.group('height')))
|
||||
else:
|
||||
return (0, 0)
|
||||
|
||||
def perform_unlock_swipe(self):
|
||||
width, height = self.get_screen_size()
|
||||
command = 'input swipe {} {} {} {}'
|
||||
if self.swipe_to_unlock == "horizontal":
|
||||
swipe_heigh = height * 2 // 3
|
||||
start = 100
|
||||
stop = width - start
|
||||
self.execute(command.format(start, swipe_heigh, stop, swipe_heigh))
|
||||
elif self.swipe_to_unlock == "vertical":
|
||||
swipe_middle = height / 2
|
||||
swipe_heigh = height * 2 // 3
|
||||
self.execute(command.format(swipe_middle, swipe_heigh, swipe_middle, 0))
|
||||
else: # Should never reach here
|
||||
raise DeviceError("Invalid swipe direction: {}".format(self.swipe_to_unlock))
|
||||
|
||||
def capture_screen(self, filepath):
|
||||
"""Captures the current device screen to the specified file in PNG format."""
|
||||
on_device_file = self.path.join(self.working_directory, 'screen_capture.png')
|
||||
self.execute('screencap -p {}'.format(on_device_file))
|
||||
self.pull_file(on_device_file, filepath)
|
||||
self.delete_file(on_device_file)
|
||||
|
||||
def capture_ui_hierarchy(self, filepath):
|
||||
"""Captures the current view hierarchy to the specified file in XML format."""
|
||||
on_device_file = self.path.join(self.working_directory, 'screen_capture.xml')
|
||||
self.execute('uiautomator dump {}'.format(on_device_file))
|
||||
self.pull_file(on_device_file, filepath)
|
||||
self.delete_file(on_device_file)
|
||||
|
||||
parsed_xml = xml.dom.minidom.parse(filepath)
|
||||
with open(filepath, 'w') as f:
|
||||
f.write(parsed_xml.toprettyxml())
|
||||
|
||||
def is_screen_on(self):
|
||||
"""Returns ``True`` if the device screen is currently on, ``False`` otherwise."""
|
||||
output = self.execute('dumpsys power')
|
||||
match = SCREEN_STATE_REGEX.search(output)
|
||||
if match:
|
||||
return boolean(match.group(1))
|
||||
else:
|
||||
raise DeviceError('Could not establish screen state.')
|
||||
|
||||
def ensure_screen_is_on(self):
|
||||
if not self.is_screen_on():
|
||||
self.execute('input keyevent 26')
|
||||
if self.swipe_to_unlock:
|
||||
self.perform_unlock_swipe()
|
||||
|
||||
def disable_screen_lock(self):
|
||||
"""
|
||||
Attempts to disable the screen lock on the device.
|
||||
|
||||
.. note:: This does not always work...
|
||||
|
||||
Added in version 2.1.4
|
||||
|
||||
"""
|
||||
lockdb = '/data/system/locksettings.db'
|
||||
sqlcommand = "update locksettings set value='0' where name='screenlock.disabled';"
|
||||
f = tempfile.NamedTemporaryFile()
|
||||
try:
|
||||
f.write('{} {} "{}"'.format(self.sqlite, lockdb, sqlcommand))
|
||||
f.flush()
|
||||
on_device_executable = self.install_executable(f.name,
|
||||
with_name="disable_screen_lock")
|
||||
finally:
|
||||
f.close()
|
||||
self.execute(on_device_executable, as_root=True)
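# The helper script installed above effectively runs, as root, using the sqlite3
# binary deployed during initialize() (sketch of the equivalent command):
#
#     sqlite3 /data/system/locksettings.db \
#         "update locksettings set value='0' where name='screenlock.disabled';"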
|
||||
|
||||
def disable_selinux(self):
|
||||
# This may be invoked from initialize() so we can't use execute() or the
|
||||
# standard API for doing this.
|
||||
api_level = int(adb_shell(self.adb_name, 'getprop ro.build.version.sdk',
|
||||
timeout=self.default_timeout).strip())
|
||||
# SELinux was added in Android 4.3 (API level 18). Trying to
|
||||
# 'getenforce' in earlier versions will produce an error.
|
||||
if api_level >= 18:
|
||||
se_status = self.execute('getenforce', as_root=True).strip()
|
||||
if se_status == 'Enforcing':
|
||||
self.execute('setenforce 0', as_root=True)
|
||||
|
||||
def get_device_model(self):
|
||||
try:
|
||||
return self.getprop(prop='ro.product.device')
|
||||
except KeyError:
|
||||
return None
|
||||
|
||||
def refresh_device_files(self, file_list):
|
||||
"""
|
||||
Depending on the device's Android version and root status, determine the
|
||||
appropriate method of forcing a re-index of the mediaserver cache for a given
|
||||
list of files.
|
||||
"""
|
||||
if self.device.is_rooted or self.device.get_sdk_version() < 24: # MM and below
|
||||
common_path = commonprefix(file_list, sep=self.device.path.sep)
|
||||
self.broadcast_media_mounted(common_path, self.device.is_rooted)
|
||||
else:
|
||||
for f in file_list:
|
||||
self.broadcast_media_scan_file(f)
|
||||
|
||||
def broadcast_media_scan_file(self, filepath):
|
||||
"""
|
||||
Force a re-index of the mediaserver cache for the specified file.
|
||||
"""
|
||||
command = 'am broadcast -a android.intent.action.MEDIA_SCANNER_SCAN_FILE -d file://'
|
||||
self.execute(command + filepath)
|
||||
|
||||
def broadcast_media_mounted(self, dirpath, as_root=False):
|
||||
"""
|
||||
Force a re-index of the mediaserver cache for the specified directory.
|
||||
"""
|
||||
command = 'am broadcast -a android.intent.action.MEDIA_MOUNTED -d file://'
|
||||
self.execute(command + dirpath, as_root=as_root)
|
||||
|
||||
# Internal methods: do not use outside of the class.
|
||||
def _update_build_properties(self, props):
|
||||
try:
|
||||
regex = re.compile(r'\[([^\]]+)\]\s*:\s*\[([^\]]+)\]')
|
||||
for match in regex.finditer(self.execute("getprop")):
|
||||
key = match.group(1).strip()
|
||||
value = match.group(2).strip()
|
||||
props[key] = value
|
||||
except ValueError:
|
||||
self.logger.warning('Could not parse build.prop.')
|
||||
|
||||
def _update_versions(self, filepath, props):
|
||||
with open(filepath) as fh:
|
||||
text = fh.read()
|
||||
props['version'] = text
|
||||
text = re.sub(r'#.*', '', text).strip()
|
||||
match = re.search(r'^(Linux version .*?)\s*\((gcc version .*)\)$', text)
|
||||
if match:
|
||||
props['linux_version'] = match.group(1).strip()
|
||||
props['gcc_version'] = match.group(2).strip()
|
||||
else:
|
||||
self.logger.warning('Could not parse version string.')
|
||||
|
||||
def _ensure_binaries_directory_is_writable(self):
|
||||
matched = []
|
||||
for entry in self.list_file_systems():
|
||||
if self.binaries_directory.rstrip('/').startswith(entry.mount_point):
|
||||
matched.append(entry)
|
||||
if matched:
|
||||
entry = sorted(matched, key=lambda x: len(x.mount_point))[-1]
|
||||
if 'rw' not in entry.options:
|
||||
self.execute('mount -o rw,remount {} {}'.format(entry.device, entry.mount_point), as_root=True)
|
||||
else:
|
||||
raise DeviceError('Could not find mount point for binaries directory {}'.format(self.binaries_directory))
|
||||
|
||||
|
||||
class _LogcatPoller(threading.Thread):
|
||||
|
||||
join_timeout = 5
|
||||
|
||||
def __init__(self, device, period, timeout=None):
|
||||
super(_LogcatPoller, self).__init__()
|
||||
self.adb_device = device.adb_name
|
||||
self.logger = device.logger
|
||||
self.period = period
|
||||
self.timeout = timeout
|
||||
self.stop_signal = threading.Event()
|
||||
self.lock = threading.RLock()
|
||||
self.buffer_file = tempfile.mktemp()
|
||||
self.last_poll = 0
|
||||
self.daemon = True
|
||||
self.exc = None
|
||||
|
||||
def run(self):
|
||||
self.logger.debug('Starting logcat polling.')
|
||||
try:
|
||||
while True:
|
||||
if self.stop_signal.is_set():
|
||||
break
|
||||
with self.lock:
|
||||
current_time = time.time()
|
||||
if (current_time - self.last_poll) >= self.period:
|
||||
self._poll()
|
||||
time.sleep(0.5)
|
||||
except Exception: # pylint: disable=W0703
|
||||
self.exc = WorkerThreadError(self.name, sys.exc_info())
|
||||
self.logger.debug('Logcat polling stopped.')
|
||||
|
||||
def stop(self):
|
||||
self.logger.debug('Stopping logcat polling.')
|
||||
self.stop_signal.set()
|
||||
self.join(self.join_timeout)
|
||||
if self.is_alive():
|
||||
self.logger.error('Could not join logcat poller thread.')
|
||||
if self.exc:
|
||||
raise self.exc # pylint: disable=E0702
|
||||
|
||||
def clear_buffer(self):
|
||||
self.logger.debug('Clearing logcat buffer.')
|
||||
with self.lock:
|
||||
adb_shell(self.adb_device, 'logcat -c', timeout=self.timeout)
|
||||
with open(self.buffer_file, 'w') as _: # NOQA
|
||||
pass
|
||||
|
||||
def write_log(self, outfile):
|
||||
self.logger.debug('Writing logbuffer to {}.'.format(outfile))
|
||||
with self.lock:
|
||||
self._poll()
|
||||
if os.path.isfile(self.buffer_file):
|
||||
shutil.copy(self.buffer_file, outfile)
|
||||
else: # there was no logcat trace at this time
|
||||
with open(outfile, 'w') as _: # NOQA
|
||||
pass
|
||||
|
||||
def close(self):
|
||||
self.logger.debug('Closing logcat poller.')
|
||||
if os.path.isfile(self.buffer_file):
|
||||
os.remove(self.buffer_file)
|
||||
|
||||
def _poll(self):
|
||||
with self.lock:
|
||||
self.last_poll = time.time()
|
||||
adb_command(self.adb_device, 'logcat -d >> {}'.format(self.buffer_file), timeout=self.timeout)
|
||||
adb_command(self.adb_device, 'logcat -c', timeout=self.timeout)
|
||||
|
||||
|
||||
class BigLittleDevice(AndroidDevice): # pylint: disable=W0223
|
||||
|
||||
parameters = [
|
||||
Parameter('scheduler', default='hmp', override=True),
|
||||
]
|
@@ -34,13 +34,3 @@ class JarFile(FileResource):
|
||||
class ApkFile(FileResource):
|
||||
|
||||
name = 'apk'
|
||||
|
||||
def __init__(self, owner, platform=None, uiauto=False, package=None):
|
||||
super(ApkFile, self).__init__(owner)
|
||||
self.platform = platform
|
||||
self.uiauto = uiauto
|
||||
self.package = package
|
||||
|
||||
def __str__(self):
|
||||
apk_type = 'uiautomator ' if self.uiauto else ''
|
||||
return '<{}\'s {} {}APK>'.format(self.owner, self.platform, apk_type)
|
||||
|
Binary file not shown.
594
wlauto/common/android/workload.py
Executable file → Normal file
@@ -17,37 +17,27 @@ import os
|
||||
import sys
|
||||
import time
|
||||
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
from wlauto.core.extension import Parameter, ExtensionMeta, ListCollection
|
||||
from wlauto.core.extension import Parameter
|
||||
from wlauto.core.workload import Workload
|
||||
from wlauto.common.android.resources import ApkFile
|
||||
from wlauto.common.resources import ExtensionAsset, File
|
||||
from wlauto.exceptions import WorkloadError, ResourceError, DeviceError
|
||||
from wlauto.utils.android import (ApkInfo, ANDROID_NORMAL_PERMISSIONS,
|
||||
ANDROID_UNCHANGEABLE_PERMISSIONS, UNSUPPORTED_PACKAGES)
|
||||
from wlauto.utils.types import boolean, ParameterDict
|
||||
import wlauto.utils.statedetect as state_detector
|
||||
from wlauto.common.linux.workload import ReventWorkload
|
||||
from wlauto.core.resource import NO_ONE
|
||||
from wlauto.common.resources import ExtensionAsset, Executable
|
||||
from wlauto.exceptions import WorkloadError, ResourceError, ConfigError
|
||||
from wlauto.utils.android import ApkInfo, ANDROID_NORMAL_PERMISSIONS
|
||||
from wlauto.utils.types import boolean
|
||||
import wlauto.common.android.resources
|
||||
|
||||
|
||||
DELAY = 5
|
||||
|
||||
|
||||
# Due to the way `super` works you have to call it at every level but WA executes some
|
||||
# methods conditionally and so has to call them directly via the class, this breaks super
|
||||
# and causes it to run things multiple times etc. As a workaround for this, until workloads
|
||||
# are reworked everything that subclasses workload calls parent methods explicitly
|
||||
|
||||
|
||||
class UiAutomatorWorkload(Workload):
|
||||
"""
|
||||
Base class for all workloads that rely on a UI Automator APK file.
|
||||
Base class for all workloads that rely on a UI Automator JAR file.
|
||||
|
||||
This class should be subclassed by workloads that rely on android UiAutomator
|
||||
to work. This class handles installing the UI Automator APK to the device
|
||||
and invoking it to run the workload. By default, it will look for the ``*.apk`` file
|
||||
in the same directory as the .py file for the workload (this can be changed by overriding
|
||||
to work. This class handles transferring the UI Automator JAR file to the device
|
||||
and invoking it to run the workload. By default, it will look for the JAR file in
|
||||
the same directory as the .py file for the workload (this can be changed by overriding
|
||||
the ``uiauto_file`` property in the subclassing workload).
|
||||
|
||||
To initiate UI Automation, the fully-qualified name of the Java class and the
|
||||
@@ -59,7 +49,7 @@ class UiAutomatorWorkload(Workload):
|
||||
match what is expected, or you could override ``uiauto_package``, ``uiauto_class`` and
|
||||
``uiauto_method`` class attributes with the value that match your Java code.
|
||||
|
||||
You can also pass parameters to the APK file. To do this add the parameters to
|
||||
You can also pass parameters to the JAR file. To do this add the parameters to
|
||||
``self.uiauto_params`` dict inside your class's ``__init__`` or ``setup`` methods.
|
||||
|
||||
"""
|
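# A minimal subclass sketch (all names are illustrative, not a shipped workload):
#
#     class MyUiWorkload(UiAutomatorWorkload):
#         name = 'my_ui_workload'
#         uiauto_package = 'com.arm.wlauto.uiauto.myuiworkload'
#
#         def setup(self, context):
#             self.uiauto_params['iterations'] = 10   # passed through to the automation
#             UiAutomatorWorkload.setup(self, context)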
||||
@@ -68,43 +58,38 @@ class UiAutomatorWorkload(Workload):
|
||||
|
||||
uiauto_package = ''
|
||||
uiauto_class = 'UiAutomation'
|
||||
uiauto_method = 'android.support.test.runner.AndroidJUnitRunner'
|
||||
uiauto_method = 'runUiAutomation'
|
||||
|
||||
# Can be overridden by subclasses to adjust to the run time of specific
|
||||
# benchmarks.
|
||||
run_timeout = 10 * 60 # seconds
|
||||
uninstall_uiauto_apk = True
|
||||
run_timeout = 4 * 60 # seconds
|
||||
|
||||
def __init__(self, device, _call_super=True, **kwargs): # pylint: disable=W0613
|
||||
if _call_super:
|
||||
Workload.__init__(self, device, **kwargs)
|
||||
super(UiAutomatorWorkload, self).__init__(device, **kwargs)
|
||||
self.uiauto_file = None
|
||||
self.device_uiauto_file = None
|
||||
self.command = None
|
||||
self.uiauto_params = ParameterDict()
|
||||
self.uiauto_params = {}
|
||||
|
||||
def init_resources(self, context):
|
||||
self.uiauto_file = context.resolver.get(ApkFile(self, uiauto=True))
|
||||
self.uiauto_file = context.resolver.get(wlauto.common.android.resources.JarFile(self))
|
||||
if not self.uiauto_file:
|
||||
raise ResourceError('No UI automation APK file found for workload {}.'.format(self.name))
|
||||
|
||||
raise ResourceError('No UI automation JAR file found for workload {}.'.format(self.name))
|
||||
self.device_uiauto_file = self.device.path.join(self.device.working_directory,
|
||||
os.path.basename(self.uiauto_file))
|
||||
if not self.uiauto_package:
|
||||
self.uiauto_package = os.path.splitext(os.path.basename(self.uiauto_file))[0]
|
||||
|
||||
def setup(self, context):
|
||||
Workload.setup(self, context)
|
||||
method_string = '{}.{}#{}'.format(self.uiauto_package, self.uiauto_class, self.uiauto_method)
|
||||
params_dict = self.uiauto_params
|
||||
params_dict['workdir'] = self.device.working_directory
|
||||
params = ''
|
||||
for k, v in self.uiauto_params.iter_encoded_items():
|
||||
params += ' -e {} "{}"'.format(k, v)
|
||||
|
||||
if self.device.package_is_installed(self.uiauto_package):
|
||||
self.device.uninstall(self.uiauto_package)
|
||||
self.device.install_apk(self.uiauto_file)
|
||||
|
||||
instrumentation_string = 'am instrument -w -r {} -e class {}.{} {}/{}'
|
||||
self.command = instrumentation_string.format(params, self.uiauto_package,
|
||||
self.uiauto_class, self.uiauto_package,
|
||||
self.uiauto_method)
|
||||
for k, v in self.uiauto_params.iteritems():
|
||||
params += ' -e {} {}'.format(k, v)
|
||||
self.command = 'uiautomator runtest {}{} -c {}'.format(self.device_uiauto_file, params, method_string)
|
||||
self.device.push(self.uiauto_file, self.device_uiauto_file)
|
||||
self.device.killall('uiautomator')
|
||||
|
||||
def run(self, context):
|
||||
@@ -119,12 +104,11 @@ class UiAutomatorWorkload(Workload):
|
||||
pass
|
||||
|
||||
def teardown(self, context):
|
||||
if self.uninstall_uiauto_apk:
|
||||
self.device.uninstall(self.uiauto_package)
|
||||
self.device.remove(self.device_uiauto_file)
|
||||
|
||||
def validate(self):
|
||||
if not self.uiauto_file:
|
||||
raise WorkloadError('No UI automation APK file found for workload {}.'.format(self.name))
|
||||
raise WorkloadError('No UI automation JAR file found for workload {}.'.format(self.name))
|
||||
if not self.uiauto_package:
|
||||
raise WorkloadError('No UI automation package specified for workload {}.'.format(self.name))
|
||||
|
||||
@@ -138,16 +122,10 @@ class ApkWorkload(Workload):
|
||||
:package: The package name of the app. This is usually a Java-style name of the form
|
||||
``com.companyname.appname``.
|
||||
:activity: This is the initial activity of the app. This will be used to launch the
|
||||
app during the setup. Many applications do not specify a launch activity so
|
||||
this may be left blank if necessary.
|
||||
app during the setup.
|
||||
:view: The class of the main view pane of the app. This needs to be defined in order
|
||||
to collect SurfaceFlinger-derived statistics (such as FPS) for the app, but
|
||||
may otherwise be left as ``None``.
|
||||
:launch_main: If ``False``, the default activity will not be launched (during setup),
|
||||
allowing workloads to start the app with an intent of their choice in
|
||||
the run step. This is useful for apps without a launchable default/main
|
||||
activity or those where it cannot be launched without intent data (which
|
||||
is provided at the run phase).
|
||||
:install_timeout: Timeout for the installation of the APK. This may vary wildly based on
|
||||
the size and nature of a specific APK, and so should be defined on
|
||||
per-workload basis.
|
||||
@@ -157,9 +135,6 @@ class ApkWorkload(Workload):
|
||||
so, as with all timeouts, leeway must be included in
|
||||
the specified value.
|
||||
|
||||
:min_apk_version: The minimum supported apk version for this workload. May be ``None``.
|
||||
:max_apk_version: The maximum supported apk version for this workload. May be ``None``.
|
||||
|
||||
.. note:: Both package and activity for a workload may be obtained from the APK using
|
||||
the ``aapt`` tool that comes with the ADT (Android Development Tools) bundle.
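# A minimal subclass sketch (package/activity values are illustrative):
#
#     class MyApkWorkload(ApkWorkload):
#         name = 'my_apk_workload'
#         package = 'com.example.benchmark'
#         activity = '.MainActivity'
#
#         def update_result(self, context):
#             ApkWorkload.update_result(self, context)
#             # then parse logcat.log in context.output_directory for scores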
|
||||
|
||||
@@ -167,241 +142,91 @@ class ApkWorkload(Workload):
|
||||
package = None
|
||||
activity = None
|
||||
view = None
|
||||
min_apk_version = None
|
||||
max_apk_version = None
|
||||
supported_platforms = ['android']
|
||||
launch_main = True
|
||||
|
||||
parameters = [
|
||||
Parameter('install_timeout', kind=int, default=300,
|
||||
description='Timeout for the installation of the apk.'),
|
||||
Parameter('check_apk', kind=boolean, default=True,
|
||||
description='''
|
||||
When set to True the APK file on the host will be preferred if
|
||||
it is a valid version and ABI, if not it will fall back to the
|
||||
version on the target. When set to False the target version is
|
||||
preferred.
|
||||
Discover the APK for this workload on the host, and check that
|
||||
the version matches the one on device (if already installed).
|
||||
'''),
|
||||
Parameter('force_install', kind=boolean, default=False,
|
||||
description='''
|
||||
Always re-install the APK, even if matching version is found already installed
|
||||
on the device. Runs ``adb install -r`` to ensure existing APK is replaced. When
|
||||
this is set, check_apk is ignored.
|
||||
Always re-install the APK, even if matching version is found
|
||||
on already installed on the device.
|
||||
'''),
|
||||
Parameter('uninstall_apk', kind=boolean, default=False,
|
||||
description='If ``True``, will uninstall workload\'s APK as part of teardown.'),
|
||||
Parameter('exact_abi', kind=bool, default=False,
|
||||
description='''
|
||||
If ``True``, workload will check that the APK matches the target
|
||||
device ABI, otherwise any APK found will be used.
|
||||
'''),
|
||||
Parameter('clear_data_on_reset', kind=bool, default=True,
|
||||
description="""
|
||||
If set to ``False``, this will prevent WA from clearing package
|
||||
data for this workload prior to running it.
|
||||
"""),
|
||||
]
|
||||
|
||||
def __init__(self, device, _call_super=True, **kwargs):
|
||||
if _call_super:
|
||||
Workload.__init__(self, device, **kwargs)
|
||||
super(ApkWorkload, self).__init__(device, **kwargs)
|
||||
self.apk_file = None
|
||||
self.apk_version = None
|
||||
self.logcat_log = None
|
||||
self.exact_apk_version = None
|
||||
|
||||
def setup(self, context): # pylint: disable=too-many-branches
|
||||
Workload.setup(self, context)
|
||||
self.setup_workload_apk(context)
|
||||
self.launch_application()
|
||||
self.kill_background()
|
||||
def init_resources(self, context):
|
||||
self.apk_file = context.resolver.get(wlauto.common.android.resources.ApkFile(self),
|
||||
version=getattr(self, 'version', None),
|
||||
strict=self.check_apk)
|
||||
|
||||
def validate(self):
|
||||
if self.check_apk:
|
||||
if not self.apk_file:
|
||||
raise WorkloadError('No APK file found for workload {}.'.format(self.name))
|
||||
else:
|
||||
if self.force_install:
|
||||
raise ConfigError('force_install cannot be "True" when check_apk is set to "False".')
|
||||
|
||||
def setup(self, context):
|
||||
self.initialize_package(context)
|
||||
self.start_activity()
|
||||
self.device.execute('am kill-all') # kill all *background* activities
|
||||
self.device.clear_logcat()
|
||||
|
||||
def setup_workload_apk(self, context):
|
||||
# Get target version
|
||||
target_version = self.device.get_installed_package_version(self.package)
|
||||
if target_version:
|
||||
target_version = LooseVersion(target_version)
|
||||
self.logger.debug("Found version '{}' on target device".format(target_version))
|
||||
|
||||
# Get host version
|
||||
self.apk_file = context.resolver.get(ApkFile(self, self.device.abi,
|
||||
package=getattr(self, 'package', None)),
|
||||
version=getattr(self, 'version', None),
|
||||
variant_name=getattr(self, 'variant_name', None),
|
||||
strict=False)
|
||||
|
||||
# Get target abi
|
||||
target_abi = self.device.get_installed_package_abi(self.package)
|
||||
if target_abi:
|
||||
self.logger.debug("Found apk with primary abi '{}' on target device".format(target_abi))
|
||||
|
||||
# Get host version, primary abi is first, and then try to find supported.
|
||||
for abi in self.device.supported_abi:
|
||||
self.apk_file = context.resolver.get(ApkFile(self, abi,
|
||||
package=getattr(self, 'package', None)),
|
||||
version=getattr(self, 'version', None),
|
||||
variant_name=getattr(self, 'variant_name', None),
|
||||
strict=False)
|
||||
|
||||
# Stop if apk found, or if exact_abi is set only look for primary abi.
|
||||
if self.apk_file or self.exact_abi:
|
||||
break
|
||||
|
||||
host_version = self.check_host_version()
|
||||
self.verify_apk_version(target_version, target_abi, host_version)
|
||||
def initialize_package(self, context):
|
||||
installed_version = self.device.get_package_version(self.package)
|
||||
if self.check_apk:
|
||||
self.initialize_with_host_apk(context, installed_version)
|
||||
else:
|
||||
if not installed_version:
|
||||
message = '''{} not found on the device and check_apk is set to "False"
|
||||
so host version was not checked.'''
|
||||
raise WorkloadError(message.format(self.package))
|
||||
message = 'Version {} installed on device; skipping host APK check.'
|
||||
self.logger.debug(message.format(installed_version))
|
||||
self.reset(context)
|
||||
self.apk_version = installed_version
|
||||
|
||||
def initialize_with_host_apk(self, context, installed_version):
|
||||
host_version = ApkInfo(self.apk_file).version_name
|
||||
if installed_version != host_version:
|
||||
if installed_version:
|
||||
message = '{} host version: {}, device version: {}; re-installing...'
|
||||
self.logger.debug(message.format(os.path.basename(self.apk_file),
|
||||
host_version, installed_version))
|
||||
else:
|
||||
message = '{} host version: {}, not found on device; installing...'
|
||||
self.logger.debug(message.format(os.path.basename(self.apk_file),
|
||||
host_version))
|
||||
self.force_install = True # pylint: disable=attribute-defined-outside-init
|
||||
else:
|
||||
message = '{} version {} found on both device and host.'
|
||||
self.logger.debug(message.format(os.path.basename(self.apk_file),
|
||||
host_version))
|
||||
if self.force_install:
|
||||
self.force_install_apk(context, host_version)
|
||||
elif self.check_apk:
|
||||
self.prefer_host_apk(context, host_version, target_version)
|
||||
if installed_version:
|
||||
self.device.uninstall(self.package)
|
||||
self.install_apk(context)
|
||||
else:
|
||||
self.prefer_target_apk(context, host_version, target_version)
|
||||
self.reset(context)
|
||||
self.apk_version = host_version
|
||||
|
||||
self.reset(context)
|
||||
self.apk_version = self.device.get_installed_package_version(self.package)
|
||||
context.add_classifiers(apk_version=self.apk_version)
|
||||
|
||||
def check_host_version(self):
|
||||
host_version = None
|
||||
if self.apk_file is not None:
|
||||
host_version = ApkInfo(self.apk_file).version_name
|
||||
if host_version:
|
||||
host_version = LooseVersion(host_version)
|
||||
self.logger.debug("Found version '{}' on host".format(host_version))
|
||||
return host_version
|
||||
|
||||
def verify_apk_version(self, target_version, target_abi, host_version):
|
||||
# Error if apk was not found anywhere
|
||||
if target_version is None and host_version is None:
|
||||
msg = "Could not find APK for '{}' on the host or target device"
|
||||
raise ResourceError(msg.format(self.name))
|
||||
|
||||
if self.exact_apk_version is not None:
|
||||
if self.exact_apk_version != target_version and self.exact_apk_version != host_version:
|
||||
msg = "APK version '{}' not found on the host '{}' or target '{}'"
|
||||
raise ResourceError(msg.format(self.exact_apk_version, host_version, target_version))
|
||||
|
||||
# Error if exact_abi and suitable apk not found on host and incorrect version on device
|
||||
if self.exact_abi and host_version is None:
|
||||
if target_abi != self.device.abi:
|
||||
msg = "APK abi '{}' not found on the host and target is '{}'"
|
||||
raise ResourceError(msg.format(self.device.abi, target_abi))
|
||||
|
||||
def launch_application(self):
|
||||
if self.launch_main:
|
||||
self.launch_package() # launch default activity without intent data
|
||||
|
||||
def kill_background(self):
|
||||
self.device.execute('am kill-all') # kill all *background* activities
|
||||
|
||||
def force_install_apk(self, context, host_version):
|
||||
if host_version is None:
|
||||
raise ResourceError("force_install is 'True' but could not find APK on the host")
|
||||
try:
|
||||
self.validate_version(host_version)
|
||||
except ResourceError as e:
|
||||
msg = "force_install is 'True' but the host version is invalid:\n\t{}"
|
||||
raise ResourceError(msg.format(str(e)))
|
||||
self.install_apk(context, replace=True)
|
||||
|
||||
def prefer_host_apk(self, context, host_version, target_version):
|
||||
msg = "check_apk is 'True' "
|
||||
if host_version is None:
|
||||
try:
|
||||
self.validate_version(target_version)
|
||||
except ResourceError as e:
|
||||
msg += "but the APK was not found on the host and the target version is invalid:\n\t{}"
|
||||
raise ResourceError(msg.format(str(e)))
|
||||
else:
|
||||
msg += "but the APK was not found on the host, using target version"
|
||||
self.logger.debug(msg)
|
||||
return
|
||||
try:
|
||||
self.validate_version(host_version)
|
||||
except ResourceError as e1:
|
||||
msg += "but the host APK version is invalid:\n\t{}\n"
|
||||
if target_version is None:
|
||||
msg += "The target does not have the app either"
|
||||
raise ResourceError(msg.format(str(e1)))
|
||||
try:
|
||||
self.validate_version(target_version)
|
||||
except ResourceError as e2:
|
||||
msg += "The target version is also invalid:\n\t{}"
|
||||
raise ResourceError(msg.format(str(e1), str(e2)))
|
||||
else:
|
||||
msg += "using the target version instead"
|
||||
self.logger.debug(msg.format(str(e1)))
|
||||
else: # Host version is valid
|
||||
if target_version is not None and target_version == host_version:
|
||||
msg += " and a matching version is already on the device, doing nothing"
|
||||
self.logger.debug(msg)
|
||||
return
|
||||
msg += " and the host version is not on the target, installing APK"
|
||||
self.logger.debug(msg)
|
||||
self.install_apk(context, replace=True)
|
||||
|
||||
def prefer_target_apk(self, context, host_version, target_version):
|
||||
msg = "check_apk is 'False' "
|
||||
if target_version is None:
|
||||
try:
|
||||
self.validate_version(host_version)
|
||||
except ResourceError as e:
|
||||
msg += "but the app was not found on the target and the host version is invalid:\n\t{}"
|
||||
raise ResourceError(msg.format(str(e)))
|
||||
else:
|
||||
msg += "and the app was not found on the target, using host version"
|
||||
self.logger.debug(msg)
|
||||
self.install_apk(context)
|
||||
return
|
||||
try:
|
||||
self.validate_version(target_version)
|
||||
except ResourceError as e1:
|
||||
msg += "but the target app version is invalid:\n\t{}\n"
|
||||
if host_version is None:
|
||||
msg += "The host does not have the APK either"
|
||||
raise ResourceError(msg.format(str(e1)))
|
||||
try:
|
||||
self.validate_version(host_version)
|
||||
except ResourceError as e2:
|
||||
msg += "The host version is also invalid:\n\t{}"
|
||||
raise ResourceError(msg.format(str(e1), str(e2)))
|
||||
else:
|
||||
msg += "Using the host APK instead"
|
||||
self.logger.debug(msg.format(str(e1)))
|
||||
self.install_apk(context, replace=True)
|
||||
else:
|
||||
msg += "and a valid version of the app is already on the target, using target app"
|
||||
self.logger.debug(msg)
|
||||
|
||||
def validate_version(self, version):
|
||||
min_apk_version = getattr(self, 'min_apk_version', None)
|
||||
max_apk_version = getattr(self, 'max_apk_version', None)
|
||||
|
||||
if min_apk_version is not None and max_apk_version is not None:
|
||||
if version < LooseVersion(min_apk_version) or \
|
||||
version > LooseVersion(max_apk_version):
|
||||
msg = "version '{}' not supported. " \
|
||||
"Minimum version required: '{}', Maximum version known to work: '{}'"
|
||||
raise ResourceError(msg.format(version, min_apk_version, max_apk_version))
|
||||
|
||||
elif min_apk_version is not None:
|
||||
if version < LooseVersion(min_apk_version):
|
||||
msg = "version '{}' not supported. " \
|
||||
"Minimum version required: '{}'"
|
||||
raise ResourceError(msg.format(version, min_apk_version))
|
||||
|
||||
elif max_apk_version is not None:
|
||||
if version > LooseVersion(max_apk_version):
|
||||
msg = "version '{}' not supported. " \
|
||||
"Maximum version known to work: '{}'"
|
||||
raise ResourceError(msg.format(version, max_apk_version))
|
||||
|
||||
def launch_package(self):
|
||||
if not self.activity:
|
||||
output = self.device.execute('am start -W {}'.format(self.package))
|
||||
else:
|
||||
output = self.device.execute('am start -W -n {}/{}'.format(self.package, self.activity))
|
||||
def start_activity(self):
|
||||
output = self.device.execute('am start -W -n {}/{}'.format(self.package, self.activity))
|
||||
if 'Error:' in output:
|
||||
self.device.execute('am force-stop {}'.format(self.package))  # this will dismiss any error dialogs
|
||||
raise WorkloadError(output)
|
||||
@@ -409,32 +234,23 @@ class ApkWorkload(Workload):
|
||||
|
||||
def reset(self, context): # pylint: disable=W0613
|
||||
self.device.execute('am force-stop {}'.format(self.package))
|
||||
if self.clear_data_on_reset:
|
||||
self.device.execute('pm clear {}'.format(self.package))
|
||||
self.device.execute('pm clear {}'.format(self.package))
|
||||
|
||||
# As of android API level 23, apps can request permissions at runtime,
|
||||
# this will grant all of them so requests do not pop up when running the app
|
||||
# This can also be done less "manually" during adb install using the -g flag
|
||||
if self.device.get_sdk_version() >= 23:
|
||||
if self.device.os_version['sdk'] >= 23:
|
||||
self._grant_requested_permissions()
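# Equivalent manual steps (illustrative package and permission names):
#
#     adb install -g my_workload.apk
#     adb shell pm grant com.example.benchmark android.permission.READ_EXTERNAL_STORAGE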
|
||||
|
||||
def install_apk(self, context, replace=False):
|
||||
success = False
|
||||
if replace and self.device.package_is_installed(self.package):
|
||||
self.device.uninstall(self.package)
|
||||
output = self.device.install_apk(self.apk_file, timeout=self.install_timeout,
|
||||
replace=replace, allow_downgrade=True)
|
||||
def install_apk(self, context):
|
||||
output = self.device.install(self.apk_file, self.install_timeout)
|
||||
if 'Failure' in output:
|
||||
if 'ALREADY_EXISTS' in output:
|
||||
self.logger.warn('Using already installed APK (did not uninstall properly?)')
|
||||
self.reset(context)
|
||||
else:
|
||||
raise WorkloadError(output)
|
||||
else:
|
||||
self.logger.debug(output)
|
||||
success = True
|
||||
self.do_post_install(context)
|
||||
return success
|
||||
|
||||
def _grant_requested_permissions(self):
|
||||
dumpsys_output = self.device.execute(command="dumpsys package {}".format(self.package))
|
||||
@@ -447,29 +263,17 @@ class ApkWorkload(Workload):
|
||||
for line in lines:
|
||||
if "android.permission." in line:
|
||||
permissions.append(line.split(":")[0].strip())
|
||||
# Matching either of these means the end of requested permissions section
|
||||
elif "install permissions:" in line or "runtime permissions:" in line:
|
||||
else:
|
||||
break
|
||||
|
||||
for permission in set(permissions):
|
||||
for permission in permissions:
|
||||
# "Normal" Permisions are automatically granted and cannot be changed
|
||||
permission_name = permission.rsplit('.', 1)[1]
|
||||
if permission_name not in ANDROID_NORMAL_PERMISSIONS:
|
||||
# Some permissions are not allowed to be "changed"
|
||||
if permission_name not in ANDROID_UNCHANGEABLE_PERMISSIONS:
|
||||
# On some API 23+ devices, this may fail with a SecurityException
|
||||
# on previously granted permissions. In that case, just skip as it
|
||||
# is not fatal to the workload execution
|
||||
try:
|
||||
self.device.execute("pm grant {} {}".format(self.package, permission))
|
||||
except DeviceError as e:
|
||||
if "changeable permission" in e.message or "Unknown permission" in e.message:
|
||||
self.logger.debug(e)
|
||||
else:
|
||||
raise e
|
||||
self.device.execute("pm grant {} {}".format(self.package, permission))
|
||||
|
||||
def do_post_install(self, context):
|
||||
""" May be overwritten by derived classes."""
|
||||
""" May be overwritten by dervied classes."""
|
||||
pass
|
||||
|
||||
def run(self, context):
|
||||
@@ -477,7 +281,7 @@ class ApkWorkload(Workload):
|
||||
|
||||
def update_result(self, context):
|
||||
self.logcat_log = os.path.join(context.output_directory, 'logcat.log')
|
||||
self.device.dump_logcat(self.logcat_log)
|
||||
context.device_manager.dump_logcat(self.logcat_log)
|
||||
context.add_iteration_artifact(name='logcat',
|
||||
path='logcat.log',
|
||||
kind='log',
|
||||
@@ -488,8 +292,71 @@ class ApkWorkload(Workload):
|
||||
if self.uninstall_apk:
|
||||
self.device.uninstall(self.package)
|
||||
|
||||
|
||||
AndroidBenchmark = ApkWorkload # backward compatibility
|
||||
|
||||
|
||||
class ReventWorkload(Workload):
|
||||
|
||||
default_setup_timeout = 5 * 60 # in seconds
|
||||
default_run_timeout = 10 * 60 # in seconds
|
||||
|
||||
def __init__(self, device, _call_super=True, **kwargs):
|
||||
if _call_super:
|
||||
super(ReventWorkload, self).__init__(device, **kwargs)
|
||||
devpath = self.device.path
|
||||
self.on_device_revent_binary = devpath.join(self.device.binaries_directory, 'revent')
|
||||
self.on_device_setup_revent = devpath.join(self.device.working_directory, '{}.setup.revent'.format(self.device.name))
|
||||
self.on_device_run_revent = devpath.join(self.device.working_directory, '{}.run.revent'.format(self.device.name))
|
||||
self.setup_timeout = kwargs.get('setup_timeout', self.default_setup_timeout)
|
||||
self.run_timeout = kwargs.get('run_timeout', self.default_run_timeout)
|
||||
self.revent_setup_file = None
|
||||
self.revent_run_file = None
|
||||
|
||||
def init_resources(self, context):
|
||||
self.revent_setup_file = context.resolver.get(wlauto.common.android.resources.ReventFile(self, 'setup'))
|
||||
self.revent_run_file = context.resolver.get(wlauto.common.android.resources.ReventFile(self, 'run'))
|
||||
|
||||
def setup(self, context):
|
||||
self._check_revent_files(context)
|
||||
self.device.killall('revent')
|
||||
command = '{} replay {}'.format(self.on_device_revent_binary, self.on_device_setup_revent)
|
||||
self.device.execute(command, timeout=self.setup_timeout)
|
||||
|
||||
def run(self, context):
|
||||
command = '{} replay {}'.format(self.on_device_revent_binary, self.on_device_run_revent)
|
||||
self.logger.debug('Replaying {}'.format(os.path.basename(self.on_device_run_revent)))
|
||||
self.device.execute(command, timeout=self.run_timeout)
|
||||
self.logger.debug('Replay completed.')
|
||||
|
||||
def update_result(self, context):
|
||||
pass
|
||||
|
||||
def teardown(self, context):
|
||||
self.device.remove(self.on_device_setup_revent)
|
||||
self.device.remove(self.on_device_run_revent)
|
||||
|
||||
def _check_revent_files(self, context):
|
||||
# check the revent binary
|
||||
revent_binary = context.resolver.get(Executable(NO_ONE, self.device.abi, 'revent'))
|
||||
if not os.path.isfile(revent_binary):
|
||||
message = '{} does not exist. '.format(revent_binary)
|
||||
message += 'Please build revent for your system and place it in that location'
|
||||
raise WorkloadError(message)
|
||||
if not self.revent_setup_file:
|
||||
# pylint: disable=too-few-format-args
|
||||
message = '{0}.setup.revent file does not exist. Please provide one for your device ({0}).'.format(self.device.name)
|
||||
raise WorkloadError(message)
|
||||
if not self.revent_run_file:
|
||||
# pylint: disable=too-few-format-args
|
||||
message = '{0}.run.revent file does not exist. Please provide one for your device ({0}).'.format(self.device.name)
|
||||
raise WorkloadError(message)
|
||||
|
||||
self.on_device_revent_binary = self.device.install_executable(revent_binary)
|
||||
self.device.push(self.revent_run_file, self.on_device_run_revent)
|
||||
self.device.push(self.revent_setup_file, self.on_device_setup_revent)
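# In practice this means that, for a device named e.g. 'generic_android', the
# resolver needs to find 'generic_android.setup.revent' and
# 'generic_android.run.revent' (typically in the workload's 'revent_files'
# directory or under ~/.workload_automation/dependencies/<workload>). On the
# target, the replay then amounts to running
#
#     revent replay <recording>.revent
#
# with the revent binary installed into the device's binaries directory.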
|
||||
|
||||
|
||||
class AndroidUiAutoBenchmark(UiAutomatorWorkload, AndroidBenchmark):
|
||||
|
||||
supported_platforms = ['android']
|
||||
@@ -498,11 +365,6 @@ class AndroidUiAutoBenchmark(UiAutomatorWorkload, AndroidBenchmark):
|
||||
UiAutomatorWorkload.__init__(self, device, **kwargs)
|
||||
AndroidBenchmark.__init__(self, device, _call_super=False, **kwargs)
|
||||
|
||||
def initialize(self, context):
|
||||
UiAutomatorWorkload.initialize(self, context)
|
||||
AndroidBenchmark.initialize(self, context)
|
||||
self._check_unsupported_packages()
|
||||
|
||||
def init_resources(self, context):
|
||||
UiAutomatorWorkload.init_resources(self, context)
|
||||
AndroidBenchmark.init_resources(self, context)
|
||||
@@ -519,98 +381,6 @@ class AndroidUiAutoBenchmark(UiAutomatorWorkload, AndroidBenchmark):
|
||||
UiAutomatorWorkload.teardown(self, context)
|
||||
AndroidBenchmark.teardown(self, context)
|
||||
|
||||
def _check_unsupported_packages(self):
|
||||
"""
|
||||
Check for any unsupported package versions and raise an
|
||||
exception if detected.
|
||||
|
||||
"""
|
||||
for package in UNSUPPORTED_PACKAGES:
|
||||
version = self.device.get_installed_package_version(package)
|
||||
if version is None:
|
||||
continue
|
||||
|
||||
if '-' in version:
|
||||
version = version.split('-')[0] # ignore abi version
|
||||
|
||||
if version in UNSUPPORTED_PACKAGES[package]:
|
||||
message = 'This workload does not support version "{}" of package "{}"'
|
||||
raise WorkloadError(message.format(version, package))
|
||||
|
||||
|
||||
class AndroidUxPerfWorkloadMeta(ExtensionMeta):
|
||||
to_propagate = ExtensionMeta.to_propagate + [('deployable_assets', str, ListCollection)]
|
||||
|
||||
|
||||
class AndroidUxPerfWorkload(AndroidUiAutoBenchmark):
|
||||
__metaclass__ = AndroidUxPerfWorkloadMeta
|
||||
|
||||
deployable_assets = []
|
||||
parameters = [
|
||||
Parameter('markers_enabled', kind=bool, default=False,
|
||||
description="""
|
||||
If ``True``, UX_PERF action markers will be emitted to logcat during
|
||||
the test run.
|
||||
"""),
|
||||
Parameter('clean_assets', kind=bool, default=False,
|
||||
description="""
|
||||
If ``True``, pushed assets will be deleted at the end of each iteration.
|
||||
"""),
|
||||
Parameter('force_push_assets', kind=bool, default=False,
|
||||
description="""
|
||||
If ``True``, always push assets on each iteration, even if the
|
||||
assets already exist in the device path.
|
||||
"""),
|
||||
]
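# A sketch of how these parameters are typically set from an agenda; the
# workload name and values below are illustrative only:
#
#     workloads:
#         - name: some_uxperf_workload
#           workload_parameters:
#               markers_enabled: true
#               force_push_assets: true
#               clean_assets: false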
|
||||
|
||||
def _path_on_device(self, fpath, dirname=None):
|
||||
if dirname is None:
|
||||
dirname = self.device.working_directory
|
||||
fname = os.path.basename(fpath)
|
||||
return self.device.path.join(dirname, fname)
|
||||
|
||||
def push_assets(self, context):
|
||||
pushed = False
|
||||
file_list = []
|
||||
for f in self.deployable_assets:
|
||||
fpath = context.resolver.get(File(self, f))
|
||||
device_path = self._path_on_device(fpath)
|
||||
if self.force_push_assets or not self.device.file_exists(device_path):
|
||||
self.device.push_file(fpath, device_path, timeout=300)
|
||||
file_list.append(device_path)
|
||||
pushed = True
|
||||
if pushed:
|
||||
self.device.refresh_device_files(file_list)
|
||||
|
||||
def delete_assets(self):
|
||||
if self.deployable_assets:
|
||||
file_list = []
|
||||
for f in self.deployable_assets:
|
||||
f = self._path_on_device(f)
|
||||
self.device.delete_file(f)
|
||||
file_list.append(f)
|
||||
self.device.delete_file(f)
|
||||
self.device.refresh_device_files(file_list)
|
||||
|
||||
def __init__(self, device, **kwargs):
|
||||
super(AndroidUxPerfWorkload, self).__init__(device, **kwargs)
|
||||
# Turn class attribute into instance attribute
|
||||
self.deployable_assets = list(self.deployable_assets)
|
||||
|
||||
def validate(self):
|
||||
super(AndroidUxPerfWorkload, self).validate()
|
||||
self.uiauto_params['package_name'] = self.package
|
||||
self.uiauto_params['markers_enabled'] = self.markers_enabled
|
||||
|
||||
def setup(self, context):
|
||||
super(AndroidUxPerfWorkload, self).setup(context)
|
||||
self.push_assets(context)
|
||||
|
||||
def teardown(self, context):
|
||||
super(AndroidUxPerfWorkload, self).teardown(context)
|
||||
if self.clean_assets:
|
||||
self.delete_assets()
|
||||
|
||||
|
||||
class GameWorkload(ApkWorkload, ReventWorkload):
|
||||
"""
|
||||
@@ -644,22 +414,21 @@ class GameWorkload(ApkWorkload, ReventWorkload):
|
||||
view = 'SurfaceView'
|
||||
loading_time = 10
|
||||
supported_platforms = ['android']
|
||||
setup_required = True
|
||||
|
||||
parameters = [
|
||||
Parameter('install_timeout', default=500, override=True),
|
||||
Parameter('check_states', kind=bool, default=False, global_alias='check_game_states',
|
||||
description="""Use visual state detection to verify the state of the workload
|
||||
after setup and run"""),
|
||||
Parameter('assets_push_timeout', kind=int, default=500,
|
||||
description='Timeout used during deployment of the assets package (if there is one).'),
|
||||
Parameter('clear_data_on_reset', kind=bool, default=True,
|
||||
description="""
|
||||
If set to ``False``, this will prevent WA from clearing package
|
||||
data for this workload prior to running it.
|
||||
"""),
|
||||
]
|
||||
|
||||
def __init__(self, device, **kwargs): # pylint: disable=W0613
|
||||
ApkWorkload.__init__(self, device, **kwargs)
|
||||
ReventWorkload.__init__(self, device, _call_super=False, **kwargs)
|
||||
if self.check_states:
|
||||
state_detector.check_match_state_dependencies()
|
||||
self.logcat_process = None
|
||||
self.module_dir = os.path.dirname(sys.modules[self.__module__].__file__)
|
||||
self.revent_dir = os.path.join(self.module_dir, 'revent_files')
|
||||
@@ -667,8 +436,6 @@ class GameWorkload(ApkWorkload, ReventWorkload):
|
||||
def init_resources(self, context):
|
||||
ApkWorkload.init_resources(self, context)
|
||||
ReventWorkload.init_resources(self, context)
|
||||
if self.check_states:
|
||||
self._check_statedetection_files(context)
|
||||
|
||||
def setup(self, context):
|
||||
ApkWorkload.setup(self, context)
|
||||
@@ -676,10 +443,6 @@ class GameWorkload(ApkWorkload, ReventWorkload):
|
||||
time.sleep(self.loading_time)
|
||||
ReventWorkload.setup(self, context)
|
||||
|
||||
# state detection check if it's enabled in the config
|
||||
if self.check_states:
|
||||
self.check_state(context, "setup_complete")
|
||||
|
||||
def do_post_install(self, context):
|
||||
ApkWorkload.do_post_install(self, context)
|
||||
self._deploy_assets(context, self.assets_push_timeout)
|
||||
@@ -699,10 +462,6 @@ class GameWorkload(ApkWorkload, ReventWorkload):
|
||||
ReventWorkload.run(self, context)
|
||||
|
||||
def teardown(self, context):
|
||||
# state detection check if it's enabled in the config
|
||||
if self.check_states:
|
||||
self.check_state(context, "run_complete")
|
||||
|
||||
if not self.saved_state_file:
|
||||
ApkWorkload.teardown(self, context)
|
||||
else:
|
||||
@@ -727,29 +486,10 @@ class GameWorkload(ApkWorkload, ReventWorkload):
|
||||
raise WorkloadError(message.format(resource_file, self.name))
|
||||
# adb push will create intermediate directories if they don't
|
||||
# exist.
|
||||
self.device.push_file(asset_tarball, ondevice_cache, timeout=timeout)
|
||||
self.device.push(asset_tarball, ondevice_cache, timeout=timeout)
|
||||
|
||||
device_asset_directory = self.device.path.join(self.device.external_storage_directory, 'Android', kind)
|
||||
device_asset_directory = self.device.path.join(self.context.device_manager.external_storage_directory, 'Android', kind)
|
||||
deploy_command = 'cd {} && {} tar -xzf {}'.format(device_asset_directory,
|
||||
self.device.busybox,
|
||||
ondevice_cache)
|
||||
self.device.execute(deploy_command, timeout=timeout, as_root=True)
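# For illustration: after the assets tarball has been pushed to the on-device
# cache path, the expanded command executed on the target looks roughly like
#
#     cd /sdcard/Android/<kind> && <busybox> tar -xzf <ondevice_cache>
#
# where <busybox> and <ondevice_cache> stand for the deployed busybox binary
# and the pushed tarball, and the external storage prefix varies per device.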
|
||||
|
||||
def _check_statedetection_files(self, context):
|
||||
try:
|
||||
self.statedefs_dir = context.resolver.get(File(self, 'state_definitions'))
|
||||
except ResourceError:
|
||||
self.logger.warning("State definitions directory not found. Disabling state detection.")
|
||||
self.check_states = False # pylint: disable=W0201
|
||||
|
||||
def check_state(self, context, phase):
|
||||
try:
|
||||
self.logger.info("\tChecking workload state...")
|
||||
screenshotPath = os.path.join(context.output_directory, "screen.png")
|
||||
self.device.capture_screen(screenshotPath)
|
||||
stateCheck = state_detector.verify_state(screenshotPath, self.statedefs_dir, phase)
|
||||
if not stateCheck:
|
||||
raise WorkloadError("Unexpected state after setup")
|
||||
except state_detector.StateDefinitionError as e:
|
||||
msg = "State definitions or template files missing or invalid ({}). Skipping state detection."
|
||||
self.logger.warning(msg.format(e.message))
|
||||
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -12,3 +12,5 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
|
@@ -1,684 +0,0 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# Original implementation by Rene de Jong. Updated by Sascha Bischoff.
|
||||
|
||||
# pylint: disable=E1101
|
||||
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import socket
|
||||
import subprocess
|
||||
import sys
|
||||
import tarfile
|
||||
import time
|
||||
from pexpect import EOF, TIMEOUT, pxssh
|
||||
|
||||
from wlauto import settings, Parameter
|
||||
from wlauto.core.resource import NO_ONE
|
||||
from wlauto.common.resources import Executable
|
||||
from wlauto.core import signal as sig
|
||||
from wlauto.exceptions import DeviceError
|
||||
from wlauto.utils import ssh, types
|
||||
|
||||
|
||||
class BaseGem5Device(object):
|
||||
"""
|
||||
Base implementation for a gem5-based device
|
||||
|
||||
This class is used as the base class for OS-specific devices such as the
|
||||
Gem5LinuxDevice and the Gem5AndroidDevice. The majority of the gem5-specific
|
||||
functionality is included here.
|
||||
|
||||
Note: When inheriting from this class, make sure to inherit from this class
|
||||
prior to inheriting from the OS-specific class, i.e. LinuxDevice, to ensure
|
||||
that the methods are correctly overridden.
|
||||
"""
|
||||
# gem5 can be very slow. Hence, we use some very long timeouts!
|
||||
delay = 3600
|
||||
long_delay = 3 * delay
|
||||
ready_timeout = long_delay
|
||||
default_timeout = delay
|
||||
|
||||
platform = None
|
||||
path_module = 'posixpath'
|
||||
|
||||
parameters = [
|
||||
Parameter('gem5_binary', kind=str, default='./build/ARM/gem5.fast',
|
||||
mandatory=False, description="Command used to execute gem5. "
|
||||
"Adjust according to needs."),
|
||||
Parameter('gem5_args', kind=types.arguments, mandatory=True,
|
||||
description="Command line passed to the gem5 simulation. This"
|
||||
" command line is used to set up the simulated system, and "
|
||||
"should be the same as used for a standard gem5 simulation "
|
||||
"without workload automation. Note that this is simulation "
|
||||
"script specific and will hence need to be tailored to each "
|
||||
"particular use case."),
|
||||
Parameter('gem5_vio_args', kind=types.arguments, mandatory=True,
|
||||
constraint=lambda x: "{}" in str(x),
|
||||
description="gem5 VirtIO command line used to enable the "
|
||||
"VirtIO device in the simulated system. At the very least, "
|
||||
"the root parameter of the VirtIO9PDiod device must be "
|
||||
"exposed on the command line. Please set this root mount to "
|
||||
"{}, as it will be replaced with the directory used by "
|
||||
"Workload Automation at runtime."),
|
||||
Parameter('temp_dir', kind=str, default='/tmp',
|
||||
description="Temporary directory used to pass files into the "
|
||||
"gem5 simulation. Workload Automation will automatically "
|
||||
"create a directory in this folder, and will remove it again "
|
||||
"once the simulation completes."),
|
||||
Parameter('checkpoint', kind=bool, default=False,
|
||||
mandatory=False, description="This parameter "
|
||||
"tells Workload Automation to create a checkpoint of the "
|
||||
"simulated system once the guest system has finished booting."
|
||||
" This checkpoint can then be used at a later stage by other "
|
||||
"WA runs to avoid booting the guest system a second time. Set"
|
||||
" to True to take a checkpoint of the simulated system post "
|
||||
"boot."),
|
||||
Parameter('run_delay', kind=int, default=0, mandatory=False,
|
||||
constraint=lambda x: x >= 0,
|
||||
description="This sets the time that the "
|
||||
"system should sleep in the simulated system prior to "
|
||||
"running and workloads or taking checkpoints. This allows "
|
||||
"the system to quieten down prior to running the workloads. "
|
||||
"When this is combined with the checkpoint_post_boot"
|
||||
" option, it allows the checkpoint to be created post-sleep,"
|
||||
" and therefore the set of workloads resuming from this "
|
||||
"checkpoint will not be required to sleep.")
|
||||
]
|
||||
|
||||
@property
|
||||
def is_rooted(self): # pylint: disable=R0201
|
||||
# gem5 is always rooted
|
||||
return True
|
||||
|
||||
# pylint: disable=E0203
|
||||
def __init__(self):
|
||||
self.logger = logging.getLogger('gem5Device')
|
||||
|
||||
# The gem5 subprocess
|
||||
self.gem5 = None
|
||||
self.gem5_port = -1
|
||||
self.gem5outdir = os.path.join(settings.output_directory, "gem5")
|
||||
self.m5_path = 'm5'
|
||||
self.stdout_file = None
|
||||
self.stderr_file = None
|
||||
self.stderr_filename = None
|
||||
self.sckt = None
|
||||
|
||||
# Find the first one that does not exist. Ensures that we do not re-use
|
||||
# the directory used by someone else.
|
||||
for i in xrange(sys.maxint):
|
||||
directory = os.path.join(self.temp_dir, "wa_{}".format(i))
|
||||
try:
|
||||
os.stat(directory)
|
||||
continue
|
||||
except OSError:
|
||||
break
|
||||
self.temp_dir = directory
|
||||
self.logger.debug("Using {} as the temporary directory.".format(self.temp_dir))
|
||||
|
||||
# Start the gem5 simulation when WA starts a run using a signal.
|
||||
sig.connect(self.init_gem5, sig.RUN_START)
|
||||
|
||||
def validate(self):
|
||||
# Assemble the virtio args
|
||||
self.gem5_vio_args = str(self.gem5_vio_args).format(self.temp_dir) # pylint: disable=W0201
|
||||
self.logger.debug("gem5 VirtIO command: {}".format(self.gem5_vio_args))
|
||||
|
||||
def init_gem5(self, _):
|
||||
"""
|
||||
Start gem5, find out the telnet port and connect to the simulation.
|
||||
|
||||
We first create the temporary directory used by VirtIO to pass files
|
||||
into the simulation, as well as the gem5 output directory. We then create
|
||||
files for the standard output and error for the gem5 process. The gem5
|
||||
process is then started.
|
||||
"""
|
||||
self.logger.info("Creating temporary directory: {}".format(self.temp_dir))
|
||||
os.mkdir(self.temp_dir)
|
||||
os.mkdir(self.gem5outdir)
|
||||
|
||||
# We need to redirect the standard output and standard error for the
|
||||
# gem5 process to a file so that we can debug when things go wrong.
|
||||
f = os.path.join(self.gem5outdir, 'stdout')
|
||||
self.stdout_file = open(f, 'w')
|
||||
f = os.path.join(self.gem5outdir, 'stderr')
|
||||
self.stderr_file = open(f, 'w')
|
||||
# We need to keep this so we can check which port to use for the telnet
|
||||
# connection.
|
||||
self.stderr_filename = f
|
||||
|
||||
self.start_gem5()
|
||||
|
||||
def start_gem5(self):
|
||||
"""
|
||||
Starts the gem5 simulator, and parses the output to get the telnet port.
|
||||
"""
|
||||
self.logger.info("Starting the gem5 simulator")
|
||||
|
||||
command_line = "{} --outdir={}/gem5 {} {}".format(self.gem5_binary,
|
||||
settings.output_directory,
|
||||
self.gem5_args,
|
||||
self.gem5_vio_args)
|
||||
self.logger.debug("gem5 command line: {}".format(command_line))
|
||||
self.gem5 = subprocess.Popen(command_line.split(),
|
||||
stdout=self.stdout_file,
|
||||
stderr=self.stderr_file)
|
||||
|
||||
while self.gem5_port == -1:
|
||||
# Check that gem5 is running!
|
||||
if self.gem5.poll():
|
||||
raise DeviceError("The gem5 process has crashed with error code {}!".format(self.gem5.poll()))
|
||||
|
||||
# Open the stderr file
|
||||
f = open(self.stderr_filename, 'r')
|
||||
for line in f:
|
||||
m = re.search(r"Listening\ for\ system\ connection\ on\ port\ (?P<port>\d+)", line)
|
||||
if m:
|
||||
port = int(m.group('port'))
|
||||
if port >= 3456 and port < 5900:
|
||||
self.gem5_port = port
|
||||
f.close()
|
||||
break
|
||||
else:
|
||||
time.sleep(1)
|
||||
f.close()
|
||||
|
||||
def connect(self): # pylint: disable=R0912,W0201
|
||||
"""
|
||||
Connect to the gem5 simulation and wait for Android to boot. Then,
|
||||
create checkpoints, and mount the VirtIO device.
|
||||
"""
|
||||
self.connect_gem5()
|
||||
|
||||
self.wait_for_boot()
|
||||
|
||||
if self.run_delay:
|
||||
self.logger.info("Sleeping for {} seconds in the guest".format(self.run_delay))
|
||||
self.gem5_shell("sleep {}".format(self.run_delay))
|
||||
|
||||
if self.checkpoint:
|
||||
self.checkpoint_gem5()
|
||||
|
||||
self.mount_virtio()
|
||||
self.logger.info("Creating the working directory in the simulated system")
|
||||
self.gem5_shell('mkdir -p {}'.format(self.working_directory))
|
||||
self._is_ready = True # pylint: disable=W0201
|
||||
|
||||
def wait_for_boot(self):
|
||||
pass
|
||||
|
||||
def connect_gem5(self): # pylint: disable=R0912
|
||||
"""
|
||||
Connect to the telnet port of the gem5 simulation.
|
||||
|
||||
We connect, and wait for the prompt to be found. We do not use a timeout
|
||||
for this, and wait for the prompt in a while loop as the gem5 simulation
|
||||
can take many hours to reach a prompt when booting the system. We also
|
||||
inject some newlines periodically to try and force gem5 to show a
|
||||
prompt. Once the prompt has been found, we replace it with a unique
|
||||
prompt to ensure that we are able to match it properly. We also disable
|
||||
the echo as this simplifies parsing the output when executing commands
|
||||
on the device.
|
||||
"""
|
||||
self.logger.info("Connecting to the gem5 simulation on port {}".format(self.gem5_port))
|
||||
host = socket.gethostname()
|
||||
port = self.gem5_port
|
||||
|
||||
# Connect to the gem5 telnet port. Use a short timeout here.
|
||||
attempts = 0
|
||||
while attempts < 10:
|
||||
attempts += 1
|
||||
try:
|
||||
self.sckt = ssh.TelnetConnection()
|
||||
self.sckt.login(host, 'None', port=port, auto_prompt_reset=False,
|
||||
login_timeout=10)
|
||||
break
|
||||
except pxssh.ExceptionPxssh:
|
||||
pass
|
||||
else:
|
||||
self.gem5.kill()
|
||||
raise DeviceError("Failed to connect to the gem5 telnet session.")
|
||||
|
||||
self.logger.info("Connected! Waiting for prompt...")
|
||||
|
||||
# We need to find the prompt. It might be different if we are resuming
|
||||
# from a checkpoint. Therefore, we test multiple options here.
|
||||
prompt_found = False
|
||||
while not prompt_found:
|
||||
try:
|
||||
self.login_to_device()
|
||||
except TIMEOUT:
|
||||
pass
|
||||
try:
|
||||
# Try and force a prompt to be shown
|
||||
self.sckt.send('\n')
|
||||
self.sckt.expect([r'# ', self.sckt.UNIQUE_PROMPT, r'\[PEXPECT\][\\\$\#]+ '], timeout=60)
|
||||
prompt_found = True
|
||||
except TIMEOUT:
|
||||
pass
|
||||
|
||||
self.logger.info("Setting unique prompt...")
|
||||
|
||||
self.sckt.set_unique_prompt()
|
||||
self.sckt.prompt()
|
||||
self.logger.info("Prompt found and replaced with a unique string")
|
||||
|
||||
# We check that the prompt is what we think it should be. If not, we
|
||||
# need to update the regex we use to match.
|
||||
self.find_prompt()
|
||||
|
||||
self.sckt.setecho(False)
|
||||
self.sync_gem5_shell()
|
||||
self.resize_shell()
|
||||
|
||||
def get_properties(self, context): # pylint: disable=R0801
|
||||
""" Get the property files from the device """
|
||||
for propfile in self.property_files:
|
||||
try:
|
||||
normname = propfile.lstrip(self.path.sep).replace(self.path.sep, '.')
|
||||
outfile = os.path.join(context.host_working_directory, normname)
|
||||
if self.is_file(propfile):
|
||||
self.execute('cat {} > {}'.format(propfile, normname))
|
||||
self.pull_file(normname, outfile)
|
||||
elif self.is_directory(propfile):
|
||||
self.get_directory(context, propfile)
|
||||
continue
|
||||
else:
|
||||
continue
|
||||
except DeviceError:
|
||||
# We pull these files "opportunistically", so if a pull fails
|
||||
# (e.g. we don't have permissions to read the file), just note
|
||||
# it quietly (not as an error/warning) and move on.
|
||||
self.logger.debug('Could not pull property file "{}"'.format(propfile))
|
||||
return {}
|
||||
|
||||
def get_directory(self, context, directory):
|
||||
""" Pull a directory from the device """
|
||||
normname = directory.lstrip(self.path.sep).replace(self.path.sep, '.')
|
||||
outdir = os.path.join(context.host_working_directory, normname)
|
||||
temp_file = os.path.join(context.host_working_directory, "{}.tar".format(normname))
|
||||
# Check that the folder exists
|
||||
self.gem5_shell("ls -la {}".format(directory))
|
||||
# Compress the folder
|
||||
try:
|
||||
self.gem5_shell("{} tar -cvf {}.tar {}".format(self.busybox, normname, directory))
|
||||
except DeviceError:
|
||||
self.logger.debug("Failed to run tar command on device! Not pulling {}".format(directory))
|
||||
return
|
||||
self.pull_file(normname, temp_file)
|
||||
f = tarfile.open(temp_file, 'r')
|
||||
os.mkdir(outdir)
|
||||
f.extractall(outdir)
|
||||
os.remove(temp_file)
|
||||
|
||||
def get_pids_of(self, process_name):
|
||||
""" Returns a list of PIDs of all processes with the specified name. """
|
||||
result = self.gem5_shell('ps | {} grep {}'.format(self.busybox, process_name),
|
||||
check_exit_code=False).strip()
|
||||
if result and 'not found' not in result and len(result.split('\n')) > 2:
|
||||
return [int(x.split()[1]) for x in result.split('\n')]
|
||||
else:
|
||||
return []
|
||||
|
||||
def find_prompt(self):
|
||||
prompt = r'\[PEXPECT\][\\\$\#]+ '
|
||||
synced = False
|
||||
while not synced:
|
||||
self.sckt.send('\n')
|
||||
i = self.sckt.expect([prompt, self.sckt.UNIQUE_PROMPT, r'[\$\#] '], timeout=self.delay)
|
||||
if i == 0:
|
||||
synced = True
|
||||
elif i == 1:
|
||||
prompt = self.sckt.UNIQUE_PROMPT
|
||||
synced = True
|
||||
else:
|
||||
prompt = re.sub(r'\$', r'\\\$', self.sckt.before.strip() + self.sckt.after.strip())
|
||||
prompt = re.sub(r'\#', r'\\\#', prompt)
|
||||
prompt = re.sub(r'\[', r'\[', prompt)
|
||||
prompt = re.sub(r'\]', r'\]', prompt)
|
||||
|
||||
self.sckt.PROMPT = prompt
|
||||
|
||||
def close(self):
|
||||
if self._logcat_poller:
|
||||
self._logcat_poller.stop()
|
||||
|
||||
def reset(self):
|
||||
self.logger.warn("Attempt to restart the gem5 device. This is not "
|
||||
"supported!")
|
||||
|
||||
# pylint: disable=unused-argument
|
||||
def push_file(self, source, dest, **kwargs):
|
||||
"""
|
||||
Push a file to the gem5 device using VirtIO
|
||||
|
||||
The file to push to the device is copied to the temporary directory on
|
||||
the host, before being copied within the simulation to the destination.
|
||||
Checks, in the form of 'ls' with error code checking, are performed to
|
||||
ensure that the file is copied to the destination.
|
||||
"""
|
||||
filename = os.path.basename(source)
|
||||
self.logger.debug("Pushing {} to device.".format(source))
|
||||
self.logger.debug("temp_dir: {}".format(self.temp_dir))
|
||||
self.logger.debug("dest: {}".format(dest))
|
||||
self.logger.debug("filename: {}".format(filename))
|
||||
|
||||
# We need to copy the file to the temporary directory first
|
||||
self.move_to_temp_dir(source)
|
||||
|
||||
# Back to the gem5 world
|
||||
self.gem5_shell("ls -al /mnt/obb/{}".format(filename))
|
||||
if self.busybox:
|
||||
self.gem5_shell("{} cp /mnt/obb/{} {}".format(self.busybox, filename, dest))
|
||||
else:
|
||||
self.gem5_shell("cat /mnt/obb/{} > {}".format(filename, dest))
|
||||
self.gem5_shell("sync")
|
||||
self.gem5_shell("ls -al {}".format(dest))
|
||||
self.gem5_shell("ls -al /mnt/obb/")
|
||||
self.logger.debug("Push complete.")
|
||||
|
||||
# pylint: disable=unused-argument
|
||||
def pull_file(self, source, dest, **kwargs):
|
||||
"""
|
||||
Pull a file from the gem5 device using m5 writefile
|
||||
|
||||
The file is copied to the local directory within the guest as the m5
|
||||
writefile command assumes that the file is local. The file is then
|
||||
written out to the host system using writefile, prior to being moved to
|
||||
the destination on the host.
|
||||
"""
|
||||
filename = os.path.basename(source)
|
||||
|
||||
self.logger.debug("pull_file {} {}".format(source, filename))
|
||||
# We don't check the exit code here because it is non-zero if the source
|
||||
# and destination are the same. The ls below will cause an error if the
|
||||
# file was not where we expected it to be.
|
||||
self.gem5_shell("{} cp {} {}".format(self.busybox, source, filename),
|
||||
check_exit_code=False)
|
||||
self.gem5_shell("sync")
|
||||
self.gem5_shell("ls -la {}".format(filename))
|
||||
self.logger.debug('Finished the copy in the simulator')
|
||||
self.gem5_util("writefile {}".format(filename))
|
||||
|
||||
if 'cpu' not in filename:
|
||||
while not os.path.exists(os.path.join(self.gem5outdir, filename)):
|
||||
time.sleep(1)
|
||||
|
||||
# Perform the local move
|
||||
shutil.move(os.path.join(self.gem5outdir, filename), dest)
|
||||
self.logger.debug("Pull complete.")
|
||||
|
||||
# pylint: disable=unused-argument
|
||||
def delete_file(self, filepath, **kwargs):
|
||||
""" Delete a file on the device """
|
||||
self._check_ready()
|
||||
self.gem5_shell("rm '{}'".format(filepath))
|
||||
|
||||
def file_exists(self, filepath):
|
||||
""" Check if a file exists """
|
||||
self._check_ready()
|
||||
output = self.gem5_shell('if [ -e \'{}\' ]; then echo 1; else echo 0; fi'.format(filepath))
|
||||
try:
|
||||
if int(output):
|
||||
return True
|
||||
except ValueError:
|
||||
# If we cannot process the output, assume that there is no file
|
||||
pass
|
||||
return False
|
||||
|
||||
def disconnect(self):
|
||||
"""
|
||||
Close and disconnect from the gem5 simulation. Additionally, we remove
|
||||
the temporary directory used to pass files into the simulation.
|
||||
"""
|
||||
self.logger.info("Gracefully terminating the gem5 simulation.")
|
||||
try:
|
||||
self.gem5_util("exit")
|
||||
self.gem5.wait()
|
||||
except EOF:
|
||||
pass
|
||||
self.logger.info("Removing the temporary directory")
|
||||
try:
|
||||
shutil.rmtree(self.temp_dir)
|
||||
except OSError:
|
||||
self.logger.warn("Failed to remove the temporary directory!")
|
||||
|
||||
# gem5 might be slow. Hence, we need to make the ping timeout very long.
|
||||
def ping(self):
|
||||
self.logger.debug("Pinging gem5 to see if it is still alive")
|
||||
self.gem5_shell('ls /', timeout=self.long_delay)
|
||||
|
||||
# Additional Android-specific methods.
|
||||
def forward_port(self, _): # pylint: disable=R0201
|
||||
raise DeviceError('we do not need forwarding')
|
||||
|
||||
# gem5 should dump out a framebuffer. We can use this if it exists. Failing
|
||||
# that, fall back to the parent class implementation.
|
||||
def capture_screen(self, filepath):
|
||||
file_list = os.listdir(self.gem5outdir)
|
||||
screen_caps = []
|
||||
for f in file_list:
|
||||
if '.bmp' in f:
|
||||
screen_caps.append(f)
|
||||
|
||||
if len(screen_caps) == 1:
|
||||
# Bail out if we do not have an image, and resort to the slower, built
|
||||
# in method.
|
||||
try:
|
||||
import Image
|
||||
gem5_image = os.path.join(self.gem5outdir, screen_caps[0])
|
||||
temp_image = os.path.join(self.gem5outdir, "file.png")
|
||||
im = Image.open(gem5_image)
|
||||
im.save(temp_image, "PNG")
|
||||
shutil.copy(temp_image, filepath)
|
||||
os.remove(temp_image)
|
||||
self.logger.debug("capture_screen: using gem5 screencap")
|
||||
return True
|
||||
except (shutil.Error, ImportError, IOError):
|
||||
pass
|
||||
return False
|
||||
|
||||
# pylint: disable=W0613
|
||||
def execute(self, command, timeout=1000, check_exit_code=True, background=False,
|
||||
as_root=False, busybox=False, **kwargs):
|
||||
self._check_ready()
|
||||
if as_root and not self.is_rooted:
|
||||
raise DeviceError('Attempting to execute "{}" as root on unrooted device.'.format(command))
|
||||
if busybox:
|
||||
if not self.is_rooted:
|
||||
raise DeviceError('Attempting to execute "{}" with busybox. '.format(command) +
|
||||
'Busybox can only be deployed to rooted devices.')
|
||||
command = ' '.join([self.busybox, command])
|
||||
if background:
|
||||
self.logger.debug("Attempt to execute in background. Not supported "
|
||||
"in gem5, hence ignored.")
|
||||
return self.gem5_shell(command, as_root=as_root)
|
||||
|
||||
# Internal methods: do not use outside of the class.
|
||||
|
||||
def _check_ready(self):
|
||||
"""
|
||||
Check if the device is ready.
|
||||
|
||||
As this is gem5, we just assume that the device is ready once we have
|
||||
connected to the gem5 simulation, and updated the prompt.
|
||||
"""
|
||||
if not self._is_ready:
|
||||
raise DeviceError('Device not ready.')
|
||||
|
||||
def gem5_shell(self, command, as_root=False, timeout=None, check_exit_code=True, sync=True): # pylint: disable=R0912
|
||||
"""
|
||||
Execute a command in the gem5 shell
|
||||
|
||||
This wraps the telnet connection to gem5 and processes the raw output.
|
||||
|
||||
This method waits for the shell to return, and then will try and
|
||||
separate the command's output from the echoed command itself. If this
|
||||
fails, warn, but continue with the potentially wrong output.
|
||||
|
||||
The exit code is also checked by default, and non-zero exit codes will
|
||||
raise a DeviceError.
|
||||
"""
|
||||
conn = self.sckt
|
||||
if sync:
|
||||
self.sync_gem5_shell()
|
||||
|
||||
self.logger.debug("gem5_shell command: {}".format(command))
|
||||
|
||||
# Send the actual command
|
||||
conn.send("{}\n".format(command))
|
||||
|
||||
# Wait for the response. We just sit here and wait for the prompt to
|
||||
# appear, as gem5 might take a long time to provide the output. This
|
||||
# avoids timeout issues.
|
||||
command_index = -1
|
||||
while command_index == -1:
|
||||
if conn.prompt():
|
||||
output = re.sub(r' \r([^\n])', r'\1', conn.before)
|
||||
output = re.sub(r'[\b]', r'', output)
|
||||
# Deal with line wrapping
|
||||
output = re.sub(r'[\r].+?<', r'', output)
|
||||
command_index = output.find(command)
|
||||
|
||||
# If we have -1, then we cannot match the command, but the
|
||||
# prompt has returned. Hence, we have a bit of an issue. We
|
||||
# warn, and return the whole output.
|
||||
if command_index == -1:
|
||||
self.logger.warn("gem5_shell: Unable to match command in "
|
||||
"command output. Expect parsing errors!")
|
||||
command_index = 0
|
||||
|
||||
output = output[command_index + len(command):].strip()
|
||||
|
||||
# It is possible that gem5 will echo the command. Therefore, we need to
|
||||
# remove that too!
|
||||
command_index = output.find(command)
|
||||
if command_index != -1:
|
||||
output = output[command_index + len(command):].strip()
|
||||
|
||||
self.logger.debug("gem5_shell output: {}".format(output))
|
||||
|
||||
# We get a second prompt. Hence, we need to eat one to make sure that we
|
||||
# stay in sync. If we do not do this, we risk getting out of sync for
|
||||
# slower simulations.
|
||||
self.sckt.expect([self.sckt.UNIQUE_PROMPT, self.sckt.PROMPT], timeout=self.delay)
|
||||
|
||||
if check_exit_code:
|
||||
exit_code_text = self.gem5_shell('echo $?', as_root=as_root,
|
||||
timeout=timeout, check_exit_code=False,
|
||||
sync=False)
|
||||
try:
|
||||
exit_code = int(exit_code_text.split()[0])
|
||||
if exit_code:
|
||||
message = 'Got exit code {}\nfrom: {}\nOUTPUT: {}'
|
||||
raise DeviceError(message.format(exit_code, command, output))
|
||||
except (ValueError, IndexError):
|
||||
self.logger.warning('Could not get exit code for "{}",\ngot: "{}"'.format(command, exit_code_text))
|
||||
|
||||
return output
|
||||
|
||||
def gem5_util(self, command):
|
||||
""" Execute a gem5 utility command using the m5 binary on the device """
|
||||
self.gem5_shell('{} {}'.format(self.m5_path, command))
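# All of the m5 operations used elsewhere in this class go through this helper,
# for example (the file name is illustrative):
#
#     self.gem5_util('checkpoint')             # dump a checkpoint of the simulated system
#     self.gem5_util('writefile results.txt')  # write a guest file out to the gem5 output directory
#     self.gem5_util('exit')                   # terminate the simulation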
|
||||
|
||||
def sync_gem5_shell(self):
|
||||
"""
|
||||
Synchronise with the gem5 shell.
|
||||
|
||||
Write some unique text to the gem5 device to allow us to synchronise
|
||||
with the shell output. We actually get two prompts so we need to match
|
||||
both of these.
|
||||
"""
|
||||
self.logger.debug("Sending Sync")
|
||||
self.sckt.send("echo \*\*sync\*\*\n")
|
||||
self.sckt.expect(r"\*\*sync\*\*", timeout=self.delay)
|
||||
self.sckt.expect([self.sckt.UNIQUE_PROMPT, self.sckt.PROMPT], timeout=self.delay)
|
||||
self.sckt.expect([self.sckt.UNIQUE_PROMPT, self.sckt.PROMPT], timeout=self.delay)
|
||||
|
||||
def resize_shell(self):
|
||||
"""
|
||||
Resize the shell to avoid line wrapping issues.
|
||||
|
||||
"""
|
||||
# Try and avoid line wrapping as much as possible. Don't check the error
|
||||
# codes from these commands because some of them WILL fail.
|
||||
self.gem5_shell('stty columns 1024', check_exit_code=False)
|
||||
self.gem5_shell('{} stty columns 1024'.format(self.busybox), check_exit_code=False)
|
||||
self.gem5_shell('stty cols 1024', check_exit_code=False)
|
||||
self.gem5_shell('{} stty cols 1024'.format(self.busybox), check_exit_code=False)
|
||||
self.gem5_shell('reset', check_exit_code=False)
|
||||
|
||||
def move_to_temp_dir(self, source):
|
||||
"""
|
||||
Move a file to the temporary directory on the host for copying to the
|
||||
gem5 device
|
||||
"""
|
||||
command = "cp {} {}".format(source, self.temp_dir)
|
||||
self.logger.debug("Local copy command: {}".format(command))
|
||||
subprocess.call(command.split())
|
||||
subprocess.call("sync".split())
|
||||
|
||||
def checkpoint_gem5(self, end_simulation=False):
|
||||
""" Checkpoint the gem5 simulation, storing all system state """
|
||||
self.logger.info("Taking a post-boot checkpoint")
|
||||
self.gem5_util("checkpoint")
|
||||
if end_simulation:
|
||||
self.disconnect()
|
||||
|
||||
def mount_virtio(self):
|
||||
"""
|
||||
Mount the VirtIO device in the simulated system.
|
||||
"""
|
||||
self.logger.info("Mounting VirtIO device in simulated system")
|
||||
|
||||
self.gem5_shell('mkdir -p /mnt/obb')
|
||||
|
||||
mount_command = "mount -t 9p -o trans=virtio,version=9p2000.L,aname={} gem5 /mnt/obb".format(self.temp_dir)
|
||||
self.gem5_shell(mount_command)
|
||||
|
||||
def deploy_m5(self, context, force=False):
|
||||
"""
|
||||
Deploys the m5 binary to the device and returns the path to the binary
|
||||
on the device.
|
||||
|
||||
:param force: by default, if the binary is already present on the
|
||||
device, it will not be deployed again. Setting force to
|
||||
``True`` overrides that behaviour and ensures that the
|
||||
binary is always copied. Defaults to ``False``.
|
||||
|
||||
:returns: The on-device path to the m5 binary.
|
||||
|
||||
"""
|
||||
on_device_executable = self.path.join(self.binaries_directory, 'm5')
|
||||
if not force and self.file_exists(on_device_executable):
|
||||
# We want to check the version of the binary. We cannot directly
|
||||
# check this because the m5 binary itself is unversioned. We also
|
||||
# need to make sure not to check the error code as "m5 --help"
|
||||
# returns a non-zero error code.
|
||||
output = self.gem5_shell('m5 --help', check_exit_code=False)
|
||||
if "writefile" in output:
|
||||
self.logger.debug("Using the m5 binary on the device...")
|
||||
self.m5_path = on_device_executable
|
||||
return on_device_executable
|
||||
else:
|
||||
self.logger.debug("m5 on device does not support writefile!")
|
||||
host_file = context.resolver.get(Executable(NO_ONE, self.abi, 'm5'))
|
||||
self.logger.info("Installing the m5 binary to the device...")
|
||||
self.m5_path = self.install(host_file)
|
||||
return self.m5_path
|
@@ -1,14 +0,0 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
@@ -1,924 +0,0 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# pylint: disable=E1101
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
import base64
|
||||
import socket
|
||||
from collections import namedtuple
|
||||
from subprocess import CalledProcessError
|
||||
|
||||
from wlauto.core.extension import Parameter
|
||||
from wlauto.core.device import Device, RuntimeParameter, CoreParameter
|
||||
from wlauto.core.resource import NO_ONE
|
||||
from wlauto.exceptions import ConfigError, DeviceError, TimeoutError, DeviceNotRespondingError
|
||||
from wlauto.common.resources import Executable
|
||||
from wlauto.utils.cpuinfo import Cpuinfo
|
||||
from wlauto.utils.misc import convert_new_lines, escape_double_quotes, ranges_to_list, ABI_MAP
|
||||
from wlauto.utils.misc import isiterable, list_to_mask
|
||||
from wlauto.utils.ssh import SshShell
|
||||
from wlauto.utils.types import boolean, list_of_strings
|
||||
|
||||
|
||||
FSTAB_ENTRY_REGEX = re.compile(r'(\S+) on (\S+) type (\S+) \((\S+)\)')
|
||||
|
||||
FstabEntry = namedtuple('FstabEntry', ['device', 'mount_point', 'fs_type', 'options', 'dump_freq', 'pass_num'])
|
||||
PsEntry = namedtuple('PsEntry', 'user pid ppid vsize rss wchan pc state name')
|
||||
LsmodEntry = namedtuple('LsmodEntry', ['name', 'size', 'use_count', 'used_by'])
|
||||
|
||||
GOOGLE_DNS_SERVER_ADDRESS = '8.8.8.8'
|
||||
|
||||
|
||||
class BaseLinuxDevice(Device): # pylint: disable=abstract-method
|
||||
|
||||
path_module = 'posixpath'
|
||||
has_gpu = True
|
||||
|
||||
parameters = [
|
||||
Parameter('scheduler', kind=str, default='unknown',
|
||||
allowed_values=['unknown', 'smp', 'hmp', 'iks', 'ea', 'other'],
|
||||
description="""
|
||||
Specifies the type of multi-core scheduling model utilized in the device. The value
|
||||
must be one of the following:
|
||||
|
||||
:unknown: A generic Device interface is used to interact with the underlying device
|
||||
and the underlying scheduling model is unknown.
|
||||
:smp: A standard single-core or Symmetric Multi-Processing system.
|
||||
:hmp: ARM Heterogeneous Multi-Processing system.
|
||||
:iks: Linaro In-Kernel Switcher.
|
||||
:ea: ARM Energy-Aware scheduler.
|
||||
:other: Any other system not covered by the above.
|
||||
|
||||
.. note:: most currently-available systems would fall under ``smp`` rather than
|
||||
this value. ``other`` is there to future-proof against new schemes
|
||||
not yet covered by WA.
|
||||
|
||||
"""),
|
||||
Parameter('iks_switch_frequency', kind=int, default=None,
|
||||
description="""
|
||||
This is the switching frequency, in kilohertz, of IKS devices. This parameter *MUST NOT*
|
||||
be set for non-IKS device (i.e. ``scheduler != 'iks'``). If left unset for IKS devices,
|
||||
it will default to ``800000``, i.e. 800MHz.
|
||||
"""),
|
||||
Parameter('property_files', kind=list_of_strings,
|
||||
default=[
|
||||
'/etc/arch-release',
|
||||
'/etc/debian_version',
|
||||
'/etc/lsb-release',
|
||||
'/proc/config.gz',
|
||||
'/proc/cmdline',
|
||||
'/proc/cpuinfo',
|
||||
'/proc/version',
|
||||
'/proc/zconfig',
|
||||
'/sys/kernel/debug/sched_features',
|
||||
'/sys/kernel/hmp',
|
||||
],
|
||||
description='''
|
||||
A list of paths to files containing static OS properties. These will be pulled into the
|
||||
__meta directory in output for each run in order to provide information about the platform.
|
||||
These paths do not have to exist and will be ignored if the path is not present on a
|
||||
particular device.
|
||||
'''),
|
||||
Parameter('binaries_directory',
|
||||
description='Location of executable binaries on this device (must be in PATH).'),
|
||||
Parameter('working_directory',
|
||||
description='''
|
||||
Working directory to be used by WA. This must be in a location where the specified user
|
||||
has write permissions. This will default to /home/<username>/wa (or to /root/wa, if
|
||||
username is 'root').
|
||||
'''),
|
||||
|
||||
]
|
||||
|
||||
runtime_parameters = [
|
||||
RuntimeParameter('sysfile_values', 'get_sysfile_values', 'set_sysfile_values', value_name='params'),
|
||||
CoreParameter('${core}_cores', 'get_number_of_online_cores', 'set_number_of_online_cores',
|
||||
value_name='number'),
|
||||
CoreParameter('${core}_min_frequency', 'get_core_min_frequency', 'set_core_min_frequency',
|
||||
value_name='freq'),
|
||||
CoreParameter('${core}_max_frequency', 'get_core_max_frequency', 'set_core_max_frequency',
|
||||
value_name='freq'),
|
||||
CoreParameter('${core}_frequency', 'get_core_cur_frequency', 'set_core_cur_frequency',
|
||||
value_name='freq'),
|
||||
CoreParameter('${core}_governor', 'get_core_governor', 'set_core_governor',
|
||||
value_name='governor'),
|
||||
CoreParameter('${core}_governor_tunables', 'get_core_governor_tunables', 'set_core_governor_tunables',
|
||||
value_name='tunables'),
|
||||
]
|
||||
|
||||
dynamic_modules = [
|
||||
'devcpufreq',
|
||||
'cpuidle',
|
||||
]
|
||||
|
||||
@property
|
||||
def abi(self):
|
||||
if not self._abi:
|
||||
val = self.execute('uname -m').strip()
|
||||
for abi, architectures in ABI_MAP.iteritems():
|
||||
if val in architectures:
|
||||
self._abi = abi
|
||||
break
|
||||
else:
|
||||
self._abi = val
|
||||
return self._abi
|
||||
|
||||
@property
|
||||
def supported_abi(self):
|
||||
return [self.abi]
|
||||
|
||||
@property
|
||||
def online_cpus(self):
|
||||
val = self.get_sysfile_value('/sys/devices/system/cpu/online')
|
||||
return ranges_to_list(val)
|
||||
|
||||
@property
|
||||
def number_of_cores(self):
|
||||
"""
|
||||
Added in version 2.1.4.
|
||||
|
||||
"""
|
||||
if self._number_of_cores is None:
|
||||
corere = re.compile(r'^\s*cpu\d+\s*$')
|
||||
output = self.execute('ls /sys/devices/system/cpu')
|
||||
self._number_of_cores = 0
|
||||
for entry in output.split():
|
||||
if corere.match(entry):
|
||||
self._number_of_cores += 1
|
||||
return self._number_of_cores
|
||||
|
||||
@property
|
||||
def resource_cache(self):
|
||||
return self.path.join(self.working_directory, '.cache')
|
||||
|
||||
@property
|
||||
def file_transfer_cache(self):
|
||||
return self.path.join(self.working_directory, '.transfer')
|
||||
|
||||
@property
|
||||
def cpuinfo(self):
|
||||
if not self._cpuinfo:
|
||||
self._cpuinfo = Cpuinfo(self.execute('cat /proc/cpuinfo'))
|
||||
return self._cpuinfo
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(BaseLinuxDevice, self).__init__(**kwargs)
|
||||
self.busybox = None
|
||||
self._is_initialized = False
|
||||
self._is_ready = False
|
||||
self._just_rebooted = False
|
||||
self._is_rooted = None
|
||||
self._is_root_user = False
|
||||
self._available_frequencies = {}
|
||||
self._available_governors = {}
|
||||
self._available_governor_tunables = {}
|
||||
self._number_of_cores = None
|
||||
self._written_sysfiles = []
|
||||
self._cpuinfo = None
|
||||
self._abi = None
|
||||
|
||||
def validate(self):
|
||||
if self.iks_switch_frequency is not None and self.scheduler != 'iks': # pylint: disable=E0203
|
||||
raise ConfigError('iks_switch_frequency must NOT be set for non-IKS devices.')
|
||||
if self.iks_switch_frequency is None and self.scheduler == 'iks': # pylint: disable=E0203
|
||||
self.iks_switch_frequency = 800000 # pylint: disable=W0201
|
||||
|
||||
def initialize(self, context):
|
||||
self.execute('mkdir -p {}'.format(self.working_directory))
|
||||
if not self.binaries_directory:
|
||||
self._set_binaries_dir()
|
||||
self.execute('mkdir -p {}'.format(self.binaries_directory))
|
||||
self.busybox = self.deploy_busybox(context)
|
||||
|
||||
def _set_binaries_dir(self):
|
||||
# pylint: disable=attribute-defined-outside-init
|
||||
self.binaries_directory = self.path.join(self.working_directory, "bin")
|
||||
|
||||
def is_file(self, filepath):
|
||||
output = self.execute('if [ -f \'{}\' ]; then echo 1; else echo 0; fi'.format(filepath))
|
||||
# output from ssh may contain part of the expression in the buffer,
|
||||
# split out everything except the last word.
|
||||
return boolean(output.split()[-1]) # pylint: disable=maybe-no-member
|
||||
|
||||
def is_directory(self, filepath):
|
||||
output = self.execute('if [ -d \'{}\' ]; then echo 1; else echo 0; fi'.format(filepath))
|
||||
# output from ssh may contain part of the expression in the buffer,
|
||||
# split out everything except the last word.
|
||||
return boolean(output.split()[-1]) # pylint: disable=maybe-no-member
|
||||
|
||||
def get_properties(self, context):
|
||||
for propfile in self.property_files:
|
||||
try:
|
||||
normname = propfile.lstrip(self.path.sep).replace(self.path.sep, '.')
|
||||
outfile = os.path.join(context.host_working_directory, normname)
|
||||
if self.is_file(propfile):
|
||||
with open(outfile, 'w') as wfh:
|
||||
if propfile.endswith(".gz"):
|
||||
wfh.write(self.execute('{} zcat {}'.format(self.busybox, propfile)))
|
||||
else:
|
||||
wfh.write(self.execute('cat {}'.format(propfile)))
|
||||
elif self.is_directory(propfile):
|
||||
self.pull_file(propfile, outfile)
|
||||
else:
|
||||
continue
|
||||
except DeviceError:
|
||||
# We pull these files "opportunistically", so if a pull fails
|
||||
# (e.g. we don't have permissions to read the file), just note
|
||||
# it quietly (not as an error/warning) and move on.
|
||||
self.logger.debug('Could not pull property file "{}"'.format(propfile))
|
||||
return {}
|
||||
|
||||
def get_sysfile_value(self, sysfile, kind=None, binary=False):
|
||||
"""
|
||||
Get the contents of the specified sysfile.
|
||||
|
||||
:param sysfile: The file whose contents will be returned.
|
||||
|
||||
:param kind: The type of value to be expected in the sysfile. This can
|
||||
be any Python callable that takes a single str argument.
|
||||
If not specified or is None, the contents will be returned
|
||||
as a string.
|
||||
:param binary: Whether the value should be encoded into base64 for reading
|
||||
to deal with binary format.
|
||||
|
||||
"""
|
||||
if binary:
|
||||
output = self.execute('{} base64 {}'.format(self.busybox, sysfile), as_root=self.is_rooted).strip()
|
||||
output = output.decode('base64')
|
||||
else:
|
||||
output = self.execute('cat \'{}\''.format(sysfile), as_root=self.is_rooted).strip() # pylint: disable=E1103
|
||||
if kind:
|
||||
return kind(output)
|
||||
else:
|
||||
return output
|
||||
|
||||
def set_sysfile_value(self, sysfile, value, verify=True, binary=False):
|
||||
"""
|
||||
Set the value of the specified sysfile. By default, the value will be checked afterwards.
|
||||
Can be overridden by setting the ``verify`` parameter to ``False``. By default, binary values
|
||||
will not be written correctly; this can be changed by setting the ``binary`` parameter to
|
||||
``True``.
|
||||
|
||||
"""
|
||||
value = str(value)
|
||||
if binary:
|
||||
# Value is already string encoded, so need to decode before encoding in base64
|
||||
try:
|
||||
value = str(value.decode('string_escape'))
|
||||
except ValueError as e:
|
||||
msg = 'Can not interpret value "{}" for "{}": {}'
|
||||
raise ValueError(msg.format(value, sysfile, e.message))
|
||||
|
||||
encoded_value = base64.b64encode(value)
|
||||
cmd = 'echo {} | {} base64 -d > \'{}\''.format(encoded_value, self.busybox, sysfile)
|
||||
else:
|
||||
cmd = 'echo {} > \'{}\''.format(value, sysfile)
|
||||
self.execute(cmd, check_exit_code=False, as_root=True)
|
||||
|
||||
if verify:
|
||||
output = self.get_sysfile_value(sysfile, binary=binary)
|
||||
if output.strip() != value: # pylint: disable=E1103
|
||||
message = 'Could not set the value of {} to {}'.format(sysfile, value)
|
||||
raise DeviceError(message)
|
||||
self._written_sysfiles.append((sysfile, binary))
|
||||
|
||||
def get_sysfile_values(self):
|
||||
"""
|
||||
Returns a dict mapping paths of sysfiles that were previously set to their
|
||||
current values.
|
||||
|
||||
"""
|
||||
values = {}
|
||||
for sysfile, binary in self._written_sysfiles:
|
||||
values[sysfile] = self.get_sysfile_value(sysfile, binary=binary)
|
||||
return values
|
||||
|
||||
def set_sysfile_values(self, params):
|
||||
"""
|
||||
The plural version of ``set_sysfile_value``. Takes a single parameter which is a mapping of
|
||||
file paths to values to be set. By default, every value written will be verified. This can
|
||||
be disabled for individual paths by appending ``'!'`` to them. To enable values being
|
||||
written as binary data, a ``'^'`` can be prefixed to the path.
|
||||
|
||||
"""
|
||||
for sysfile, value in params.iteritems():
|
||||
verify = not sysfile.endswith('!')
|
||||
sysfile = sysfile.rstrip('!')
|
||||
binary = sysfile.startswith('^')
|
||||
sysfile = sysfile.lstrip('^')
|
||||
self.set_sysfile_value(sysfile, value, verify=verify, binary=binary)
|
||||
|
||||
def deploy_busybox(self, context, force=False):
|
||||
"""
|
||||
Deploys the busybox binary to the specified device and returns
|
||||
the path to the binary on the device.
|
||||
|
||||
:param context: an instance of ExecutionContext
|
||||
:param force: by default, if the binary is already present on the
|
||||
device, it will not be deployed again. Setting force
|
||||
to ``True`` overrides that behavior and ensures that the
|
||||
binary is always copied. Defaults to ``False``.
|
||||
|
||||
:returns: The on-device path to the busybox binary.
|
||||
|
||||
"""
|
||||
on_device_executable = self.get_binary_path("busybox", search_system_binaries=False)
|
||||
if force or not on_device_executable:
|
||||
host_file = context.resolver.get(Executable(NO_ONE, self.abi, 'busybox'))
|
||||
return self.install(host_file)
|
||||
return on_device_executable
|
||||
|
||||
def is_installed(self, name): # pylint: disable=unused-argument,no-self-use
|
||||
raise AttributeError("""Instead of using is_installed, please use
|
||||
``get_binary_path`` or ``install_if_needed``. You should use the path
|
||||
returned by these functions to invoke the binary.
|
||||
|
||||
please see: https://pythonhosted.org/wlauto/writing_extensions.html""")
|
||||
|
||||
def is_network_connected(self):
|
||||
"""
|
||||
Checks for internet connectivity on the device by pinging Google's public
|
||||
DNS server (8.8.8.8).
|
||||
|
||||
:returns: ``True`` if internet is available, ``False`` otherwise.
|
||||
|
||||
"""
|
||||
self.logger.debug('Checking for internet connectivity...')
|
||||
return self._ping_server(GOOGLE_DNS_SERVER_ADDRESS)
|
||||
|
||||
def _ping_server(self, ip_address, timeout=1, packet_count=1):
|
||||
output = self.execute('ping -q -c {} -w {} {}'.format(packet_count, timeout, ip_address),
|
||||
check_exit_code=False)
|
||||
|
||||
if 'network is unreachable' in output.lower():
|
||||
self.logger.debug('Cannot find IP address {}'.format(ip_address))
|
||||
return False
|
||||
else:
|
||||
self.logger.debug('Found IP address {}'.format(ip_address))
|
||||
return True
|
||||
|
||||
def get_binary_path(self, name, search_system_binaries=True):
|
||||
"""
|
||||
Searches the device's ``binaries_directory`` for the given binary;
|
||||
if it cannot find it there, it falls back to locating it with ``which``.
|
||||
|
||||
:param name: The name of the binary
|
||||
:param search_system_binaries: By default, this function will fall back to
|
||||
using ``which`` to locate the binary if it
|
||||
is not in ``binaries_directory``. When this is
|
||||
set to ``False``, that fallback is skipped.
|
||||
|
||||
:returns: The on-device path to the binary.
|
||||
|
||||
"""
|
||||
wa_binary = self.path.join(self.binaries_directory, name)
|
||||
if self.file_exists(wa_binary):
|
||||
return wa_binary
|
||||
if search_system_binaries:
|
||||
try:
|
||||
return self.execute('{} which {}'.format(self.busybox, name)).strip()
|
||||
except DeviceError:
|
||||
pass
|
||||
return None
|
||||
|
||||
def install_if_needed(self, host_path, search_system_binaries=True):
|
||||
"""
|
||||
Similar to get_binary_path but will install the binary if not found.
|
||||
|
||||
:param host_path: The path to the binary on the host
|
||||
:param search_system_binaries: By default, this function will fall back to
|
||||
using ``which`` to locate the binary if it
|
||||
is not in ``binaries_directory``. When this is
|
||||
set to ``False``, that fallback is skipped.
|
||||
|
||||
:returns: The on-device path to the binary.
|
||||
|
||||
"""
|
||||
binary_path = self.get_binary_path(os.path.split(host_path)[1],
|
||||
search_system_binaries=search_system_binaries)
|
||||
if not binary_path:
|
||||
binary_path = self.install(host_path)
|
||||
return binary_path
|
||||
|
||||
def list_file_systems(self):
|
||||
output = self.execute('mount')
|
||||
fstab = []
|
||||
for line in output.split('\n'):
|
||||
line = line.strip()
|
||||
if not line:
|
||||
continue
|
||||
match = FSTAB_ENTRY_REGEX.search(line)
|
||||
if match:
|
||||
fstab.append(FstabEntry(match.group(1), match.group(2),
|
||||
match.group(3), match.group(4),
|
||||
None, None))
|
||||
else: # assume pre-M Android
|
||||
fstab.append(FstabEntry(*line.split()))
|
||||
return fstab
|
||||
|
||||
# Process query and control
|
||||
|
||||
def get_pids_of(self, process_name):
|
||||
raise NotImplementedError()
|
||||
|
||||
def ps(self, **kwargs):
|
||||
raise NotImplementedError()
|
||||
|
||||
def kill(self, pid, signal=None, as_root=False): # pylint: disable=W0221
|
||||
"""
|
||||
Kill the specified process.
|
||||
|
||||
:param pid: PID of the process to kill.
|
||||
:param signal: Specify which signal to send to the process. This must
|
||||
be a valid value for -s option of kill. Defaults to ``None``.
|
||||
|
||||
Modified in version 2.1.4: added ``signal`` parameter.
|
||||
|
||||
"""
|
||||
signal_string = '-s {}'.format(signal) if signal else ''
|
||||
self.execute('kill {} {}'.format(signal_string, pid), as_root=as_root)
|
||||
|
||||
def killall(self, process_name, signal=None, as_root=None): # pylint: disable=W0221
|
||||
"""
|
||||
Kill all processes with the specified name.
|
||||
|
||||
:param process_name: The name of the process(es) to kill.
|
||||
:param signal: Specify which signal to send to the process. This must
|
||||
be a valid value for -s option of kill. Defaults to ``None``.
|
||||
|
||||
Modified in version 2.1.5: added ``as_root`` parameter.
|
||||
|
||||
"""
|
||||
if as_root is None:
|
||||
as_root = self.is_rooted
|
||||
for pid in self.get_pids_of(process_name):
|
||||
self.kill(pid, signal=signal, as_root=as_root)
|
||||
|
||||
def get_online_cpus(self, c):
|
||||
if isinstance(c, int): # assume c == cluster
|
||||
return [i for i in self.online_cpus if self.core_clusters[i] == c]
|
||||
elif isinstance(c, basestring): # assume c == core
|
||||
return [i for i in self.online_cpus if self.core_names[i] == c]
|
||||
else:
|
||||
raise ValueError(c)
|
||||
|
||||
# hotplug
|
||||
|
||||
def enable_cpu(self, cpu):
|
||||
"""
|
||||
Enable the specified core.
|
||||
|
||||
:param cpu: CPU core to enable. This must be the full name as it
|
||||
appears in sysfs, e.g. "cpu0".
|
||||
|
||||
"""
|
||||
self.hotplug_cpu(cpu, online=True)
|
||||
|
||||
def disable_cpu(self, cpu):
|
||||
"""
|
||||
Disable the specified core.
|
||||
|
||||
:param cpu: CPU core to disable. This must be the full name as it
|
||||
appears in sysfs, e.g. "cpu0".
|
||||
"""
|
||||
self.hotplug_cpu(cpu, online=False)
|
||||
|
||||
def hotplug_cpu(self, cpu, online):
|
||||
"""
|
||||
Hotplug the specified CPU either on or off.
|
||||
See https://www.kernel.org/doc/Documentation/cpu-hotplug.txt
|
||||
|
||||
:param cpu: The CPU to be hotplugged. This must be
|
||||
the full name as it appears in sysfs, e.g. "cpu0".
|
||||
:param online: The CPU will be enabled if this value evaluates to ``True``, and
|
||||
will be disabled otherwise.
|
||||
|
||||
"""
|
||||
if isinstance(cpu, int):
|
||||
cpu = 'cpu{}'.format(cpu)
|
||||
status = 1 if online else 0
|
||||
sysfile = '/sys/devices/system/cpu/{}/online'.format(cpu)
|
||||
self.set_sysfile_value(sysfile, status)
|
||||
|
||||
def get_number_of_online_cores(self, core):
|
||||
if core not in self.core_names:
|
||||
raise ValueError('Unexpected core: {}; must be in {}'.format(core, list(set(self.core_names))))
|
||||
online_cpus = self.online_cpus
|
||||
num_active_cores = 0
|
||||
for i, c in enumerate(self.core_names):
|
||||
if c == core and i in online_cpus:
|
||||
num_active_cores += 1
|
||||
return num_active_cores
|
||||
|
||||
def set_number_of_online_cores(self, core, number): # NOQA
|
||||
if core not in self.core_names:
|
||||
raise ValueError('Unexpected core: {}; must be in {}'.format(core, list(set(self.core_names))))
|
||||
core_ids = [i for i, c in enumerate(self.core_names) if c == core]
|
||||
max_cores = len(core_ids)
|
||||
if number > max_cores:
|
||||
message = 'Attempting to set the number of active {} to {}; maximum is {}'
|
||||
raise ValueError(message.format(core, number, max_cores))
|
||||
|
||||
if not number:
|
||||
# make sure at least one other core is enabled to avoid trying to
|
||||
# hotplug everything.
|
||||
for i, c in enumerate(self.core_names):
|
||||
if c != core and i in self.online_cpus:
|
||||
break
|
||||
else: # did not find one
|
||||
raise ValueError('Cannot hotplug all cpus on the device!')
|
||||
|
||||
for i in xrange(0, number):
|
||||
self.enable_cpu(core_ids[i])
|
||||
for i in xrange(number, max_cores):
|
||||
self.disable_cpu(core_ids[i])
|
||||
|
||||
def invoke(self, binary, args=None, in_directory=None, on_cpus=None,
|
||||
background=False, as_root=False, timeout=30):
|
||||
"""
|
||||
Executes the specified binary under the specified conditions.
|
||||
|
||||
:binary: binary to execute. Must be present and executable on the device.
|
||||
:args: arguments to be passed to the binary. These can be either a list or
|
||||
a string.
|
||||
:in_directory: execute the binary in the specified directory. This must
|
||||
be an absolute path.
|
||||
:on_cpus: taskset the binary to these CPUs. This may be a single ``int`` (in which
|
||||
case, it will be interpreted as the mask), a list of ``ints``, in which
|
||||
case this will be interpreted as the list of cpus, or string, which
|
||||
will be interpreted as a comma-separated list of cpu ranges, e.g.
|
||||
``"0,4-7"``.
|
||||
:background: If ``True``, a ``subprocess.Popen`` object will be returned straight
|
||||
away. If ``False`` (the default), this will wait for the command to
|
||||
terminate and return the STDOUT output
|
||||
:as_root: Specify whether the command should be run as root
|
||||
:timeout: If the invocation does not terminate within this number of seconds,
|
||||
a ``TimeoutError`` exception will be raised. Set to ``None`` if the
|
||||
invocation should not timeout.
|
||||
|
||||
"""
|
||||
command = binary
|
||||
if args:
|
||||
if isiterable(args):
|
||||
args = ' '.join(args)
|
||||
command = '{} {}'.format(command, args)
|
||||
if on_cpus:
|
||||
if isinstance(on_cpus, basestring):
|
||||
on_cpus = ranges_to_list(on_cpus)
|
||||
if isiterable(on_cpus):
|
||||
on_cpus = list_to_mask(on_cpus) # pylint: disable=redefined-variable-type
|
||||
command = '{} taskset 0x{:x} {}'.format(self.busybox, on_cpus, command)
|
||||
if in_directory:
|
||||
command = 'cd {} && {}'.format(in_directory, command)
|
||||
return self.execute(command, background=background, as_root=as_root, timeout=timeout)
|
||||
|
||||
def get_device_model(self):
|
||||
if self.file_exists("/proc/device-tree/model"):
|
||||
raw_model = self.execute("cat /proc/device-tree/model")
|
||||
device_model_to_return = '_'.join(raw_model.split()[:2])
|
||||
return device_model_to_return.rstrip(' \t\r\n\0')
|
||||
# Right now we don't know any other way to get device model
|
||||
# info in linux on arm platforms
|
||||
return None
|
||||
|
||||
# internal methods
|
||||
|
||||
def _check_ready(self):
|
||||
if not self._is_ready:
|
||||
raise RuntimeError('Device not ready (has connect() been called?)')
|
||||
|
||||
def _get_core_cluster(self, core):
|
||||
"""Returns the first cluster that has cores of the specified type. Raises
|
||||
value error if no cluster for the specified type has been found"""
|
||||
core_indexes = [i for i, c in enumerate(self.core_names) if c == core]
|
||||
core_clusters = set(self.core_clusters[i] for i in core_indexes)
|
||||
if not core_clusters:
|
||||
raise ValueError('No cluster found for core {}'.format(core))
|
||||
return sorted(list(core_clusters))[0]
|
||||
|
||||
|
||||
class LinuxDevice(BaseLinuxDevice):
|
||||
|
||||
platform = 'linux'
|
||||
|
||||
default_timeout = 30
|
||||
delay = 2
|
||||
long_delay = 3 * delay
|
||||
ready_timeout = 60
|
||||
|
||||
parameters = [
|
||||
Parameter('host', mandatory=True, description='Host name or IP address for the device.'),
|
||||
Parameter('username', mandatory=True, description='User name for the account on the device.'),
|
||||
Parameter('password', description='Password for the account on the device (for password-based auth).'),
|
||||
Parameter('keyfile', description='Keyfile to be used for key-based authentication.'),
|
||||
Parameter('port', kind=int, default=22, description='SSH port number on the device.'),
|
||||
Parameter('password_prompt', default='[sudo] password',
|
||||
description='Prompt presented by sudo when requesting the password.'),
|
||||
|
||||
Parameter('use_telnet', kind=boolean, default=False,
|
||||
description='Optionally, telnet may be used instead of ssh, though this is discouraged.'),
|
||||
Parameter('boot_timeout', kind=int, default=120,
|
||||
description='How long to try to connect to the device after a reboot.'),
|
||||
]
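# A hedged sketch of how these parameters might be supplied via device_config in
# a WA config file (the host, username and keyfile values are made up):
#
#   device = 'generic_linux'
#   device_config = dict(
#       host='192.168.0.100',
#       username='linaro',
#       keyfile='~/.ssh/id_rsa',
#   )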
|
||||
|
||||
@property
|
||||
def is_rooted(self):
|
||||
self._check_ready()
|
||||
if self._is_rooted is None:
|
||||
# First check if the user is root
|
||||
try:
|
||||
self.execute('test $(id -u) = 0')
|
||||
self._is_root_user = True
|
||||
self._is_rooted = True
|
||||
return self._is_rooted
|
||||
except DeviceError:
|
||||
self._is_root_user = False
|
||||
|
||||
# Otherwise, check if the user has sudo rights
|
||||
try:
|
||||
self.execute('ls /', as_root=True)
|
||||
self._is_rooted = True
|
||||
except DeviceError:
|
||||
self._is_rooted = False
|
||||
return self._is_rooted
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(LinuxDevice, self).__init__(*args, **kwargs)
|
||||
self.shell = None
|
||||
self._is_rooted = None
|
||||
|
||||
def validate(self):
|
||||
if self.working_directory is None: # pylint: disable=access-member-before-definition
|
||||
if self.username == 'root':
|
||||
self.working_directory = '/root/wa' # pylint: disable=attribute-defined-outside-init
|
||||
else:
|
||||
self.working_directory = '/home/{}/wa'.format(self.username) # pylint: disable=attribute-defined-outside-init
|
||||
|
||||
def initialize(self, context, *args, **kwargs):
|
||||
self.execute('mkdir -p {}'.format(self.binaries_directory))
|
||||
self.execute('export PATH={}:$PATH'.format(self.binaries_directory))
|
||||
super(LinuxDevice, self).initialize(context, *args, **kwargs)
|
||||
|
||||
# Power control
|
||||
|
||||
def reset(self):
|
||||
try:
|
||||
self.execute('reboot', as_root=True)
|
||||
except DeviceError as e:
|
||||
if 'Connection dropped' not in e.message:
|
||||
raise e
|
||||
self._is_ready = False
|
||||
|
||||
def hard_reset(self):
|
||||
self._is_ready = False
|
||||
|
||||
def boot(self, hard=False, **kwargs):
|
||||
if hard:
|
||||
self.hard_reset()
|
||||
else:
|
||||
self.reset()
|
||||
self.logger.debug('Waiting for device...')
|
||||
# Wait a fixed delay before starting polling to give the device time to
|
||||
# shut down; otherwise, we might create the connection while it is still shutting
|
||||
# down, resulting in the subsequent connection failing.
|
||||
initial_delay = 20
|
||||
time.sleep(initial_delay)
|
||||
boot_timeout = max(self.boot_timeout - initial_delay, 10)
|
||||
|
||||
start_time = time.time()
|
||||
while (time.time() - start_time) < boot_timeout:
|
||||
try:
|
||||
s = socket.create_connection((self.host, self.port), timeout=5)
|
||||
s.close()
|
||||
break
|
||||
except socket.timeout:
|
||||
pass
|
||||
except socket.error:
|
||||
time.sleep(5)
|
||||
else:
|
||||
raise DeviceError('Could not connect to {} after reboot'.format(self.host))
|
||||
|
||||
def connect(self): # NOQA pylint: disable=R0912
|
||||
self.shell = SshShell(password_prompt=self.password_prompt,
|
||||
timeout=self.default_timeout, telnet=self.use_telnet)
|
||||
self.shell.login(self.host, self.username, self.password, self.keyfile, self.port)
|
||||
self._is_ready = True
|
||||
|
||||
def disconnect(self): # NOQA pylint: disable=R0912
|
||||
self.shell.logout()
|
||||
self._is_ready = False
|
||||
|
||||
# Execution
|
||||
|
||||
def execute(self, command, timeout=default_timeout, check_exit_code=True, background=False,
|
||||
as_root=False, strip_colors=True, **kwargs):
|
||||
"""
|
||||
Execute the specified command on the device over the SSH connection.
|
||||
|
||||
Parameters:
|
||||
|
||||
:param command: The command to be executed. It should appear exactly
|
||||
as if you were typing it into a shell.
|
||||
:param timeout: Time, in seconds, to wait for the command to return before aborting
|
||||
and raising an error. Defaults to ``LinuxDevice.default_timeout``.
|
||||
:param check_exit_code: If ``True``, the return code of the command on the Device will
|
||||
be checked and an exception will be raised if it is not 0.
|
||||
Defaults to ``True``.
|
||||
:param background: If ``True``, will create a new ssh shell rather than using
|
||||
the default session and will return it immediately. If this is ``True``,
|
||||
``timeout``, ``strip_colors`` and (obviously) ``check_exit_code`` will
|
||||
be ignored; also, with this, ``as_root=True`` is only valid if ``username``
|
||||
for the device was set to ``root``.
|
||||
:param as_root: If ``True``, will attempt to execute command in privileged mode. The device
|
||||
must be rooted, otherwise an error will be raised. Defaults to ``False``.
|
||||
|
||||
Added in version 2.1.3
|
||||
|
||||
:returns: If ``background`` parameter is set to ``True``, the subprocess object will
|
||||
be returned; otherwise, the contents of STDOUT from the device will be returned.
|
||||
|
||||
"""
|
||||
self._check_ready()
|
||||
try:
|
||||
if background:
|
||||
if as_root and self.username != 'root':
|
||||
raise DeviceError('Cannot execute in background with as_root=True unless user is root.')
|
||||
return self.shell.background(command)
|
||||
else:
|
||||
# If we're already the root user, don't bother with sudo
|
||||
if self._is_root_user:
|
||||
as_root = False
|
||||
return self.shell.execute(command, timeout, check_exit_code, as_root, strip_colors)
|
||||
except CalledProcessError as e:
|
||||
raise DeviceError(e)
|
||||
|
||||
def kick_off(self, command, as_root=None):
|
||||
"""
|
||||
Like execute but closes ssh session and returns immediately, leaving the command running on the
|
||||
device (this is different from execute(background=True) which keeps ssh connection open and returns
|
||||
a subprocess object).
|
||||
|
||||
"""
|
||||
if as_root is None:
|
||||
as_root = self.is_rooted
|
||||
self._check_ready()
|
||||
command = 'sh -c "{}" 1>/dev/null 2>/dev/null &'.format(escape_double_quotes(command))
|
||||
return self.shell.execute(command, as_root=as_root)
|
||||
|
||||
def get_pids_of(self, process_name):
|
||||
"""Returns a list of PIDs of all processes with the specified name."""
|
||||
# result should be a column of PIDs with the first row as "PID" header
|
||||
result = self.execute('ps -C {} -o pid'.format(process_name), # NOQA
|
||||
check_exit_code=False).strip().split()
|
||||
if len(result) >= 2: # at least one row besides the header
|
||||
return map(int, result[1:])
|
||||
else:
|
||||
return []
|
||||
|
||||
def ps(self, **kwargs):
|
||||
command = 'ps -eo user,pid,ppid,vsize,rss,wchan,pcpu,state,fname'
|
||||
lines = iter(convert_new_lines(self.execute(command)).split('\n'))
|
||||
lines.next() # header
|
||||
|
||||
result = []
|
||||
for line in lines:
|
||||
parts = re.split(r'\s+', line, maxsplit=8)
|
||||
if parts:
|
||||
result.append(PsEntry(*(parts[0:1] + map(int, parts[1:5]) + parts[5:])))
|
||||
|
||||
if not kwargs:
|
||||
return result
|
||||
else:
|
||||
filtered_result = []
|
||||
for entry in result:
|
||||
if all(getattr(entry, k) == v for k, v in kwargs.iteritems()):
|
||||
filtered_result.append(entry)
|
||||
return filtered_result
|
||||
|
||||
# File management
|
||||
|
||||
def push_file(self, source, dest, as_root=False, timeout=default_timeout): # pylint: disable=W0221
|
||||
self._check_ready()
|
||||
try:
|
||||
if not as_root or self.username == 'root':
|
||||
self.shell.push_file(source, dest, timeout=timeout)
|
||||
else:
|
||||
tempfile = self.path.join(self.working_directory, self.path.basename(dest))
|
||||
self.shell.push_file(source, tempfile, timeout=timeout)
|
||||
self.shell.execute('cp -r {} {}'.format(tempfile, dest), timeout=timeout, as_root=True)
|
||||
except CalledProcessError as e:
|
||||
raise DeviceError(e)
|
||||
|
||||
def pull_file(self, source, dest, as_root=False, timeout=default_timeout): # pylint: disable=W0221
|
||||
self._check_ready()
|
||||
try:
|
||||
if not as_root or self.username == 'root':
|
||||
self.shell.pull_file(source, dest, timeout=timeout)
|
||||
else:
|
||||
tempfile = self.path.join(self.working_directory, self.path.basename(source))
|
||||
self.shell.execute('cp -r {} {}'.format(source, tempfile), timeout=timeout, as_root=True)
|
||||
self.shell.execute('chown -R {} {}'.format(self.username, tempfile), timeout=timeout, as_root=True)
|
||||
self.shell.pull_file(tempfile, dest, timeout=timeout)
|
||||
except CalledProcessError as e:
|
||||
raise DeviceError(e)
|
||||
|
||||
def delete_file(self, filepath, as_root=False): # pylint: disable=W0221
|
||||
self.execute('rm -rf {}'.format(filepath), as_root=as_root)
|
||||
|
||||
def file_exists(self, filepath):
|
||||
output = self.execute('if [ -e \'{}\' ]; then echo 1; else echo 0; fi'.format(filepath))
|
||||
# output from ssh may contain part of the expression in the buffer,
|
||||
# split out everything except the last word.
|
||||
return boolean(output.split()[-1]) # pylint: disable=maybe-no-member
|
||||
|
||||
def listdir(self, path, as_root=False, **kwargs):
|
||||
contents = self.execute('ls -1 {}'.format(path), as_root=as_root).strip()
|
||||
if not contents:
|
||||
return []
|
||||
return [x.strip() for x in contents.split('\n')] # pylint: disable=maybe-no-member
|
||||
|
||||
def install(self, filepath, timeout=default_timeout, with_name=None): # pylint: disable=W0221
|
||||
destpath = self.path.join(self.binaries_directory,
|
||||
with_name or self.path.basename(filepath))
|
||||
self.push_file(filepath, destpath, as_root=True)
|
||||
self.execute('chmod a+x {}'.format(destpath), timeout=timeout, as_root=True)
|
||||
return destpath
|
||||
|
||||
install_executable = install # compatibility
|
||||
|
||||
def uninstall(self, executable_name):
|
||||
on_device_executable = self.get_binary_path(executable_name, search_system_binaries=False)
|
||||
if not on_device_executable:
|
||||
raise DeviceError("Could not uninstall {}, binary not found".format(on_device_executable))
|
||||
self.delete_file(on_device_executable, as_root=self.is_rooted)
|
||||
|
||||
uninstall_executable = uninstall # compatibility
|
||||
|
||||
# misc
|
||||
|
||||
def lsmod(self):
|
||||
"""List loaded kernel modules."""
|
||||
lines = self.execute('lsmod').splitlines()
|
||||
entries = []
|
||||
for line in lines[1:]: # first line is the header
|
||||
if not line.strip():
|
||||
continue
|
||||
parts = line.split()
|
||||
name = parts[0]
|
||||
size = int(parts[1])
|
||||
use_count = int(parts[2])
|
||||
if len(parts) > 3:
|
||||
used_by = ''.join(parts[3:]).split(',')
|
||||
else:
|
||||
used_by = []
|
||||
entries.append(LsmodEntry(name, size, use_count, used_by))
|
||||
return entries
|
||||
|
||||
def insmod(self, path):
|
||||
"""Install a kernel module located on the host on the target device."""
|
||||
target_path = self.path.join(self.working_directory, os.path.basename(path))
|
||||
self.push_file(path, target_path)
|
||||
self.execute('insmod {}'.format(target_path), as_root=True)
|
||||
|
||||
def ping(self):
|
||||
try:
|
||||
# May be triggered inside initialize()
|
||||
self.shell.execute('ls /', timeout=5)
|
||||
except (TimeoutError, CalledProcessError):
|
||||
raise DeviceNotRespondingError(self.host)
|
||||
|
||||
def capture_screen(self, filepath):
|
||||
if not self.get_binary_path('scrot'):
|
||||
self.logger.debug('Could not take screenshot as scrot is not installed.')
|
||||
return
|
||||
try:
|
||||
tempfile = self.path.join(self.working_directory, os.path.basename(filepath))
|
||||
self.execute('DISPLAY=:0.0 scrot {}'.format(tempfile))
|
||||
self.pull_file(tempfile, filepath)
|
||||
self.delete_file(tempfile)
|
||||
except DeviceError as e:
|
||||
if "Can't open X dispay." not in e.message:
|
||||
raise e
|
||||
message = e.message.split('OUTPUT:', 1)[1].strip()
|
||||
self.logger.debug('Could not take screenshot: {}'.format(message))
|
||||
|
||||
def is_screen_on(self):
|
||||
pass # TODO
|
||||
|
||||
def ensure_screen_is_on(self):
|
||||
pass # TODO
|
@@ -1,165 +0,0 @@
|
||||
# Copyright 2017 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import os
|
||||
from math import ceil
|
||||
|
||||
from wlauto.core.extension import Parameter
|
||||
from wlauto.core.workload import Workload
|
||||
from wlauto.core.resource import NO_ONE
|
||||
from wlauto.common.resources import Executable
|
||||
from wlauto.common.android.resources import ReventFile
|
||||
from wlauto.exceptions import WorkloadError
|
||||
from wlauto.utils.revent import ReventRecording
|
||||
|
||||
class ReventWorkload(Workload):
|
||||
# pylint: disable=attribute-defined-outside-init
|
||||
|
||||
description = """
|
||||
A workload for playing back revent recordings. You can supply three
|
||||
different files:
|
||||
|
||||
1. {device_model}.setup.revent
|
||||
2. {device_model}.run.revent
|
||||
3. {device_model}.teardown.revent
|
||||
|
||||
You may generate these files with the ``wa record`` command, using the -s flag
|
||||
to specify the stage (``setup``, ``run``, ``teardown``).
|
||||
|
||||
You may also supply an 'idle_time' in seconds in place of the run file.
|
||||
The ``run`` file may only be omitted if you choose to run this way; you
|
||||
may still supply ``setup`` and ``teardown`` files while running idle.
|
||||
|
||||
To use a ``setup`` or ``teardown`` file, set the setup_required and/or
|
||||
teardown_required class attributes to True (default: False).
|
||||
|
||||
N.B. This is the default description. You may overwrite this for your
|
||||
workload to include more specific information.
|
||||
|
||||
"""
|
||||
|
||||
setup_required = False
|
||||
teardown_required = False
|
||||
|
||||
parameters = [
|
||||
Parameter(
|
||||
'idle_time', kind=int, default=None,
|
||||
description='''
|
||||
The time you wish the device to remain idle for (if a value is
|
||||
given then this overrides any .run revent file).
|
||||
'''),
|
||||
]
|
||||
|
||||
def __init__(self, device, _call_super=True, **kwargs):
|
||||
if _call_super:
|
||||
Workload.__init__(self, device, **kwargs)
|
||||
self.setup_timeout = kwargs.get('setup_timeout', None)
|
||||
self.run_timeout = kwargs.get('run_timeout', None)
|
||||
self.teardown_timeout = kwargs.get('teardown_timeout', None)
|
||||
self.revent_setup_file = None
|
||||
self.revent_run_file = None
|
||||
self.revent_teardown_file = None
|
||||
self.on_device_setup_revent = None
|
||||
self.on_device_run_revent = None
|
||||
self.on_device_teardown_revent = None
|
||||
self.statedefs_dir = None
|
||||
|
||||
def initialize(self, context):
|
||||
devpath = self.device.path
|
||||
self.on_device_revent_binary = devpath.join(self.device.binaries_directory, 'revent')
|
||||
|
||||
def setup(self, context):
|
||||
devpath = self.device.path
|
||||
if self.setup_required:
|
||||
self.revent_setup_file = context.resolver.get(ReventFile(self, 'setup'))
|
||||
if self.revent_setup_file:
|
||||
self.on_device_setup_revent = devpath.join(self.device.working_directory,
|
||||
os.path.split(self.revent_setup_file)[-1])
|
||||
duration = ReventRecording(self.revent_setup_file).duration
|
||||
self.default_setup_timeout = ceil(duration) + 30
|
||||
if not self.idle_time:
|
||||
self.revent_run_file = context.resolver.get(ReventFile(self, 'run'))
|
||||
if self.revent_run_file:
|
||||
self.on_device_run_revent = devpath.join(self.device.working_directory,
|
||||
os.path.split(self.revent_run_file)[-1])
|
||||
self.default_run_timeout = ceil(ReventRecording(self.revent_run_file).duration) + 30
|
||||
if self.teardown_required:
|
||||
self.revent_teardown_file = context.resolver.get(ReventFile(self, 'teardown'))
|
||||
if self.revent_teardown_file:
|
||||
self.on_device_teardown_revent = devpath.join(self.device.working_directory,
|
||||
os.path.split(self.revent_teardown_file)[-1])
|
||||
duration = ReventRecording(self.revent_teardown_file).duration
|
||||
self.default_teardown_timeout = ceil(duration) + 30
|
||||
self._check_revent_files(context)
|
||||
|
||||
Workload.setup(self, context)
|
||||
|
||||
if self.revent_setup_file is not None:
|
||||
self.setup_timeout = self.setup_timeout or self.default_setup_timeout
|
||||
self.device.killall('revent')
|
||||
command = '{} replay {}'.format(self.on_device_revent_binary, self.on_device_setup_revent)
|
||||
self.device.execute(command, timeout=self.setup_timeout)
|
||||
self.logger.debug('Revent setup completed.')
|
||||
|
||||
def run(self, context):
|
||||
if not self.idle_time:
|
||||
self.run_timeout = self.run_timeout or self.default_run_timeout
|
||||
command = '{} replay {}'.format(self.on_device_revent_binary, self.on_device_run_revent)
|
||||
self.logger.debug('Replaying {}'.format(os.path.basename(self.on_device_run_revent)))
|
||||
self.device.execute(command, timeout=self.run_timeout)
|
||||
self.logger.debug('Replay completed.')
|
||||
else:
|
||||
self.logger.info('Idling for ' + str(self.idle_time) + ' seconds.')
|
||||
self.device.sleep(self.idle_time)
|
||||
self.logger.info('Successfully did nothing for ' + str(self.idle_time) + ' seconds!')
|
||||
|
||||
def update_result(self, context):
|
||||
pass
|
||||
|
||||
def teardown(self, context):
|
||||
if self.revent_teardown_file is not None:
|
||||
self.teardown_timeout = self.teardown_timeout or self.default_teardown_timeout
|
||||
command = '{} replay {}'.format(self.on_device_revent_binary,
|
||||
self.on_device_teardown_revent)
|
||||
self.device.execute(command, timeout=self.teardown_timeout)
|
||||
self.logger.debug('Replay completed.')
|
||||
self.device.killall('revent')
|
||||
if self.revent_setup_file is not None:
|
||||
self.device.delete_file(self.on_device_setup_revent)
|
||||
if not self.idle_time:
|
||||
self.device.delete_file(self.on_device_run_revent)
|
||||
if self.revent_teardown_file is not None:
|
||||
self.device.delete_file(self.on_device_teardown_revent)
|
||||
|
||||
def _check_revent_files(self, context):
|
||||
# check the revent binary
|
||||
revent_binary = context.resolver.get(Executable(NO_ONE, self.device.abi, 'revent'))
|
||||
if not os.path.isfile(revent_binary):
|
||||
message = '{} does not exist. '.format(revent_binary)
|
||||
message += 'Please build revent for your system and place it in that location'
|
||||
raise WorkloadError(message)
|
||||
if not self.revent_run_file and not self.idle_time:
|
||||
# pylint: disable=too-few-format-args
|
||||
message = 'It seems a {0}.run.revent file does not exist; '\
|
||||
'please provide one for your device: {0}.'
|
||||
raise WorkloadError(message.format(self.device.name))
|
||||
|
||||
self.on_device_revent_binary = self.device.install_executable(revent_binary)
|
||||
if self.revent_setup_file is not None:
|
||||
self.device.push_file(self.revent_setup_file, self.on_device_setup_revent)
|
||||
if not self.idle_time:
|
||||
self.device.push_file(self.revent_run_file, self.on_device_run_revent)
|
||||
if self.revent_teardown_file is not None:
|
||||
self.device.push_file(self.revent_teardown_file, self.on_device_teardown_revent)
|
@@ -54,15 +54,11 @@ retry_on_status = ['FAILED', 'PARTIAL']
|
||||
# How many times a job will be re-run before giving up
|
||||
max_retries = 3
|
||||
|
||||
# If WA should delete its files from the device after the run is completed
|
||||
clean_up = False
|
||||
|
||||
####################################################################################################
|
||||
######################################### Device Settings ##########################################
|
||||
####################################################################################################
|
||||
# Specify the device you want to run workload automation on. This must be a #
|
||||
# string with the ID of the device. Common options are 'generic_android' and #
|
||||
# 'generic_linux'. Run ``wa list devices`` to see all available options. #
|
||||
# string with the ID of the device. At the moment, only 'TC2' is supported. #
|
||||
# #
|
||||
device = 'generic_android'
|
||||
|
||||
@@ -88,7 +84,7 @@ device_config = dict(
|
||||
|
||||
|
||||
####################################################################################################
|
||||
################################### Instrumentation Configuration ####################################
|
||||
################################### Instrumention Configuration ####################################
|
||||
####################################################################################################
|
||||
# This defines the additionnal instrumentation that will be enabled during workload execution, #
|
||||
# which in turn determines what additional data (such as /proc/interrupts content or Streamline #
|
||||
@@ -193,7 +189,7 @@ logging = {
|
||||
####################################################################################################
|
||||
#################################### Instruments Configuration #####################################
|
||||
####################################################################################################
|
||||
# Instrumentation Configuration is related to specific instrument's settings. Some of the #
|
||||
# Instrumention Configuration is related to specific insturment's settings. Some of the #
|
||||
# instrumentations require specific settings in order for them to work. These settings are #
|
||||
# specified here. #
|
||||
# Note that these settings only take effect if the corresponding instrument is
|
||||
@@ -226,10 +222,10 @@ logging = {
|
||||
####################################################################################################
|
||||
######################################### DAQ configuration ########################################
|
||||
|
||||
# The host address of the machine that runs the daq Server which the instrument communicates with
|
||||
# The host address of the machine that runs the daq Server which the insturment communicates with
|
||||
#daq_server_host = '10.1.17.56'
|
||||
|
||||
# The port number for daq Server in which daq instrument communicates with
|
||||
# The port number for daq Server in which daq insturment communicates with
|
||||
#daq_server_port = 56788
|
||||
|
||||
# The values of resistors 1 and 2 (in Ohms) across which the voltages are measured
|
||||
|
@@ -12,3 +12,5 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
|
@@ -53,14 +53,13 @@ sys.path.insert(0, os.path.join(_this_dir, '..', 'external'))
|
||||
|
||||
#pylint: disable=C0326
|
||||
_EXTENSION_TYPE_TABLE = [
|
||||
# name, class, default package, default path
|
||||
('command', 'wlauto.core.command.Command', 'wlauto.commands', 'commands'),
|
||||
('device', 'wlauto.core.device.Device', 'wlauto.devices', 'devices'),
|
||||
('instrument', 'wlauto.core.instrumentation.Instrument', 'wlauto.instrumentation', 'instruments'),
|
||||
('module', 'wlauto.core.extension.Module', 'wlauto.modules', 'modules'),
|
||||
('resource_getter', 'wlauto.core.resource.ResourceGetter', 'wlauto.resource_getters', 'resource_getters'),
|
||||
('result_processor', 'wlauto.core.result.ResultProcessor', 'wlauto.result_processors', 'result_processors'),
|
||||
('workload', 'wlauto.core.workload.Workload', 'wlauto.workloads', 'workloads'),
|
||||
# name, class, default package, default path
|
||||
('command', 'wlauto.core.command.Command', 'wlauto.commands', 'commands'),
|
||||
('device_manager', 'wlauto.core.device_manager.DeviceManager', 'wlauto.managers', 'managers'),
|
||||
('instrument', 'wlauto.core.instrumentation.Instrument', 'wlauto.instrumentation', 'instruments'),
|
||||
('resource_getter', 'wlauto.core.resource.ResourceGetter', 'wlauto.resource_getters', 'resource_getters'),
|
||||
('result_processor', 'wlauto.core.result.ResultProcessor', 'wlauto.result_processors', 'result_processors'),
|
||||
('workload', 'wlauto.core.workload.Workload', 'wlauto.workloads', 'workloads'),
|
||||
]
|
||||
_Extension = namedtuple('_Extension', 'name, cls, default_package, default_path')
|
||||
_extensions = [_Extension._make(ext) for ext in _EXTENSION_TYPE_TABLE] # pylint: disable=W0212
|
||||
@@ -114,8 +113,8 @@ class ConfigLoader(object):
|
||||
new_config = load_struct_from_yaml(source)
|
||||
else:
|
||||
raise ConfigError('Unknown config format: {}'.format(source))
|
||||
except (LoadSyntaxError, ValueError) as e:
|
||||
raise ConfigError('Invalid config "{}":\n\t{}'.format(source, e))
|
||||
except LoadSyntaxError as e:
|
||||
raise ConfigError(e)
|
||||
|
||||
self._config = merge_dicts(self._config, new_config,
|
||||
list_duplicates='first',
|
||||
@@ -152,8 +151,7 @@ def init_environment(env_root, dep_dir, extension_paths, overwrite_existing=Fals
|
||||
os.makedirs(env_root)
|
||||
with open(os.path.join(_this_dir, '..', 'config_example.py')) as rf:
|
||||
text = re.sub(r'""".*?"""', '', rf.read(), 1, re.DOTALL)
|
||||
config_path = os.path.join(env_root, 'config.py')
|
||||
with open(config_path, 'w') as wf:
|
||||
with open(os.path.join(_env_root, 'config.py'), 'w') as wf:
|
||||
wf.write(text)
|
||||
|
||||
os.makedirs(dep_dir)
|
||||
@@ -174,11 +172,9 @@ def init_environment(env_root, dep_dir, extension_paths, overwrite_existing=Fals
|
||||
os.chown(os.path.join(root, d), uid, gid)
|
||||
for f in files: # pylint: disable=W0621
|
||||
os.chown(os.path.join(root, f), uid, gid)
|
||||
return config_path
|
||||
|
||||
|
||||
_env_root = os.getenv('WA_USER_DIRECTORY', os.path.join(_user_home, '.workload_automation'))
|
||||
_env_root = os.path.abspath(_env_root)
|
||||
_dep_dir = os.path.join(_env_root, 'dependencies')
|
||||
_extension_paths = [os.path.join(_env_root, ext.default_path) for ext in _extensions]
|
||||
_env_var_paths = os.getenv('WA_EXTENSION_PATHS', '')
|
||||
@@ -192,8 +188,7 @@ for filename in ['config.py', 'config.yaml']:
|
||||
_env_configs.append(filepath)
|
||||
|
||||
if not os.path.isdir(_env_root):
|
||||
cfg_path = init_environment(_env_root, _dep_dir, _extension_paths)
|
||||
_env_configs.append(cfg_path)
|
||||
init_environment(_env_root, _dep_dir, _extension_paths)
|
||||
elif not _env_configs:
|
||||
filepath = os.path.join(_env_root, 'config.py')
|
||||
with open(os.path.join(_this_dir, '..', 'config_example.py')) as f:
|
||||
|
@@ -243,6 +243,13 @@ class RebootPolicy(object):
|
||||
else:
|
||||
return cmp(self.policy, other)
|
||||
|
||||
def to_pod(self):
|
||||
return self.policy
|
||||
|
||||
@staticmethod
|
||||
def from_pod(pod):
|
||||
return RebootPolicy(pod)
|
||||
|
||||
|
||||
class RunConfigurationItem(object):
|
||||
"""
|
||||
@@ -481,7 +488,6 @@ class RunConfiguration(object):
|
||||
RunConfigurationItem('flashing_config', 'dict', 'replace'),
|
||||
RunConfigurationItem('retry_on_status', 'list', 'replace'),
|
||||
RunConfigurationItem('max_retries', 'scalar', 'replace'),
|
||||
RunConfigurationItem('clean_up', 'scalar', 'replace'),
|
||||
]
|
||||
|
||||
# Configuration specified for each workload spec. "workload_parameters"
|
||||
@@ -758,7 +764,7 @@ class RunConfiguration(object):
|
||||
if spec.match_selectors(selectors):
|
||||
instrumentation_config = self._raw_config['instrumentation']
|
||||
for instname in spec.instrumentation:
|
||||
if instname not in instrumentation_config and not instname.startswith('~'):
|
||||
if instname not in instrumentation_config:
|
||||
instrumentation_config.append(instname)
|
||||
self.workload_specs.append(spec)
|
||||
|
||||
|
@@ -1,472 +0,0 @@
|
||||
# Copyright 2013-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
"""
|
||||
Base classes for device interfaces.
|
||||
|
||||
:Device: The base class for all devices. This defines the interface that must be
|
||||
implemented by all devices, and which workloads and instrumentation
|
||||
can therefore always rely on.
|
||||
:AndroidDevice: Implements most of the :class:`Device` interface, and extends it
|
||||
with a number of Android-specific methods.
|
||||
:BigLittleDevice: Subclasses :class:`AndroidDevice` to implement big.LITTLE-specific
|
||||
runtime parameters.
|
||||
:SimpleMulticoreDevice: Subclasses :class:`AndroidDevice` to implement homogeneous cores
|
||||
device runtime parameters.
|
||||
|
||||
"""
|
||||
|
||||
import os
|
||||
import imp
|
||||
import string
|
||||
from collections import OrderedDict
|
||||
from contextlib import contextmanager
|
||||
|
||||
from wlauto.core.extension import Extension, ExtensionMeta, AttributeCollection, Parameter
|
||||
from wlauto.core.extension_loader import ExtensionLoader
|
||||
from wlauto.exceptions import DeviceError, ConfigError
|
||||
from wlauto.utils.types import list_of_integers, list_of, caseless_string
|
||||
|
||||
|
||||
__all__ = ['RuntimeParameter', 'CoreParameter', 'Device', 'DeviceMeta']
|
||||
|
||||
|
||||
class RuntimeParameter(object):
|
||||
"""
|
||||
A runtime parameter which has its getter and setter methods associated
|
||||
with it.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, name, getter, setter,
|
||||
getter_args=None, setter_args=None,
|
||||
value_name='value', override=False):
|
||||
"""
|
||||
:param name: the name of the parameter.
|
||||
:param getter: the getter method which returns the value of this parameter.
|
||||
:param setter: the setter method which sets the value of this parameter. The setter
|
||||
always expects to be passed one argument when it is called.
|
||||
:param getter_args: keyword arguments to be used when invoking the getter.
|
||||
:param setter_args: keyword arguments to be used when invoking the setter.
|
||||
:param override: A ``bool`` that specifies whether a parameter of the same name further up the
|
||||
hierarchy should be overridden. If this is ``False`` (the default), an exception
|
||||
will be raised by the ``AttributeCollection`` instead.
|
||||
|
||||
"""
|
||||
self.name = name
|
||||
self.getter = getter
|
||||
self.setter = setter
|
||||
self.getter_args = getter_args or {}
|
||||
self.setter_args = setter_args or {}
|
||||
self.value_name = value_name
|
||||
self.override = override
|
||||
|
||||
def __str__(self):
|
||||
return self.name
|
||||
|
||||
__repr__ = __str__
|
||||
|
||||
|
||||
class CoreParameter(RuntimeParameter):
|
||||
"""A runtime parameter that will get expanded into a RuntimeParameter for each core type."""
|
||||
|
||||
def get_runtime_parameters(self, core_names):
|
||||
params = []
|
||||
for core in set(core_names):
|
||||
name = string.Template(self.name).substitute(core=core)
|
||||
getter = string.Template(self.getter).substitute(core=core)
|
||||
setter = string.Template(self.setter).substitute(core=core)
|
||||
getargs = dict(self.getter_args.items() + [('core', core)])
|
||||
setargs = dict(self.setter_args.items() + [('core', core)])
|
||||
params.append(RuntimeParameter(name, getter, setter, getargs, setargs, self.value_name, self.override))
|
||||
return params
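# A minimal sketch of how a CoreParameter expands (the getter/setter names are
# hypothetical): for core_names ['a7', 'a7', 'a15'], the template below yields
# RuntimeParameters named 'a7_governor' and 'a15_governor', each with
# core=<name> added to its getter/setter arguments.
#
#   CoreParameter('${core}_governor', getter='get_core_governor',
#                 setter='set_core_governor', value_name='governor')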
|
||||
|
||||
|
||||
class DynamicModuleSpec(dict):
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
return self.keys()[0]
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
dict.__init__(self)
|
||||
if args:
|
||||
if len(args) > 1:
|
||||
raise ValueError(args)
|
||||
value = args[0]
|
||||
else:
|
||||
value = kwargs
|
||||
if isinstance(value, basestring):
|
||||
self[value] = {}
|
||||
elif isinstance(value, dict) and len(value) == 1:
|
||||
for k, v in value.iteritems():
|
||||
self[k] = v
|
||||
else:
|
||||
raise ValueError(value)
|
||||
|
||||
|
||||
class DeviceMeta(ExtensionMeta):
|
||||
|
||||
to_propagate = ExtensionMeta.to_propagate + [
|
||||
('runtime_parameters', RuntimeParameter, AttributeCollection),
|
||||
('dynamic_modules', DynamicModuleSpec, AttributeCollection),
|
||||
]
|
||||
|
||||
|
||||
class Device(Extension):
|
||||
"""
|
||||
Base class for all devices supported by Workload Automation. Defines
|
||||
the interface the rest of WA uses to interact with devices.
|
||||
|
||||
:name: Unique name used to identify the device.
|
||||
:platform: The name of the device's platform (e.g. ``Android``) this may
|
||||
be used by workloads and instrumentation to assess whether they
|
||||
can run on the device.
|
||||
:working_directory: a string of the directory which is
|
||||
going to be used by the workloads on the device.
|
||||
:binaries_directory: a string of the binary directory for
|
||||
the device.
|
||||
:has_gpu: Should be ``True`` if the device has a separate GPU, and
|
||||
``False`` if graphics processing is done on a CPU.
|
||||
|
||||
.. note:: Pretty much all devices currently on the market
|
||||
have GPUs, however this may not be the case for some
|
||||
development boards.
|
||||
|
||||
:path_module: The name of one of the modules implementing the os.path
|
||||
interface, e.g. ``posixpath`` or ``ntpath``. You can provide
|
||||
your own implementation rather than relying on one of the
|
||||
standard library modules, in which case you need to specify
|
||||
the *full* path to your module, e.g. '/home/joebloggs/mypathimp.py'
|
||||
:parameters: A list of RuntimeParameter objects. The order of the objects
|
||||
is very important as the setters and getters will be called
|
||||
in the order the RuntimeParameter objects were inserted.
|
||||
:active_cores: This should be a list of all the currently active cpus in
|
||||
the device in ``'/sys/devices/system/cpu/online'``. The
|
||||
returned list should be read from the device at the time
|
||||
of read request.
|
||||
|
||||
"""
|
||||
__metaclass__ = DeviceMeta
|
||||
|
||||
parameters = [
|
||||
Parameter('core_names', kind=list_of(caseless_string), mandatory=True, default=None,
|
||||
description="""
|
||||
This is a list of all cpu cores on the device with each
|
||||
element being the core type, e.g. ``['a7', 'a7', 'a15']``. The
|
||||
order of the cores must match the order they are listed in
|
||||
``'/sys/devices/system/cpu'``. So in this case, ``'cpu0'`` must
|
||||
be an A7 core, and ``'cpu2'`` an A15.
|
||||
"""),
|
||||
Parameter('core_clusters', kind=list_of_integers, mandatory=True, default=None,
|
||||
description="""
|
||||
This is a list indicating the cluster affinity of the CPU cores,
|
||||
each element corresponding to the cluster ID of the core corresponding
|
||||
to its index. E.g. ``[0, 0, 1]`` indicates that cpu0 and cpu1 are on
|
||||
cluster 0, while cpu2 is on cluster 1. If this is not specified, this
|
||||
will be inferred from ``core_names`` if possible (assuming all cores with
|
||||
the same name are on the same cluster).
|
||||
"""),
|
||||
]
|
||||
|
||||
runtime_parameters = []
|
||||
# dynamic modules are loaded or not based on whether the device supports
|
||||
# them (established at runtime by the module probing the device).
|
||||
dynamic_modules = []
|
||||
|
||||
# These must be overwritten by subclasses.
|
||||
name = None
|
||||
platform = None
|
||||
default_working_directory = None
|
||||
has_gpu = None
|
||||
path_module = None
|
||||
active_cores = None
|
||||
|
||||
def __init__(self, **kwargs): # pylint: disable=W0613
|
||||
super(Device, self).__init__(**kwargs)
|
||||
if not self.path_module:
|
||||
raise NotImplementedError('path_module must be specified by the deriving classes.')
|
||||
libpath = os.path.dirname(os.__file__)
|
||||
modpath = os.path.join(libpath, self.path_module)
|
||||
if not modpath.lower().endswith('.py'):
|
||||
modpath += '.py'
|
||||
try:
|
||||
self.path = imp.load_source('device_path', modpath)
|
||||
except IOError:
|
||||
raise DeviceError('Unsupported path module: {}'.format(self.path_module))
|
||||
|
||||
def validate(self):
|
||||
# pylint: disable=access-member-before-definition,attribute-defined-outside-init
|
||||
if self.core_names and not self.core_clusters:
|
||||
self.core_clusters = []
|
||||
clusters = []
|
||||
for cn in self.core_names:
|
||||
if cn not in clusters:
|
||||
clusters.append(cn)
|
||||
self.core_clusters.append(clusters.index(cn))
|
||||
if len(self.core_names) != len(self.core_clusters):
|
||||
raise ConfigError('core_names and core_clusters are of different lengths.')
|
||||
|
||||
def initialize(self, context):
|
||||
"""
|
||||
Initialization that is performed at the beginning of the run (after the device has
|
||||
been connected).
|
||||
|
||||
"""
|
||||
loader = ExtensionLoader()
|
||||
for module_spec in self.dynamic_modules:
|
||||
module = self._load_module(loader, module_spec)
|
||||
if not hasattr(module, 'probe'):
|
||||
message = 'Module {} does not have "probe" attribute; cannot be loaded dynamically'
|
||||
raise ValueError(message.format(module.name))
|
||||
if module.probe(self):
|
||||
self.logger.debug('Installing module "{}"'.format(module.name))
|
||||
self._install_module(module)
|
||||
else:
|
||||
self.logger.debug('Module "{}" is not supported by the device'.format(module.name))
|
||||
|
||||
def reset(self):
|
||||
"""
|
||||
Initiate rebooting of the device.
|
||||
|
||||
Added in version 2.1.3.
|
||||
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def boot(self, *args, **kwargs):
|
||||
"""
|
||||
Perform the steps necessary to boot the device to the point where it is ready
|
||||
to accept other commands.
|
||||
|
||||
Changed in version 2.1.3: no longer expected to wait until boot completes.
|
||||
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def connect(self, *args, **kwargs):
|
||||
"""
|
||||
Establish a connection to the device that will be used for subsequent commands.
|
||||
|
||||
Added in version 2.1.3.
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def disconnect(self):
|
||||
""" Close the established connection to the device. """
|
||||
raise NotImplementedError()
|
||||
|
||||
def ping(self):
|
||||
"""
|
||||
This must return successfully if the device is able to receive commands, or must
|
||||
raise :class:`wlauto.exceptions.DeviceNotRespondingError` if the device cannot respond.
|
||||
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def get_runtime_parameter_names(self):
|
||||
return [p.name for p in self._expand_runtime_parameters()]
|
||||
|
||||
def get_runtime_parameters(self):
|
||||
""" returns the runtime parameters that have been set. """
|
||||
# pylint: disable=cell-var-from-loop
|
||||
runtime_parameters = OrderedDict()
|
||||
for rtp in self._expand_runtime_parameters():
|
||||
if not rtp.getter:
|
||||
continue
|
||||
getter = getattr(self, rtp.getter)
|
||||
rtp_value = getter(**rtp.getter_args)
|
||||
runtime_parameters[rtp.name] = rtp_value
|
||||
return runtime_parameters
|
||||
|
||||
def set_runtime_parameters(self, params):
|
||||
"""
|
||||
The parameters are taken from the keyword arguments and are specific to
|
||||
a particular device. See the device documentation.
|
||||
|
||||
"""
|
||||
runtime_parameters = self._expand_runtime_parameters()
|
||||
rtp_map = {rtp.name.lower(): rtp for rtp in runtime_parameters}
|
||||
|
||||
params = OrderedDict((k.lower(), v) for k, v in params.iteritems() if v is not None)
|
||||
|
||||
expected_keys = rtp_map.keys()
|
||||
if not set(params.keys()).issubset(set(expected_keys)):
|
||||
unknown_params = list(set(params.keys()).difference(set(expected_keys)))
|
||||
raise ConfigError('Unknown runtime parameter(s): {}'.format(unknown_params))
|
||||
|
||||
for param in params:
|
||||
self.logger.debug('Setting runtime parameter "{}"'.format(param))
|
||||
rtp = rtp_map[param]
|
||||
setter = getattr(self, rtp.setter)
|
||||
args = dict(rtp.setter_args.items() + [(rtp.value_name, params[rtp.name.lower()])])
|
||||
setter(**args)
|
||||
|
||||
def capture_screen(self, filepath):
|
||||
"""Captures the current device screen into the specified file in a PNG format."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def get_properties(self, output_path):
|
||||
"""Captures and saves the device configuration properties version and
|
||||
any other relevant information. Return them in a dict"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def listdir(self, path, **kwargs):
|
||||
""" List the contents of the specified directory. """
|
||||
raise NotImplementedError()
|
||||
|
||||
def push_file(self, source, dest):
|
||||
""" Push a file from the host file system onto the device. """
|
||||
raise NotImplementedError()
|
||||
|
||||
def pull_file(self, source, dest):
|
||||
""" Pull a file from device system onto the host file system. """
|
||||
raise NotImplementedError()
|
||||
|
||||
def delete_file(self, filepath):
|
||||
""" Delete the specified file on the device. """
|
||||
raise NotImplementedError()
|
||||
|
||||
def file_exists(self, filepath):
|
||||
""" Check if the specified file or directory exist on the device. """
|
||||
raise NotImplementedError()
|
||||
|
||||
def get_pids_of(self, process_name):
|
||||
""" Returns a list of PIDs of the specified process name. """
|
||||
raise NotImplementedError()
|
||||
|
||||
def kill(self, pid, as_root=False):
|
||||
""" Kill the process with the specified PID. """
|
||||
raise NotImplementedError()
|
||||
|
||||
def killall(self, process_name, as_root=False):
|
||||
""" Kill all running processes with the specified name. """
|
||||
raise NotImplementedError()
|
||||
|
||||
def install(self, filepath, **kwargs):
|
||||
""" Install the specified file on the device. What "install" means is device-specific
|
||||
and may possibly also depend on the type of file."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def uninstall(self, filepath):
|
||||
""" Uninstall the specified file on the device. What "uninstall" means is device-specific
|
||||
and may possibly also depend on the type of file."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def execute(self, command, timeout=None, **kwargs):
|
||||
"""
|
||||
Execute the specified command on the device and return the output.
|
||||
|
||||
:param command: Command to be executed on the device.
|
||||
:param timeout: If the command does not return after the specified time,
|
||||
execute() will abort with an error. If there is no timeout for
|
||||
the command, this should be set to 0 or None.
|
||||
|
||||
Other device-specific keyword arguments may also be specified.
|
||||
|
||||
:returns: The stdout output from the command.
|
||||
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def sleep(self, seconds):
|
||||
"""Sleep for the specified time on the target device.
|
||||
|
||||
:param seconds: Time in seconds to sleep on the device
|
||||
|
||||
The sleep is executed on the device using self.execute(). We
|
||||
set the timeout for this command to be 10 seconds longer than
|
||||
the sleep itself to make sure the command has time to complete
|
||||
before we timeout.
|
||||
|
||||
"""
|
||||
self.execute("sleep {}".format(seconds), timeout=seconds + 10)
|
||||
|
||||
def set_sysfile_value(self, filepath, value, verify=True, binary=False):
|
||||
"""
|
||||
Write the specified value to the specified file on the device
|
||||
and verify that the value has actually been written.
|
||||
|
||||
:param file: The file to be modified.
|
||||
:param value: The value to be written to the file. Must be
|
||||
an int or a string convertible to an int.
|
||||
:param verify: Specifies whether the value should be verified, once written.
|
||||
:param binary: Specifies whether the value should be written as binary data.
|
||||
|
||||
Should raise DeviceError if the value could not be written.
|
||||
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def get_sysfile_value(self, sysfile, kind=None, binary=False):
|
||||
"""
|
||||
Get the contents of the specified sysfile.
|
||||
|
||||
:param sysfile: The file whose contents will be returned.
|
||||
|
||||
:param kind: The type of value to be expected in the sysfile. This can
|
||||
be any Python callable that takes a single str argument.
|
||||
If not specified or is None, the contents will be returned
|
||||
as a string.
|
||||
:param binary: Whether the value should be encoded into base64 for reading
|
||||
to deal with binary format.
|
||||
|
||||
"""
|
||||
raise NotImplementedError()
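A small, assumed sketch of how these two sysfile methods are meant to be used together (the path is illustrative only)::

    # bring cpu1 online and read the setting back as an integer
    device.set_sysfile_value('/sys/devices/system/cpu/cpu1/online', 1)
    online = device.get_sysfile_value('/sys/devices/system/cpu/cpu1/online', kind=int)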
|
||||
|
||||
def start(self):
|
||||
"""
|
||||
This gets invoked before an iteration is started and is intended to help the
device manage any internal supporting functions.
|
||||
|
||||
"""
|
||||
pass
|
||||
|
||||
def stop(self):
|
||||
"""
|
||||
This gets invoked after iteration execution has completed and is intended to help the
device manage any internal supporting functions.
|
||||
|
||||
"""
|
||||
pass
|
||||
|
||||
def is_network_connected(self):
|
||||
"""
|
||||
Checks if the device is connected to the internet
|
||||
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def __str__(self):
|
||||
return 'Device<{}>'.format(self.name)
|
||||
|
||||
__repr__ = __str__
|
||||
|
||||
def _expand_runtime_parameters(self):
|
||||
expanded_params = []
|
||||
for param in self.runtime_parameters:
|
||||
if isinstance(param, CoreParameter):
|
||||
expanded_params.extend(param.get_runtime_parameters(self.core_names)) # pylint: disable=no-member
|
||||
else:
|
||||
expanded_params.append(param)
|
||||
return expanded_params
|
||||
|
||||
@contextmanager
|
||||
def _check_alive(self):
|
||||
try:
|
||||
yield
|
||||
except Exception as e:
|
||||
self.ping()
|
||||
raise e
|
318
wlauto/core/device_manager.py
Normal file
@@ -0,0 +1,318 @@
|
||||
import string
|
||||
from collections import OrderedDict
|
||||
|
||||
from wlauto.core.extension import Extension, Parameter
|
||||
from wlauto.exceptions import ConfigError
|
||||
from wlauto.utils.types import list_of_integers, list_of, caseless_string
|
||||
|
||||
from devlib.platform import Platform
|
||||
from devlib.target import AndroidTarget, Cpuinfo, KernelVersion, KernelConfig
|
||||
|
||||
__all__ = ['RuntimeParameter', 'CoreParameter', 'DeviceManager', 'TargetInfo']
|
||||
|
||||
|
||||
class RuntimeParameter(object):
|
||||
"""
|
||||
A runtime parameter which has its getter and setter methods associated it
|
||||
with it.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, name, getter, setter,
|
||||
getter_args=None, setter_args=None,
|
||||
value_name='value', override=False):
|
||||
"""
|
||||
:param name: the name of the parameter.
|
||||
:param getter: the getter method which returns the value of this parameter.
|
||||
:param setter: the setter method which sets the value of this parameter. The setter
|
||||
always expects to be passed one argument when it is called.
|
||||
:param getter_args: keyword arguments to be used when invoking the getter.
|
||||
:param setter_args: keyword arguments to be used when invoking the setter.
|
||||
:param override: A ``bool`` that specifies whether a parameter of the same name further up the
|
||||
hierarchy should be overridden. If this is ``False`` (the default), an exception
|
||||
will be raised by the ``AttributeCollection`` instead.
|
||||
|
||||
"""
|
||||
self.name = name
|
||||
self.getter = getter
|
||||
self.setter = setter
|
||||
self.getter_args = getter_args or {}
|
||||
self.setter_args = setter_args or {}
|
||||
self.value_name = value_name
|
||||
self.override = override
|
||||
|
||||
def __str__(self):
|
||||
return self.name
|
||||
|
||||
__repr__ = __str__
|
||||
|
||||
|
||||
class CoreParameter(RuntimeParameter):
|
||||
"""A runtime parameter that will get expanded into a RuntimeParameter for each core type."""
|
||||
|
||||
def get_runtime_parameters(self, core_names):
|
||||
params = []
|
||||
for core in set(core_names):
|
||||
name = string.Template(self.name).substitute(core=core)
|
||||
getter = string.Template(self.getter).substitute(core=core)
|
||||
setter = string.Template(self.setter).substitute(core=core)
|
||||
getargs = dict(self.getter_args.items() + [('core', core)])
|
||||
setargs = dict(self.setter_args.items() + [('core', core)])
|
||||
params.append(RuntimeParameter(name, getter, setter, getargs, setargs, self.value_name, self.override))
|
||||
return params
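To make the expansion concrete, the following hedged sketch shows what a CoreParameter yields for a big.LITTLE core list; one RuntimeParameter is produced per distinct core name (ordering may vary because a set is used)::

    param = CoreParameter('${core}_governor', 'get_core_governor', 'set_core_governor',
                          value_name='governor')
    expanded = param.get_runtime_parameters(['a7', 'a7', 'a15'])
    print([p.name for p in expanded])   # e.g. ['a7_governor', 'a15_governor']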
|
||||
|
||||
|
||||
class TargetInfo(object):
|
||||
|
||||
@staticmethod
|
||||
def from_pod(pod):
|
||||
instance = TargetInfo()
|
||||
instance.target = pod['target']
|
||||
instance.abi = pod['abi']
|
||||
instance.cpuinfo = Cpuinfo(pod['cpuinfo'])
|
||||
instance.os = pod['os']
|
||||
instance.os_version = pod['os_version']
|
||||
instance.abi = pod['abi']
|
||||
instance.is_rooted = pod['is_rooted']
|
||||
instance.kernel_version = KernelVersion(pod['kernel_version'])
|
||||
instance.kernel_config = KernelConfig(pod['kernel_config'])
|
||||
|
||||
if pod["target"] == "AndroidTarget":
|
||||
instance.screen_resolution = pod['screen_resolution']
|
||||
instance.prop = pod['prop']
|
||||
instance.android_id = pod['android_id']
|
||||
|
||||
return instance
|
||||
|
||||
def __init__(self, target=None):
|
||||
if target:
|
||||
self.target = target.__class__.__name__
|
||||
self.cpuinfo = target.cpuinfo
|
||||
self.os = target.os
|
||||
self.os_version = target.os_version
|
||||
self.abi = target.abi
|
||||
self.is_rooted = target.is_rooted
|
||||
self.kernel_version = target.kernel_version
|
||||
self.kernel_config = target.config
|
||||
|
||||
if isinstance(target, AndroidTarget):
|
||||
self.screen_resolution = target.screen_resolution
|
||||
self.prop = target.getprop()
|
||||
self.android_id = target.android_id
|
||||
|
||||
else:
|
||||
self.target = None
|
||||
self.cpuinfo = None
|
||||
self.os = None
|
||||
self.os_version = None
|
||||
self.abi = None
|
||||
self.is_rooted = None
|
||||
self.kernel_version = None
|
||||
self.kernel_config = None
|
||||
|
||||
if isinstance(target, AndroidTarget):
|
||||
self.screen_resolution = None
|
||||
self.prop = None
|
||||
self.android_id = None
|
||||
|
||||
def to_pod(self):
|
||||
pod = {}
|
||||
pod['target'] = self.target
|
||||
pod['abi'] = self.abi
|
||||
pod['cpuinfo'] = self.cpuinfo.text
|
||||
pod['os'] = self.os
|
||||
pod['os_version'] = self.os_version
|
||||
pod['abi'] = self.abi
|
||||
pod['is_rooted'] = self.is_rooted
|
||||
pod['kernel_version'] = self.kernel_version.version
|
||||
pod['kernel_config'] = self.kernel_config.text
|
||||
|
||||
if self.target == "AndroidTarget":
|
||||
pod['screen_resolution'] = self.screen_resolution
|
||||
pod['prop'] = self.prop
|
||||
pod['android_id'] = self.android_id
|
||||
|
||||
return pod
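The pod ("plain old data") form is intended to be trivially serialisable; a hedged round-trip sketch, assuming a connected devlib target and a hypothetical output file::

    import json

    info = TargetInfo(target)                     # 'target' is assumed to be a connected devlib target
    with open('target_info.json', 'w') as fh:     # hypothetical output location
        json.dump(info.to_pod(), fh)
    with open('target_info.json') as fh:
        restored = TargetInfo.from_pod(json.load(fh))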
|
||||
|
||||
|
||||
class DeviceManager(Extension):
|
||||
|
||||
name = None
|
||||
target_type = None
|
||||
platform_type = Platform
|
||||
has_gpu = None
|
||||
path_module = None
|
||||
info = None
|
||||
|
||||
parameters = [
|
||||
Parameter('core_names', kind=list_of(caseless_string),
|
||||
description="""
|
||||
This is a list of all cpu cores on the device with each
|
||||
element being the core type, e.g. ``['a7', 'a7', 'a15']``. The
|
||||
order of the cores must match the order they are listed in
|
||||
``'/sys/devices/system/cpu'``. So in this case, ``'cpu0'`` must
|
||||
be an A7 core, and ``'cpu2'`` an A15.
|
||||
"""),
|
||||
Parameter('core_clusters', kind=list_of_integers,
|
||||
description="""
|
||||
This is a list indicating the cluster affinity of the CPU cores,
|
||||
each element corresponding to the cluster ID of the core corresponding
to its index. E.g. ``[0, 0, 1]`` indicates that cpu0 and cpu1 are on
|
||||
cluster 0, while cpu2 is on cluster 1. If this is not specified, this
|
||||
will be inferred from ``core_names`` if possible (assuming all cores with
|
||||
the same name are on the same cluster).
|
||||
"""),
|
||||
Parameter('working_directory',
|
||||
description='''
|
||||
Working directory to be used by WA. This must be in a location where the specified user
|
||||
has write permissions. This will default to /home/<username>/wa (or to /root/wa, if
|
||||
username is 'root').
|
||||
'''),
|
||||
Parameter('binaries_directory',
|
||||
description='Location of executable binaries on this device (must be in PATH).'),
|
||||
]
|
||||
modules = []
|
||||
|
||||
runtime_parameters = [
|
||||
RuntimeParameter('sysfile_values', 'get_sysfile_values', 'set_sysfile_values', value_name='params'),
|
||||
CoreParameter('${core}_cores', 'get_number_of_online_cpus', 'set_number_of_online_cpus',
|
||||
value_name='number'),
|
||||
CoreParameter('${core}_min_frequency', 'get_core_min_frequency', 'set_core_min_frequency',
|
||||
value_name='freq'),
|
||||
CoreParameter('${core}_max_frequency', 'get_core_max_frequency', 'set_core_max_frequency',
|
||||
value_name='freq'),
|
||||
CoreParameter('${core}_frequency', 'get_core_cur_frequency', 'set_core_cur_frequency',
|
||||
value_name='freq'),
|
||||
CoreParameter('${core}_governor', 'get_core_governor', 'set_core_governor',
|
||||
value_name='governor'),
|
||||
CoreParameter('${core}_governor_tunables', 'get_core_governor_tunables', 'set_core_governor_tunables',
|
||||
value_name='tunables'),
|
||||
]
|
||||
|
||||
# Framework
|
||||
|
||||
def connect(self):
|
||||
raise NotImplementedError("connect method must be implemented for device managers")
|
||||
|
||||
def initialize(self, context):
|
||||
super(DeviceManager, self).initialize(context)
|
||||
self.info = TargetInfo(self.target)
|
||||
self.target.setup()
|
||||
|
||||
def start(self):
|
||||
pass
|
||||
|
||||
def stop(self):
|
||||
pass
|
||||
|
||||
def validate(self):
|
||||
pass
|
||||
|
||||
# Runtime Parameters
|
||||
|
||||
def get_runtime_parameter_names(self):
|
||||
return [p.name for p in self._expand_runtime_parameters()]
|
||||
|
||||
def get_runtime_parameters(self):
|
||||
""" returns the runtime parameters that have been set. """
|
||||
# pylint: disable=cell-var-from-loop
|
||||
runtime_parameters = OrderedDict()
|
||||
for rtp in self._expand_runtime_parameters():
|
||||
if not rtp.getter:
|
||||
continue
|
||||
getter = getattr(self, rtp.getter)
|
||||
rtp_value = getter(**rtp.getter_args)
|
||||
runtime_parameters[rtp.name] = rtp_value
|
||||
return runtime_parameters
|
||||
|
||||
def set_runtime_parameters(self, params):
|
||||
"""
|
||||
The parameters are taken from the keyword arguments and are specific to
|
||||
a particular device. See the device documentation.
|
||||
|
||||
"""
|
||||
runtime_parameters = self._expand_runtime_parameters()
|
||||
rtp_map = {rtp.name.lower(): rtp for rtp in runtime_parameters}
|
||||
|
||||
params = OrderedDict((k.lower(), v) for k, v in params.iteritems() if v is not None)
|
||||
|
||||
expected_keys = rtp_map.keys()
|
||||
if not set(params.keys()).issubset(set(expected_keys)):
|
||||
unknown_params = list(set(params.keys()).difference(set(expected_keys)))
|
||||
raise ConfigError('Unknown runtime parameter(s): {}'.format(unknown_params))
|
||||
|
||||
for param in params:
|
||||
self.logger.debug('Setting runtime parameter "{}"'.format(param))
|
||||
rtp = rtp_map[param]
|
||||
setter = getattr(self, rtp.setter)
|
||||
args = dict(rtp.setter_args.items() + [(rtp.value_name, params[rtp.name.lower()])])
|
||||
setter(**args)
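A hedged usage sketch of the mapping this method expects: keys are matched case-insensitively against the expanded parameter names (values below are illustrative)::

    device_manager.set_runtime_parameters({
        'a15_cores': 2,                  # dispatched to set_number_of_online_cpus(core='a15', number=2)
        'a15_governor': 'performance',   # dispatched to set_core_governor(core='a15', governor='performance')
    })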
|
||||
|
||||
def _expand_runtime_parameters(self):
|
||||
expanded_params = []
|
||||
for param in self.runtime_parameters:
|
||||
if isinstance(param, CoreParameter):
|
||||
expanded_params.extend(param.get_runtime_parameters(self.target.core_names)) # pylint: disable=no-member
|
||||
else:
|
||||
expanded_params.append(param)
|
||||
return expanded_params
|
||||
|
||||
# Runtime parameter getters/setters
|
||||
|
||||
_written_sysfiles = []
|
||||
|
||||
def get_sysfile_values(self):
|
||||
return self._written_sysfiles
|
||||
|
||||
def set_sysfile_values(self, params):
|
||||
for sysfile, value in params.iteritems():
|
||||
verify = not sysfile.endswith('!')
|
||||
sysfile = sysfile.rstrip('!')
|
||||
self._written_sysfiles.append((sysfile, value))
|
||||
self.target.write_value(sysfile, value, verify=verify)
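The trailing ``!`` handled above means "write this value without read-back verification". A small, assumed example of a ``sysfile_values`` mapping (paths and values are illustrative)::

    device_manager.set_sysfile_values({
        '/proc/sys/kernel/sched_latency_ns': 10000000,    # written and verified
        '/sys/kernel/debug/tracing/tracing_on!': 1,       # '!' is stripped; written without verification
    })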
|
||||
|
||||
# pylint: disable=E1101
|
||||
|
||||
def _get_core_online_cpu(self, core):
|
||||
try:
|
||||
return self.target.list_online_core_cpus(core)[0]
|
||||
except IndexError:
|
||||
raise ValueError("No {} cores are online".format(core))
|
||||
|
||||
def get_number_of_online_cpus(self, core):
|
||||
return len(self.target.list_online_core_cpus(core))
|
||||
|
||||
def set_number_of_online_cpus(self, core, number):
|
||||
for cpu in self.target.core_cpus(core)[:number]:
|
||||
self.target.hotplug.online(cpu)
|
||||
|
||||
def get_core_min_frequency(self, core):
|
||||
return self.target.cpufreq.get_min_frequency(self._get_core_online_cpu(core))
|
||||
|
||||
def set_core_min_frequency(self, core, frequency):
|
||||
self.target.cpufreq.set_min_frequency(self._get_core_online_cpu(core), frequency)
|
||||
|
||||
def get_core_max_frequency(self, core):
|
||||
return self.target.cpufreq.get_max_frequency(self._get_core_online_cpu(core))
|
||||
|
||||
def set_core_max_frequency(self, core, frequency):
|
||||
self.target.cpufreq.set_max_frequency(self._get_core_online_cpu(core), frequency)
|
||||
|
||||
def get_core_frequency(self, core):
|
||||
return self.target.cpufreq.get_frequency(self._get_core_online_cpu(core))
|
||||
|
||||
def set_core_frequency(self, core, frequency):
|
||||
self.target.cpufreq.set_frequency(self._get_core_online_cpu(core), frequency)
|
||||
|
||||
def get_core_governor(self, core):
|
||||
return self.target.cpufreq.get_cpu_governor(self._get_core_online_cpu(core))
|
||||
|
||||
def set_core_governor(self, core, governor):
|
||||
self.target.cpufreq.set_cpu_governor(self._get_core_online_cpu(core), governor)
|
||||
|
||||
def get_core_governor_tunables(self, core):
|
||||
return self.target.cpufreq.get_governor_tunables(self._get_core_online_cpu(core))
|
||||
|
||||
def set_core_governor_tunables(self, core, tunables):
|
||||
self.target.cpufreq.set_governor_tunables(self._get_core_online_cpu(core),
|
||||
*tunables)
|
@@ -14,12 +14,10 @@
|
||||
#
|
||||
|
||||
|
||||
from __future__ import absolute_import
|
||||
import sys
|
||||
import argparse
|
||||
import logging
|
||||
import os
|
||||
import signal
|
||||
import subprocess
|
||||
import warnings
|
||||
|
||||
@@ -43,9 +41,6 @@ def load_commands(subparsers):
|
||||
for command in ext_loader.list_commands():
|
||||
settings.commands[command.name] = ext_loader.get_command(command.name, subparsers=subparsers)
|
||||
|
||||
def convert_TERM_into_INT_handler(signal, frame):
|
||||
logger.critical("TERM received, aborting")
|
||||
raise KeyboardInterrupt()
|
||||
|
||||
def main():
|
||||
try:
|
||||
@@ -67,7 +62,6 @@ def main():
|
||||
settings.update(args.config)
|
||||
init_logging(settings.verbosity)
|
||||
|
||||
signal.signal(signal.SIGTERM, convert_TERM_into_INT_handler)
|
||||
command = settings.commands[args.command]
|
||||
sys.exit(command.execute(args))
|
||||
|
||||
|
@@ -56,8 +56,7 @@ from wlauto.core.extension_loader import ExtensionLoader
|
||||
from wlauto.core.resolver import ResourceResolver
|
||||
from wlauto.core.result import ResultManager, IterationResult, RunResult
|
||||
from wlauto.exceptions import (WAError, ConfigError, TimeoutError, InstrumentError,
|
||||
DeviceError, DeviceNotRespondingError, ResourceError,
|
||||
HostError)
|
||||
DeviceError, DeviceNotRespondingError)
|
||||
from wlauto.utils.misc import ensure_directory_exists as _d, get_traceback, merge_dicts, format_duration
|
||||
|
||||
|
||||
@@ -143,8 +142,9 @@ class ExecutionContext(object):
|
||||
def result(self):
|
||||
return getattr(self.current_job, 'result', self.run_result)
|
||||
|
||||
def __init__(self, device, config):
|
||||
self.device = device
|
||||
def __init__(self, device_manager, config):
|
||||
self.device_manager = device_manager
|
||||
self.device = self.device_manager.target
|
||||
self.config = config
|
||||
self.reboot_policy = config.reboot_policy
|
||||
self.output_directory = None
|
||||
@@ -205,9 +205,6 @@ class ExecutionContext(object):
|
||||
def add_metric(self, *args, **kwargs):
|
||||
self.result.add_metric(*args, **kwargs)
|
||||
|
||||
def add_classifiers(self, **kwargs):
|
||||
self.result.classifiers.update(kwargs)
|
||||
|
||||
def add_artifact(self, name, path, kind, *args, **kwargs):
|
||||
if self.current_job is None:
|
||||
self.add_run_artifact(name, path, kind, *args, **kwargs)
|
||||
@@ -262,6 +259,7 @@ class Executor(object):
|
||||
self.warning_logged = False
|
||||
self.config = None
|
||||
self.ext_loader = None
|
||||
self.device_manager = None
|
||||
self.device = None
|
||||
self.context = None
|
||||
|
||||
@@ -305,10 +303,11 @@ class Executor(object):
|
||||
self.logger.debug('Initialising device configuration.')
|
||||
if not self.config.device:
|
||||
raise ConfigError('Make sure a device is specified in the config.')
|
||||
self.device = self.ext_loader.get_device(self.config.device, **self.config.device_config)
|
||||
self.device.validate()
|
||||
self.device_manager = self.ext_loader.get_device_manager(self.config.device, **self.config.device_config)
|
||||
self.device_manager.validate()
|
||||
self.device = self.device_manager.target
|
||||
|
||||
self.context = ExecutionContext(self.device, self.config)
|
||||
self.context = ExecutionContext(self.device_manager, self.config)
|
||||
|
||||
self.logger.debug('Loading resource discoverers.')
|
||||
self.context.initialize()
|
||||
@@ -345,11 +344,6 @@ class Executor(object):
|
||||
runner = self._get_runner(result_manager)
|
||||
runner.init_queue(self.config.workload_specs)
|
||||
runner.run()
|
||||
|
||||
if getattr(self.config, "clean_up", False):
|
||||
self.logger.info('Clearing WA files from device')
|
||||
self.device.delete_file(self.device.binaries_directory)
|
||||
self.device.delete_file(self.device.working_directory)
|
||||
self.execute_postamble()
|
||||
|
||||
def execute_postamble(self):
|
||||
@@ -393,7 +387,7 @@ class Executor(object):
|
||||
runnercls = RandomRunner
|
||||
else:
|
||||
raise ConfigError('Unexpected execution order: {}'.format(self.config.execution_order))
|
||||
return runnercls(self.device, self.context, result_manager)
|
||||
return runnercls(self.device_manager, self.context, result_manager)
|
||||
|
||||
def _error_signalled_callback(self):
|
||||
self.error_logged = True
|
||||
@@ -473,8 +467,9 @@ class Runner(object):
|
||||
return True
|
||||
return self.current_job.spec.id != self.next_job.spec.id
|
||||
|
||||
def __init__(self, device, context, result_manager):
|
||||
self.device = device
|
||||
def __init__(self, device_manager, context, result_manager):
|
||||
self.device_manager = device_manager
|
||||
self.device = device_manager.target
|
||||
self.context = context
|
||||
self.result_manager = result_manager
|
||||
self.logger = logging.getLogger('Runner')
|
||||
@@ -542,14 +537,13 @@ class Runner(object):
|
||||
self.context.run_info.start_time = datetime.utcnow()
|
||||
self._connect_to_device()
|
||||
self.logger.info('Initializing device')
|
||||
self.device.initialize(self.context)
|
||||
self.device_manager.initialize(self.context)
|
||||
|
||||
self.logger.info('Initializing workloads')
|
||||
for workload_spec in self.context.config.workload_specs:
|
||||
workload_spec.workload.initialize(self.context)
|
||||
|
||||
props = self.device.get_properties(self.context)
|
||||
self.context.run_info.device_properties = props
|
||||
self.context.run_info.device_properties = self.device_manager.info
|
||||
self.result_manager.initialize(self.context)
|
||||
self._send(signal.RUN_INIT)
|
||||
|
||||
@@ -559,7 +553,7 @@ class Runner(object):
|
||||
def _connect_to_device(self):
|
||||
if self.context.reboot_policy.perform_initial_boot:
|
||||
try:
|
||||
self.device.connect()
|
||||
self.device_manager.connect()
|
||||
except DeviceError: # device may be offline
|
||||
if self.device.can('reset_power'):
|
||||
with self._signal_wrap('INITIAL_BOOT'):
|
||||
@@ -573,7 +567,7 @@ class Runner(object):
|
||||
self._reboot_device()
|
||||
else:
|
||||
self.logger.info('Connecting to device')
|
||||
self.device.connect()
|
||||
self.device_manager.connect()
|
||||
|
||||
def _init_job(self):
|
||||
self.current_job.result.status = IterationResult.RUNNING
|
||||
@@ -606,7 +600,7 @@ class Runner(object):
|
||||
|
||||
instrumentation.disable_all()
|
||||
instrumentation.enable(spec.instrumentation)
|
||||
self.device.start()
|
||||
self.device_manager.start()
|
||||
|
||||
if self.spec_changed:
|
||||
self._send(signal.WORKLOAD_SPEC_START)
|
||||
@@ -615,7 +609,7 @@ class Runner(object):
|
||||
try:
|
||||
setup_ok = False
|
||||
with self._handle_errors('Setting up device parameters'):
|
||||
self.device.set_runtime_parameters(spec.runtime_parameters)
|
||||
self.device_manager.set_runtime_parameters(spec.runtime_parameters)
|
||||
setup_ok = True
|
||||
|
||||
if setup_ok:
|
||||
@@ -634,7 +628,7 @@ class Runner(object):
|
||||
if self.spec_will_change or not spec.enabled:
|
||||
self._send(signal.WORKLOAD_SPEC_END)
|
||||
finally:
|
||||
self.device.stop()
|
||||
self.device_manager.stop()
|
||||
|
||||
def _finalize_job(self):
|
||||
self.context.run_result.iteration_results.append(self.current_job.result)
|
||||
@@ -642,7 +636,7 @@ class Runner(object):
|
||||
job.iteration = self.context.current_iteration
|
||||
if job.result.status in self.config.retry_on_status:
|
||||
if job.retry >= self.config.max_retries:
|
||||
self.logger.error('Exceeded maximum number of retries. Abandoning job.')
|
||||
self.logger.error('Exceeded maxium number of retries. Abandoning job.')
|
||||
else:
|
||||
self.logger.info('Job status was {}. Retrying...'.format(job.result.status))
|
||||
retry_job = RunnerJob(job.spec, job.retry + 1)
|
||||
@@ -737,13 +731,6 @@ class Runner(object):
|
||||
filepath = os.path.join(settings.output_directory, filename)
|
||||
self.device.capture_screen(filepath)
|
||||
|
||||
def _take_uiautomator_dump(self, filename):
|
||||
if self.context.output_directory:
|
||||
filepath = os.path.join(self.context.output_directory, filename)
|
||||
else:
|
||||
filepath = os.path.join(settings.output_directory, filename)
|
||||
self.device.capture_ui_hierarchy(filepath)
|
||||
|
||||
@contextmanager
|
||||
def _handle_errors(self, action, on_error_status=IterationResult.FAILED):
|
||||
try:
|
||||
@@ -753,25 +740,19 @@ class Runner(object):
|
||||
except (KeyboardInterrupt, DeviceNotRespondingError):
|
||||
raise
|
||||
except (WAError, TimeoutError), we:
|
||||
self.device.ping()
|
||||
self.device.check_responsive()
|
||||
if self.current_job:
|
||||
self.current_job.result.status = on_error_status
|
||||
self.current_job.result.add_event(str(we))
|
||||
|
||||
# There is no point in taking a screenshot etc. if the issue is not
|
||||
# with the device but with the host or a missing resource
|
||||
if not (isinstance(we, ResourceError) or isinstance(we, HostError)):
|
||||
try:
|
||||
self._take_screenshot('error.png')
|
||||
if self.device.platform == 'android':
|
||||
self._take_uiautomator_dump('error.uix')
|
||||
except Exception, e: # pylint: disable=W0703
|
||||
# We're already in error state, so the fact that taking a
|
||||
# screenshot failed is not surprising...
|
||||
pass
|
||||
try:
|
||||
self._take_screenshot('error.png')
|
||||
except Exception, e: # pylint: disable=W0703
|
||||
# We're already in error state, so the fact that taking a
|
||||
# screenshot failed is not surprising...
|
||||
pass
|
||||
if action:
|
||||
action = action[0].lower() + action[1:]
|
||||
self.logger.error('Error while {}:\n\t{}'.format(action, str(we).replace("\n", "\n\t")))
|
||||
self.logger.error('Error while {}:\n\t{}'.format(action, we))
|
||||
except Exception, e: # pylint: disable=W0703
|
||||
error_text = '{}("{}")'.format(e.__class__.__name__, e)
|
||||
if self.current_job:
|
||||
|
@@ -224,11 +224,18 @@ class Param(object):
|
||||
else:
|
||||
new_value = current_value + [value]
|
||||
setattr(obj, self.name, new_value)
|
||||
|
||||
def validate(self, obj):
|
||||
value = getattr(obj, self.name, None)
|
||||
if value is not None:
|
||||
if self.allowed_values:
|
||||
self._validate_allowed_values(obj, value)
|
||||
if self.constraint:
|
||||
self._validate_constraint(obj, value)
|
||||
else:
|
||||
if self.mandatory:
|
||||
msg = 'No value specified for mandatory parameter {} in {}.'
|
||||
raise ConfigError(msg.format(self.name, obj.name))
|
||||
|
||||
def get_type_name(self):
|
||||
typename = str(self.kind)
|
||||
@@ -560,9 +567,7 @@ class Extension(object):
|
||||
if self.name is None:
|
||||
raise ValidationError('Name not set for {}'.format(self._classname))
|
||||
for param in self.parameters:
|
||||
if param.mandatory and getattr(self, param.name, None) is None:
|
||||
msg = 'No value specified for mandatory parameter {} in {}.'
|
||||
raise ConfigError(msg.format(param.name, self.name))
|
||||
param.validate(self)
|
||||
|
||||
def initialize(self, context):
|
||||
pass
|
||||
|
@@ -32,3 +32,4 @@ def get_extension_type(ext):
|
||||
if isinstance(ext, cls):
|
||||
return name
|
||||
raise ValueError('Unknown extension type: {}'.format(ext.__class__.__name__))
|
||||
|
||||
|
@@ -61,7 +61,7 @@ we want to push the file to the target device and then change the file mode to
|
||||
755 ::
|
||||
|
||||
def setup(self, context):
|
||||
self.device.push_file(BINARY_FILE, self.device.working_directory)
|
||||
self.device.push(BINARY_FILE, self.device.working_directory)
|
||||
self.device.execute('chmod 755 {}'.format(self.trace_on_device))
|
||||
|
||||
Then we implemented the start method, which will simply run the file to start
|
||||
@@ -85,7 +85,7 @@ are metric key, value, unit and lower_is_better, which is a boolean. ::
|
||||
def update_result(self, context):
|
||||
# pull the trace file to the device
|
||||
result = os.path.join(self.device.working_directory, 'trace.txt')
|
||||
self.device.pull_file(result, context.working_directory)
|
||||
self.device.pull(result, context.working_directory)
|
||||
|
||||
# parse the file if needs to be parsed, or add result to
|
||||
# context.result
|
||||
@@ -94,7 +94,7 @@ At the end, we might want to delete any files generated by the instrumentation
|
||||
and the code to clear these file goes in teardown method. ::
|
||||
|
||||
def teardown(self, context):
|
||||
self.device.delete_file(os.path.join(self.device.working_directory, 'trace.txt'))
|
||||
self.device.remove(os.path.join(self.device.working_directory, 'trace.txt'))
|
||||
|
||||
"""
|
||||
|
||||
@@ -241,7 +241,7 @@ class ManagedCallback(object):
|
||||
except (KeyboardInterrupt, DeviceNotRespondingError, TimeoutError): # pylint: disable=W0703
|
||||
raise
|
||||
except Exception as e: # pylint: disable=W0703
|
||||
logger.error('Error in instrument {}'.format(self.instrument.name))
|
||||
logger.error('Error in insturment {}'.format(self.instrument.name))
|
||||
global failures_detected # pylint: disable=W0603
|
||||
failures_detected = True
|
||||
if isinstance(e, WAError):
|
||||
@@ -396,3 +396,4 @@ class Instrument(Extension):
|
||||
|
||||
def __repr__(self):
|
||||
return 'Instrument({})'.format(self.name)
|
||||
|
||||
|
@@ -69,11 +69,7 @@ class ResourceResolver(object):
|
||||
self.logger.debug('\t{}'.format(result))
|
||||
return result
|
||||
if strict:
|
||||
if kwargs:
|
||||
criteria = ', '.join(['{}:{}'.format(k, v) for k, v in kwargs.iteritems()])
|
||||
raise ResourceError('{} ({}) could not be found'.format(resource, criteria))
|
||||
else:
|
||||
raise ResourceError('{} could not be found'.format(resource))
|
||||
raise ResourceError('{} could not be found'.format(resource))
|
||||
self.logger.debug('Resource {} not found.'.format(resource))
|
||||
return None
|
||||
|
||||
|
@@ -38,8 +38,8 @@ class GetterPriority(object):
|
||||
"""
|
||||
cached = 20
|
||||
preferred = 10
|
||||
remote = 5
|
||||
environment = 0
|
||||
remote = -4
|
||||
external_package = -5
|
||||
package = -10
|
||||
|
||||
|
@@ -327,3 +327,4 @@ class Metric(object):
|
||||
return '<{}>'.format(result)
|
||||
|
||||
__repr__ = __str__
|
||||
|
||||
|
@@ -186,3 +186,4 @@ def send(signal, sender, *args, **kwargs):
|
||||
|
||||
"""
|
||||
dispatcher.send(signal, sender, *args, **kwargs)
|
||||
|
||||
|
@@ -18,7 +18,7 @@ from collections import namedtuple
|
||||
|
||||
VersionTuple = namedtuple('Version', ['major', 'minor', 'revision'])
|
||||
|
||||
version = VersionTuple(2, 7, 0)
|
||||
version = VersionTuple(2, 4, 0)
|
||||
|
||||
|
||||
def get_wa_version():
|
||||
|
@@ -37,7 +37,6 @@ class Workload(Extension):
|
||||
supported_devices = []
|
||||
supported_platforms = []
|
||||
summary_metrics = []
|
||||
requires_network = False
|
||||
|
||||
def __init__(self, device, **kwargs):
|
||||
"""
|
||||
@@ -48,8 +47,9 @@ class Workload(Extension):
|
||||
super(Workload, self).__init__(**kwargs)
|
||||
if self.supported_devices and device.name not in self.supported_devices:
|
||||
raise WorkloadError('Workload {} does not support device {}'.format(self.name, device.name))
|
||||
if self.supported_platforms and device.platform not in self.supported_platforms:
|
||||
raise WorkloadError('Workload {} does not support platform {}'.format(self.name, device.platform))
|
||||
|
||||
if self.supported_platforms and device.os not in self.supported_platforms:
|
||||
raise WorkloadError('Workload {} does not support platform {}'.format(self.name, device.os))
|
||||
self.device = device
|
||||
|
||||
def init_resources(self, context):
|
||||
@@ -70,7 +70,7 @@ class Workload(Extension):
|
||||
"""
|
||||
pass
|
||||
|
||||
def setup(self, context): # pylint: disable=unused-argument
|
||||
def setup(self, context):
|
||||
"""
|
||||
Perform the setup necessary to run the workload, such as copying the necessary files
|
||||
to the device, configuring the environments, etc.
|
||||
@@ -79,8 +79,7 @@ class Workload(Extension):
|
||||
the workload.
|
||||
|
||||
"""
|
||||
if self.requires_network:
|
||||
self.check_network_connected()
|
||||
pass
|
||||
|
||||
def run(self, context):
|
||||
"""Execute the workload. This is the method that performs the actual "work" of the"""
|
||||
@@ -101,10 +100,5 @@ class Workload(Extension):
|
||||
def finalize(self, context):
|
||||
pass
|
||||
|
||||
def check_network_connected(self):
|
||||
if not self.device.is_network_connected():
|
||||
message = 'Workload "{}" requires internet. Device "{}" does not appear to be connected to the internet.'
|
||||
raise WorkloadError(message.format(self.name, self.device.name))
|
||||
|
||||
def __str__(self):
|
||||
return '<Workload {}>'.format(self.name)
|
||||
|
@@ -1,14 +0,0 @@
|
||||
# Copyright 2013-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
@@ -1,212 +0,0 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# Original implementation by Rene de Jong. Updated by Sascha Bischoff.
|
||||
|
||||
import logging
|
||||
import os
|
||||
import time
|
||||
|
||||
from wlauto import AndroidDevice, Parameter
|
||||
from wlauto.common.gem5.device import BaseGem5Device
|
||||
from wlauto.exceptions import DeviceError
|
||||
|
||||
|
||||
class Gem5AndroidDevice(BaseGem5Device, AndroidDevice):
|
||||
"""
|
||||
Implements gem5 Android device.
|
||||
|
||||
This class allows a user to connect WA to a simulation using gem5. The
|
||||
connection to the device is made using the telnet connection of the
|
||||
simulator, and is used for all commands. The simulator does not have ADB
|
||||
support, and therefore we need to fall back to using standard shell
|
||||
commands.
|
||||
|
||||
Files are copied into the simulation using a VirtIO 9P device in gem5. Files
|
||||
are copied out of the simulated environment using the m5 writefile command
|
||||
within the simulated system.
|
||||
|
||||
When starting the workload run, the simulator is automatically started by
|
||||
Workload Automation, and a connection to the simulator is established. WA
|
||||
will then wait for Android to boot on the simulated system (which can take
|
||||
hours), prior to executing any other commands on the device. It is also
|
||||
possible to resume from a checkpoint when starting the simulation. To do
|
||||
this, please append the relevant checkpoint commands from the gem5
|
||||
simulation script to the gem5_discription argument in the agenda.
|
||||
|
||||
Host system requirements:
|
||||
* VirtIO support. We rely on diod on the host system. This can be
|
||||
installed on ubuntu using the following command:
|
||||
|
||||
sudo apt-get install diod
|
||||
|
||||
Guest requirements:
|
||||
* VirtIO support. We rely on VirtIO to move files into the simulation.
|
||||
Please make sure that the following are set in the kernel
|
||||
configuration:
|
||||
|
||||
CONFIG_NET_9P=y
|
||||
|
||||
CONFIG_NET_9P_VIRTIO=y
|
||||
|
||||
CONFIG_9P_FS=y
|
||||
|
||||
CONFIG_9P_FS_POSIX_ACL=y
|
||||
|
||||
CONFIG_9P_FS_SECURITY=y
|
||||
|
||||
CONFIG_VIRTIO_BLK=y
|
||||
|
||||
* m5 binary. Please make sure that the m5 binary is on the device and
|
||||
can be found in the PATH.
|
||||
"""
|
||||
|
||||
name = 'gem5_android'
|
||||
platform = 'android'
|
||||
|
||||
parameters = [
|
||||
Parameter('core_names', default=[], override=True),
|
||||
Parameter('core_clusters', default=[], override=True),
|
||||
]
|
||||
|
||||
# Overwritten from Device. For documentation, see corresponding method in
|
||||
# Device.
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
self.logger = logging.getLogger('Gem5AndroidDevice')
|
||||
AndroidDevice.__init__(self, **kwargs)
|
||||
BaseGem5Device.__init__(self)
|
||||
|
||||
def login_to_device(self):
|
||||
pass
|
||||
|
||||
def wait_for_boot(self):
|
||||
"""
|
||||
Wait for the system to boot
|
||||
|
||||
We monitor the sys.boot_completed and service.bootanim.exit system
|
||||
properties to determine when the system has finished booting. In the
|
||||
event that we cannot coerce the result of service.bootanim.exit to an
|
||||
integer, we assume that the boot animation was disabled and do not wait
|
||||
for it to finish.
|
||||
|
||||
"""
|
||||
self.logger.info("Waiting for Android to boot...")
|
||||
while True:
|
||||
booted = False
|
||||
anim_finished = True # Assume boot animation was disabled on except
|
||||
try:
|
||||
booted = (int('0' + self.gem5_shell('getprop sys.boot_completed', check_exit_code=False).strip()) == 1)
|
||||
anim_finished = (int(self.gem5_shell('getprop service.bootanim.exit', check_exit_code=False).strip()) == 1)
|
||||
except ValueError:
|
||||
pass
|
||||
if booted and anim_finished:
|
||||
break
|
||||
time.sleep(60)
|
||||
|
||||
self.logger.info("Android booted")
|
||||
|
||||
def install(self, filepath, timeout=3 * 3600): # pylint: disable=W0221
|
||||
""" Install an APK or a normal executable """
|
||||
ext = os.path.splitext(filepath)[1].lower()
|
||||
if ext == '.apk':
|
||||
return self.install_apk(filepath, timeout)
|
||||
else:
|
||||
return self.install_executable(filepath)
|
||||
|
||||
def install_apk(self, filepath, timeout=3 * 3600): # pylint: disable=W0221
|
||||
"""
|
||||
Install an APK on the gem5 device
|
||||
|
||||
The APK is pushed to the device. Then the file and folder permissions
|
||||
are changed to ensure that the APK can be correctly installed. The APK
|
||||
is then installed on the device using 'pm'.
|
||||
"""
|
||||
self._check_ready()
|
||||
self.logger.info("Installing {}".format(filepath))
|
||||
ext = os.path.splitext(filepath)[1].lower()
|
||||
if ext == '.apk':
|
||||
filename = os.path.basename(filepath)
|
||||
on_device_path = os.path.join('/data/local/tmp', filename)
|
||||
self.push_file(filepath, on_device_path)
|
||||
# We need to make sure that the folder permissions are set
|
||||
# correctly, else the APK does not install correctly.
|
||||
self.gem5_shell('chmod 775 /data/local/tmp')
|
||||
self.gem5_shell('chmod 774 {}'.format(on_device_path))
|
||||
self.logger.debug("Actually installing the APK: {}".format(on_device_path))
|
||||
return self.gem5_shell("pm install {}".format(on_device_path))
|
||||
else:
|
||||
raise DeviceError('Can\'t install {}: unsupported format.'.format(filepath))
|
||||
|
||||
def install_executable(self, filepath, with_name=None):
|
||||
""" Install an executable """
|
||||
executable_name = os.path.basename(filepath)
|
||||
on_device_file = self.path.join(self.working_directory, executable_name)
|
||||
on_device_executable = self.path.join(self.binaries_directory, executable_name)
|
||||
self.push_file(filepath, on_device_file)
|
||||
if self.busybox:
|
||||
self.execute('{} cp {} {}'.format(self.busybox, on_device_file, on_device_executable))
|
||||
else:
|
||||
self.execute('cat {} > {}'.format(on_device_file, on_device_executable))
|
||||
self.execute('chmod 0777 {}'.format(on_device_executable))
|
||||
return on_device_executable
|
||||
|
||||
def uninstall(self, package):
|
||||
self._check_ready()
|
||||
self.gem5_shell("pm uninstall {}".format(package))
|
||||
|
||||
def dump_logcat(self, outfile, filter_spec=None):
|
||||
""" Extract logcat from simulation """
|
||||
self.logger.info("Extracting logcat from the simulated system")
|
||||
filename = outfile.split('/')[-1]
|
||||
command = 'logcat -d > {}'.format(filename)
|
||||
self.gem5_shell(command)
|
||||
self.pull_file("{}".format(filename), outfile)
|
||||
|
||||
def clear_logcat(self):
|
||||
"""Clear (flush) logcat log."""
|
||||
if self._logcat_poller:
|
||||
return self._logcat_poller.clear_buffer()
|
||||
else:
|
||||
return self.gem5_shell('logcat -c')
|
||||
|
||||
def disable_selinux(self):
|
||||
""" Disable SELinux. Overridden as parent implementation uses ADB """
|
||||
api_level = int(self.gem5_shell('getprop ro.build.version.sdk').strip())
|
||||
|
||||
# SELinux was added in Android 4.3 (API level 18). Trying to
|
||||
# 'getenforce' in earlier versions will produce an error.
|
||||
if api_level >= 18:
|
||||
se_status = self.execute('getenforce', as_root=True).strip()
|
||||
if se_status == 'Enforcing':
|
||||
self.execute('setenforce 0', as_root=True)
|
||||
|
||||
def get_properties(self, context): # pylint: disable=R0801
|
||||
""" Get the property files from the device """
|
||||
BaseGem5Device.get_properties(self, context)
|
||||
props = self._get_android_properties(context)
|
||||
return props
|
||||
|
||||
def capture_screen(self, filepath):
|
||||
if BaseGem5Device.capture_screen(self, filepath):
|
||||
return
|
||||
|
||||
# If we didn't manage to do the above, call the parent class.
|
||||
self.logger.warning("capture_screen: falling back to parent class implementation")
|
||||
AndroidDevice.capture_screen(self, filepath)
|
||||
|
||||
def initialize(self, context):
|
||||
self.resize_shell()
|
||||
self.deploy_m5(context, force=False)
|
@@ -1,38 +0,0 @@
|
||||
# Copyright 2013-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
from wlauto import AndroidDevice, Parameter
|
||||
|
||||
|
||||
class GenericDevice(AndroidDevice):
|
||||
name = 'generic_android'
|
||||
description = """
|
||||
A generic Android device interface. Use this if you do not have an interface
|
||||
for your device.
|
||||
|
||||
This should allow basic WA functionality on most Android devices using adb over
|
||||
USB. Some additional configuration may be required for some WA extensions
|
||||
(e.g. configuring ``core_names`` and ``core_clusters``).
|
||||
|
||||
"""
|
||||
|
||||
default_working_directory = '/storage/sdcard0/working'
|
||||
has_gpu = True
|
||||
|
||||
parameters = [
|
||||
Parameter('core_names', default=[], override=True),
|
||||
Parameter('core_clusters', default=[], override=True),
|
||||
]
|
@@ -1,220 +0,0 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
# pylint: disable=E1101
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
|
||||
import pexpect
|
||||
|
||||
from wlauto import BigLittleDevice, Parameter
|
||||
from wlauto.exceptions import DeviceError
|
||||
from wlauto.utils.serial_port import open_serial_connection, pulse_dtr
|
||||
from wlauto.utils.android import adb_connect, adb_disconnect, adb_list_devices
|
||||
from wlauto.utils.uefi import UefiMenu, UefiConfig
|
||||
from wlauto.utils.uboot import UbootMenu
|
||||
|
||||
|
||||
AUTOSTART_MESSAGE = 'Press Enter to stop auto boot...'
|
||||
|
||||
|
||||
class Juno(BigLittleDevice):
|
||||
|
||||
name = 'juno'
|
||||
description = """
|
||||
ARM Juno next generation big.LITTLE development platform.
|
||||
"""
|
||||
|
||||
capabilities = ['reset_power']
|
||||
|
||||
has_gpu = True
|
||||
|
||||
core_modules = [
|
||||
'vexpress',
|
||||
]
|
||||
|
||||
parameters = [
|
||||
Parameter('retries', kind=int, default=2,
|
||||
description="""Specifies the number of times the device will attempt to recover
|
||||
(normally, with a hard reset) if it detects that something went wrong."""),
|
||||
|
||||
Parameter('microsd_mount_point', default='/media/JUNO',
|
||||
description='Location at which the device\'s MicroSD card will be mounted.'),
|
||||
Parameter('port', default='/dev/ttyS0', description='Serial port on which the device is connected.'),
|
||||
Parameter('baudrate', kind=int, default=115200, description='Serial connection baud.'),
|
||||
Parameter('timeout', kind=int, default=300, description='Serial connection timeout.'),
|
||||
Parameter('core_names', default=['a53', 'a53', 'a53', 'a53', 'a57', 'a57'], override=True),
|
||||
Parameter('core_clusters', default=[0, 0, 0, 0, 1, 1], override=True),
|
||||
|
||||
Parameter('bootloader', default='uefi', allowed_values=['uefi', 'u-boot'],
|
||||
description="""Bootloader used on the device."""),
|
||||
Parameter('actually_disconnect', kind=bool, default=False,
|
||||
description="""
|
||||
Actually perform "adb disconnect" on closing the connection to the device.
|
||||
"""),
|
||||
|
||||
# VExpress flasher expects a device to have these:
|
||||
Parameter('uefi_entry', default='WA',
|
||||
description='The name of the entry to use (will be created if does not exist).'),
|
||||
Parameter('uefi_config', kind=UefiConfig,
|
||||
description='''Specifies the configuration for the UEFI entry for this device. If an
entry specified by the ``uefi_entry`` parameter doesn't exist in the UEFI menu,
it will be created using this config. This configuration will also be
used when flashing new images.''',
|
||||
default={
|
||||
'image_name': 'Image',
|
||||
'image_args': None, # populated from bootargs if not specified
|
||||
'fdt_support': True,
|
||||
}
|
||||
),
|
||||
Parameter('bootargs',
|
||||
description='''Default boot arguments to use when boot_arguments are not specified.'''),
|
||||
]
|
||||
|
||||
short_delay = 1
|
||||
firmware_prompt = 'Cmd>'
|
||||
|
||||
def validate(self):
|
||||
if not self.uefi_config.image_args:
|
||||
self.uefi_config.image_args = self.bootargs
|
||||
|
||||
def boot(self, hard=False, **kwargs):
|
||||
if kwargs:
|
||||
self.bootargs = kwargs # pylint: disable=attribute-defined-outside-init
|
||||
if hard:
|
||||
self.logger.debug('Performing a hard reset.')
|
||||
self.hard_reset()
|
||||
else:
|
||||
self.logger.debug('Resetting the device.')
|
||||
self.reset()
|
||||
if self.bootloader == 'uefi':
|
||||
self._boot_via_uefi()
|
||||
else:
|
||||
self._boot_via_uboot(bootargs=self.bootargs)
|
||||
|
||||
def _boot_via_uboot(self, **kwargs):
|
||||
if not kwargs:
|
||||
# Standard linaro configuration will proceed directly to the kernel
|
||||
return
|
||||
with open_serial_connection(port=self.port,
|
||||
baudrate=self.baudrate,
|
||||
timeout=self.timeout,
|
||||
init_dtr=0) as target:
|
||||
menu = UbootMenu(target)
|
||||
self.logger.debug('Waiting for U-Boot prompt...')
|
||||
menu.open(timeout=120)
|
||||
for var, value in kwargs.iteritems():
|
||||
menu.setenv(var, value)
|
||||
menu.boot()
|
||||
|
||||
def _boot_via_uefi(self):
|
||||
with open_serial_connection(port=self.port,
|
||||
baudrate=self.baudrate,
|
||||
timeout=self.timeout,
|
||||
init_dtr=0) as target:
|
||||
menu = UefiMenu(target)
|
||||
self.logger.debug('Waiting for UEFI menu...')
|
||||
menu.open(timeout=120)
|
||||
try:
|
||||
menu.select(self.uefi_entry)
|
||||
except LookupError:
|
||||
self.logger.debug('{} UEFI entry not found.'.format(self.uefi_entry))
|
||||
self.logger.debug('Attempting to create one using default flasher configuration.')
|
||||
menu.create_entry(self.uefi_entry, self.uefi_config)
|
||||
menu.select(self.uefi_entry)
|
||||
self.logger.debug('Waiting for the Android prompt.')
|
||||
target.expect(self.android_prompt, timeout=self.timeout)
|
||||
|
||||
def connect(self):
|
||||
if not self._is_ready:
|
||||
if not self.adb_name: # pylint: disable=E0203
|
||||
with open_serial_connection(timeout=self.timeout,
|
||||
port=self.port,
|
||||
baudrate=self.baudrate,
|
||||
init_dtr=0) as target:
|
||||
target.sendline('')
|
||||
self.logger.debug('Waiting for the Android prompt.')
|
||||
target.expect(self.android_prompt)
|
||||
|
||||
self.logger.debug('Waiting for IP address...')
|
||||
wait_start_time = time.time()
|
||||
while True:
|
||||
target.sendline('ip addr list eth0')
|
||||
time.sleep(1)
|
||||
try:
|
||||
target.expect(r'inet ([1-9]\d*.\d+.\d+.\d+)', timeout=10)
|
||||
self.adb_name = target.match.group(1) + ':5555' # pylint: disable=W0201
|
||||
break
|
||||
except pexpect.TIMEOUT:
|
||||
pass # We have our own timeout -- see below.
|
||||
if (time.time() - wait_start_time) > self.ready_timeout:
|
||||
raise DeviceError('Could not acquire IP address.')
|
||||
|
||||
if self.adb_name in adb_list_devices():
|
||||
adb_disconnect(self.adb_name)
|
||||
adb_connect(self.adb_name, timeout=self.timeout)
|
||||
super(Juno, self).connect() # wait for boot to complete etc.
|
||||
self._is_ready = True
|
||||
|
||||
def disconnect(self):
|
||||
if self._is_ready:
|
||||
super(Juno, self).disconnect()
|
||||
if self.actually_disconnect:
|
||||
adb_disconnect(self.adb_name)
|
||||
self._is_ready = False
|
||||
|
||||
def reset(self):
|
||||
# Currently, reboot is not working in Android on Juno, so
|
||||
# perform a hard reset instead
|
||||
self.hard_reset()
|
||||
|
||||
def hard_reset(self):
|
||||
self.disconnect()
|
||||
self.adb_name = None # Force re-acquire IP address on reboot. pylint: disable=attribute-defined-outside-init
|
||||
with open_serial_connection(port=self.port,
|
||||
baudrate=self.baudrate,
|
||||
timeout=300,
|
||||
init_dtr=0,
|
||||
get_conn=True) as (target, conn):
|
||||
pulse_dtr(conn, state=True, duration=0.1) # TRM specifies a pulse of >=100ms
|
||||
|
||||
i = target.expect([AUTOSTART_MESSAGE, self.firmware_prompt])
|
||||
if i:
|
||||
self.logger.debug('Saw firmware prompt.')
|
||||
time.sleep(self.short_delay)
|
||||
target.sendline('reboot')
|
||||
else:
|
||||
self.logger.debug('Saw auto boot message.')
|
||||
|
||||
def wait_for_microsd_mount_point(self, target, timeout=100):
|
||||
attempts = 1 + self.retries
|
||||
if os.path.exists(os.path.join(self.microsd_mount_point, 'config.txt')):
|
||||
return
|
||||
|
||||
self.logger.debug('Waiting for VExpress MicroSD to mount...')
|
||||
for i in xrange(attempts):
|
||||
if i: # Do not reboot on the first attempt.
|
||||
target.sendline('reboot')
|
||||
for _ in xrange(timeout):
|
||||
time.sleep(self.short_delay)
|
||||
if os.path.exists(os.path.join(self.microsd_mount_point, 'config.txt')):
|
||||
return
|
||||
raise DeviceError('Did not detect MicroSD mount on {}'.format(self.microsd_mount_point))
|
||||
|
||||
def get_android_id(self):
|
||||
# Android ID currently not set properly in Juno Android builds.
|
||||
return 'abad1deadeadbeef'
|
@@ -1,14 +0,0 @@
|
||||
from wlauto import AndroidDevice
|
||||
|
||||
|
||||
class MeizuMX6(AndroidDevice):
|
||||
|
||||
name = 'meizumx6'
|
||||
|
||||
@property
|
||||
def is_rooted(self):
|
||||
# "su" executable on a rooted Meizu MX6 is targeted
|
||||
# specifically towards Android applications and cannot
|
||||
# be used to execute a command line shell. This makes it
|
||||
# "broken" from WA prespective.
|
||||
return False
|
@@ -1,48 +0,0 @@
|
||||
# Copyright 2013-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
import time
|
||||
|
||||
from wlauto import AndroidDevice, Parameter
|
||||
|
||||
|
||||
class Nexus10Device(AndroidDevice):
|
||||
|
||||
name = 'Nexus10'
|
||||
description = """
|
||||
Nexus10 is a 10 inch tablet device, which has dual-core A15.
|
||||
|
||||
To be able to use Nexus10 in WA, the following must be true:
|
||||
|
||||
- USB Debugging Mode is enabled.
|
||||
- USB debugging authorisation has been granted for the host machine
|
||||
|
||||
"""
|
||||
|
||||
default_working_directory = '/sdcard/working'
|
||||
has_gpu = True
|
||||
max_cores = 2
|
||||
|
||||
parameters = [
|
||||
Parameter('core_names', default=['A15', 'A15'], override=True),
|
||||
Parameter('core_clusters', default=[0, 0], override=True),
|
||||
]
|
||||
|
||||
def initialize(self, context):
|
||||
time.sleep(self.long_delay)
|
||||
self.execute('svc power stayon true', check_exit_code=False)
|
||||
time.sleep(self.long_delay)
|
||||
self.execute('input keyevent 82')
|
@@ -1,40 +0,0 @@
|
||||
# Copyright 2013-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
from wlauto import AndroidDevice, Parameter
|
||||
|
||||
|
||||
class Nexus5Device(AndroidDevice):
|
||||
|
||||
name = 'Nexus5'
|
||||
description = """
|
||||
Adapter for Nexus 5.
|
||||
|
||||
To be able to use Nexus5 in WA, the following must be true:
|
||||
|
||||
- USB Debugging Mode is enabled.
|
||||
- USB debugging authorisation has been granted for the host machine
|
||||
|
||||
"""
|
||||
|
||||
default_working_directory = '/storage/sdcard0/working'
|
||||
has_gpu = True
|
||||
max_cores = 4
|
||||
|
||||
parameters = [
|
||||
Parameter('core_names', default=['krait400', 'krait400', 'krait400', 'krait400'], override=True),
|
||||
Parameter('core_clusters', default=[0, 0, 0, 0], override=True),
|
||||
]
|
@@ -1,76 +0,0 @@
|
||||
# Copyright 2013-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
import time
|
||||
|
||||
from wlauto import AndroidDevice, Parameter
|
||||
from wlauto.exceptions import TimeoutError
|
||||
from wlauto.utils.android import adb_shell
|
||||
|
||||
|
||||
class Note3Device(AndroidDevice):
|
||||
|
||||
name = 'Note3'
|
||||
description = """
|
||||
Adapter for Galaxy Note 3.
|
||||
|
||||
To be able to use Note3 in WA, the following must be true:
|
||||
|
||||
- USB Debugging Mode is enabled.
|
||||
- USB debugging authorisation has been granted for the host machine
|
||||
|
||||
"""
|
||||
|
||||
parameters = [
|
||||
Parameter('core_names', default=['A15', 'A15', 'A15', 'A15'], override=True),
|
||||
Parameter('core_clusters', default=[0, 0, 0, 0], override=True),
|
||||
Parameter('working_directory', default='/storage/sdcard0/wa-working', override=True),
|
||||
]
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(Note3Device, self).__init__(**kwargs)
|
||||
self._just_rebooted = False
|
||||
|
||||
def initialize(self, context):
|
||||
self.execute('svc power stayon true', check_exit_code=False)
|
||||
|
||||
def reset(self):
|
||||
super(Note3Device, self).reset()
|
||||
self._just_rebooted = True
|
||||
|
||||
def hard_reset(self):
|
||||
super(Note3Device, self).hard_reset()
|
||||
self._just_rebooted = True
|
||||
|
||||
def connect(self): # NOQA pylint: disable=R0912
|
||||
super(Note3Device, self).connect()
|
||||
if self._just_rebooted:
|
||||
self.logger.debug('Waiting for boot to complete...')
|
||||
# On the Note 3, adb connection gets reset some time after booting.
|
||||
# This causes errors during execution. To prevent this, open a shell
|
||||
# session and wait for it to be killed. Once its killed, give adb
|
||||
# enough time to restart, and then the device should be ready.
|
||||
try:
|
||||
adb_shell(self.adb_name, '', timeout=20) # pylint: disable=no-member
|
||||
time.sleep(5) # give adb time to re-initialize
|
||||
except TimeoutError:
|
||||
pass # timed out waiting for the session to be killed -- assume not going to be.
|
||||
|
||||
self.logger.debug('Boot completed.')
|
||||
self._just_rebooted = False
|
||||
# Swipe upwards to unlock the screen.
|
||||
time.sleep(self.long_delay)
|
||||
self.execute('input touchscreen swipe 540 1600 560 800 ')
|
@@ -1,37 +0,0 @@
|
||||
# Copyright 2013-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
from wlauto import AndroidDevice, Parameter
|
||||
|
||||
|
||||
class OdroidXU3(AndroidDevice):
|
||||
|
||||
name = "odroidxu3"
|
||||
description = 'HardKernel Odroid XU3 development board.'
|
||||
|
||||
core_modules = [
|
||||
'odroidxu3-fan',
|
||||
]
|
||||
|
||||
parameters = [
|
||||
Parameter('adb_name', default='BABABEEFBABABEEF', override=True),
|
||||
Parameter('working_directory', default='/data/local/wa-working', override=True),
|
||||
Parameter('core_names', default=['a7', 'a7', 'a7', 'a7', 'a15', 'a15', 'a15', 'a15'], override=True),
|
||||
Parameter('core_clusters', default=[0, 0, 0, 0, 1, 1, 1, 1], override=True),
|
||||
Parameter('port', default='/dev/ttyUSB0', kind=str,
|
||||
description='Serial port on which the device is connected'),
|
||||
Parameter('baudrate', default=115200, kind=int, description='Serial connection baud rate'),
|
||||
]
|
@@ -1,849 +0,0 @@
|
||||
# Copyright 2013-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
import os
|
||||
import sys
|
||||
import re
|
||||
import string
|
||||
import shutil
|
||||
import time
|
||||
from collections import Counter
|
||||
|
||||
import pexpect
|
||||
|
||||
from wlauto import BigLittleDevice, RuntimeParameter, Parameter, settings
|
||||
from wlauto.exceptions import ConfigError, DeviceError
|
||||
from wlauto.utils.android import adb_connect, adb_disconnect, adb_list_devices
|
||||
from wlauto.utils.serial_port import open_serial_connection
|
||||
from wlauto.utils.misc import merge_dicts
|
||||
from wlauto.utils.types import boolean
|
||||
|
||||
|
||||
BOOT_FIRMWARE = {
|
||||
'uefi': {
|
||||
'SCC_0x010': '0x000003E0',
|
||||
'reboot_attempts': 0,
|
||||
},
|
||||
'bootmon': {
|
||||
'SCC_0x010': '0x000003D0',
|
||||
'reboot_attempts': 2,
|
||||
},
|
||||
}
|
||||
|
||||
MODES = {
|
||||
'mp_a7_only': {
|
||||
'images_file': 'images_mp.txt',
|
||||
'dtb': 'mp_a7',
|
||||
'initrd': 'init_mp',
|
||||
'kernel': 'kern_mp',
|
||||
'SCC_0x700': '0x1032F003',
|
||||
'cpus': ['a7', 'a7', 'a7'],
|
||||
},
|
||||
'mp_a7_bootcluster': {
|
||||
'images_file': 'images_mp.txt',
|
||||
'dtb': 'mp_a7bc',
|
||||
'initrd': 'init_mp',
|
||||
'kernel': 'kern_mp',
|
||||
'SCC_0x700': '0x1032F003',
|
||||
'cpus': ['a7', 'a7', 'a7', 'a15', 'a15'],
|
||||
},
|
||||
'mp_a15_only': {
|
||||
'images_file': 'images_mp.txt',
|
||||
'dtb': 'mp_a15',
|
||||
'initrd': 'init_mp',
|
||||
'kernel': 'kern_mp',
|
||||
'SCC_0x700': '0x0032F003',
|
||||
'cpus': ['a15', 'a15'],
|
||||
},
|
||||
'mp_a15_bootcluster': {
|
||||
'images_file': 'images_mp.txt',
|
||||
'dtb': 'mp_a15bc',
|
||||
'initrd': 'init_mp',
|
||||
'kernel': 'kern_mp',
|
||||
'SCC_0x700': '0x0032F003',
|
||||
'cpus': ['a15', 'a15', 'a7', 'a7', 'a7'],
|
||||
},
|
||||
'iks_cpu': {
|
||||
'images_file': 'images_iks.txt',
|
||||
'dtb': 'iks',
|
||||
'initrd': 'init_iks',
|
||||
'kernel': 'kern_iks',
|
||||
'SCC_0x700': '0x1032F003',
|
||||
'cpus': ['a7', 'a7'],
|
||||
},
|
||||
'iks_a15': {
|
||||
'images_file': 'images_iks.txt',
|
||||
'dtb': 'iks',
|
||||
'initrd': 'init_iks',
|
||||
'kernel': 'kern_iks',
|
||||
'SCC_0x700': '0x0032F003',
|
||||
'cpus': ['a15', 'a15'],
|
||||
},
|
||||
'iks_a7': {
|
||||
'images_file': 'images_iks.txt',
|
||||
'dtb': 'iks',
|
||||
'initrd': 'init_iks',
|
||||
'kernel': 'kern_iks',
|
||||
'SCC_0x700': '0x0032F003',
|
||||
'cpus': ['a7', 'a7'],
|
||||
},
|
||||
'iks_ns_a15': {
|
||||
'images_file': 'images_iks.txt',
|
||||
'dtb': 'iks',
|
||||
'initrd': 'init_iks',
|
||||
'kernel': 'kern_iks',
|
||||
'SCC_0x700': '0x0032F003',
|
||||
'cpus': ['a7', 'a7', 'a7', 'a15', 'a15'],
|
||||
},
|
||||
'iks_ns_a7': {
|
||||
'images_file': 'images_iks.txt',
|
||||
'dtb': 'iks',
|
||||
'initrd': 'init_iks',
|
||||
'kernel': 'kern_iks',
|
||||
'SCC_0x700': '0x0032F003',
|
||||
'cpus': ['a7', 'a7', 'a7', 'a15', 'a15'],
|
||||
},
|
||||
}
|
||||
|
||||
A7_ONLY_MODES = ['mp_a7_only', 'iks_a7', 'iks_cpu']
|
||||
A15_ONLY_MODES = ['mp_a15_only', 'iks_a15']
|
||||
|
||||
DEFAULT_A7_GOVERNOR_TUNABLES = {
|
||||
'interactive': {
|
||||
'above_hispeed_delay': 80000,
|
||||
'go_hispeed_load': 85,
|
||||
'hispeed_freq': 800000,
|
||||
'min_sample_time': 80000,
|
||||
'timer_rate': 20000,
|
||||
},
|
||||
'ondemand': {
|
||||
'sampling_rate': 50000,
|
||||
},
|
||||
}
|
||||
|
||||
DEFAULT_A15_GOVERNOR_TUNABLES = {
|
||||
'interactive': {
|
||||
'above_hispeed_delay': 80000,
|
||||
'go_hispeed_load': 85,
|
||||
'hispeed_freq': 1000000,
|
||||
'min_sample_time': 80000,
|
||||
'timer_rate': 20000,
|
||||
},
|
||||
'ondemand': {
|
||||
'sampling_rate': 50000,
|
||||
},
|
||||
}
|
||||
|
||||
ADB_SHELL_TIMEOUT = 30
|
||||
|
||||
|
||||
class _TC2DeviceConfig(object):
|
||||
|
||||
name = 'TC2 Configuration'
|
||||
device_name = 'TC2'
|
||||
|
||||
def __init__(self, # pylint: disable=R0914,W0613
|
||||
root_mount='/media/VEMSD',
|
||||
|
||||
disable_boot_configuration=False,
|
||||
boot_firmware=None,
|
||||
mode=None,
|
||||
|
||||
fs_medium='usb',
|
||||
|
||||
device_working_directory='/data/local/usecase',
|
||||
|
||||
bm_image='bm_v519r.axf',
|
||||
|
||||
serial_device='/dev/ttyS0',
|
||||
serial_baud=38400,
|
||||
serial_max_timeout=600,
|
||||
serial_log=sys.stdout,
|
||||
|
||||
init_timeout=120,
|
||||
|
||||
always_delete_uefi_entry=True,
|
||||
psci_enable=True,
|
||||
|
||||
host_working_directory=None,
|
||||
|
||||
a7_governor_tunables=None,
|
||||
a15_governor_tunables=None,
|
||||
|
||||
adb_name=None,
|
||||
# Compatibility with other android devices.
|
||||
enable_screen_check=None, # pylint: disable=W0613
|
||||
**kwargs
|
||||
):
|
||||
self.root_mount = root_mount
|
||||
self.disable_boot_configuration = disable_boot_configuration
|
||||
if not disable_boot_configuration:
|
||||
self.boot_firmware = boot_firmware or 'uefi'
|
||||
self.default_mode = mode or 'mp_a7_bootcluster'
|
||||
elif boot_firmware or mode:
|
||||
raise ConfigError('boot_firmware and/or mode cannot be specified when disable_boot_configuration is enabled.')
|
||||
|
||||
self.mode = self.default_mode
|
||||
self.working_directory = device_working_directory
|
||||
self.serial_device = serial_device
|
||||
self.serial_baud = serial_baud
|
||||
self.serial_max_timeout = serial_max_timeout
|
||||
self.serial_log = serial_log
|
||||
self.bootmon_prompt = re.compile('^([KLM]:\\\)?>', re.MULTILINE)
|
||||
|
||||
self.fs_medium = fs_medium.lower()
|
||||
|
||||
self.bm_image = bm_image
|
||||
|
||||
self.init_timeout = init_timeout
|
||||
|
||||
self.always_delete_uefi_entry = always_delete_uefi_entry
|
||||
self.psci_enable = psci_enable
|
||||
|
||||
self.resource_dir = os.path.join(os.path.dirname(__file__), 'resources')
|
||||
self.board_dir = os.path.join(self.root_mount, 'SITE1', 'HBI0249A')
|
||||
self.board_file = 'board.txt'
|
||||
self.board_file_bak = 'board.bak'
|
||||
self.images_file = 'images.txt'
|
||||
|
||||
self.host_working_directory = host_working_directory or settings.meta_directory
|
||||
|
||||
if not a7_governor_tunables:
|
||||
self.a7_governor_tunables = DEFAULT_A7_GOVERNOR_TUNABLES
|
||||
else:
|
||||
self.a7_governor_tunables = merge_dicts(DEFAULT_A7_GOVERNOR_TUNABLES, a7_governor_tunables)
|
||||
|
||||
if not a15_governor_tunables:
|
||||
self.a15_governor_tunables = DEFAULT_A15_GOVERNOR_TUNABLES
|
||||
else:
|
||||
self.a15_governor_tunables = merge_dicts(DEFAULT_A15_GOVERNOR_TUNABLES, a15_governor_tunables)
|
||||
|
||||
self.adb_name = adb_name
|
||||
|
||||
@property
|
||||
def src_images_template_file(self):
|
||||
return os.path.join(self.resource_dir, MODES[self.mode]['images_file'])
|
||||
|
||||
@property
|
||||
def src_images_file(self):
|
||||
return os.path.join(self.host_working_directory, 'images.txt')
|
||||
|
||||
@property
|
||||
def src_board_template_file(self):
|
||||
return os.path.join(self.resource_dir, 'board_template.txt')
|
||||
|
||||
@property
|
||||
def src_board_file(self):
|
||||
return os.path.join(self.host_working_directory, 'board.txt')
|
||||
|
||||
@property
|
||||
def kernel_arguments(self):
|
||||
kernel_args = ' console=ttyAMA0,38400 androidboot.console=ttyAMA0 selinux=0'
|
||||
if self.fs_medium == 'usb':
|
||||
kernel_args += ' androidboot.hardware=arm-versatileexpress-usb'
|
||||
if 'iks' in self.mode:
|
||||
kernel_args += ' no_bL_switcher=0'
|
||||
return kernel_args
|
||||
|
||||
@property
|
||||
def kernel(self):
|
||||
return MODES[self.mode]['kernel']
|
||||
|
||||
@property
|
||||
def initrd(self):
|
||||
return MODES[self.mode]['initrd']
|
||||
|
||||
@property
|
||||
def dtb(self):
|
||||
return MODES[self.mode]['dtb']
|
||||
|
||||
@property
|
||||
def SCC_0x700(self):
|
||||
return MODES[self.mode]['SCC_0x700']
|
||||
|
||||
@property
|
||||
def SCC_0x010(self):
|
||||
return BOOT_FIRMWARE[self.boot_firmware]['SCC_0x010']
|
||||
|
||||
@property
|
||||
def reboot_attempts(self):
|
||||
return BOOT_FIRMWARE[self.boot_firmware]['reboot_attempts']
|
||||
|
||||
def validate(self):
|
||||
valid_modes = MODES.keys()
|
||||
if self.mode not in valid_modes:
|
||||
message = 'Invalid mode: {}; must be in {}'.format(
|
||||
self.mode, valid_modes)
|
||||
raise ConfigError(message)
|
||||
|
||||
valid_boot_firmware = BOOT_FIRMWARE.keys()
|
||||
if self.boot_firmware not in valid_boot_firmware:
|
||||
message = 'Invalid boot_firmware: {}; must be in {}'.format(
|
||||
self.boot_firmware,
|
||||
valid_boot_firmware)
|
||||
raise ConfigError(message)
|
||||
|
||||
if self.fs_medium not in ['usb', 'sdcard']:
|
||||
message = 'Invalid filesystem medium: {}; allowed values: usb, sdcard'.format(self.fs_medium)
|
||||
raise ConfigError(message)
|
||||
|
||||
|
||||
class TC2Device(BigLittleDevice):
|
||||
|
||||
name = 'TC2'
|
||||
description = """
|
||||
TC2 is a development board, which has three A7 cores and two A15 cores.
|
||||
|
||||
TC2 has a number of boot parameters which are:
|
||||
|
||||
:root_mount: Defaults to '/media/VEMSD'
|
||||
:boot_firmware: It has only two boot firmware options, which are
|
||||
uefi and bootmon. Defaults to 'uefi'.
|
||||
:fs_medium: Defaults to 'usb'.
|
||||
:device_working_directory: The directory that WA will be using to copy
|
||||
files to. Defaults to '/data/local/usecase'.
|
||||
:serial_device: The serial device which TC2 is connected to. Defaults to
|
||||
'/dev/ttyS0'.
|
||||
:serial_baud: Defaults to 38400.
|
||||
:serial_max_timeout: Serial timeout value in seconds. Defaults to 600.
|
||||
:serial_log: Defaults to standard output.
|
||||
:init_timeout: The timeout, in seconds, for the device to initialise. Defaults
|
||||
to 120.
|
||||
:always_delete_uefi_entry: If true, the existing UEFI entry will be deleted.
|
||||
Defaults to True.
|
||||
:psci_enable: Whether PSCI is enabled. Defaults to True.
|
||||
:host_working_directory: The host working directory. Defaults to None.
|
||||
:disable_boot_configuration: Disables boot configuration through images.txt and board.txt. When
|
||||
this is ``True``, those two files will not be overwritten in VEMSD.
|
||||
This option may be necessary if the firmware version in the ``TC2``
|
||||
is not compatible with the templates in WA. Please note that enabling
|
||||
this will prevent you from being able to set ``boot_firmware`` and
|
||||
``mode`` parameters. Defaults to ``False``.
|
||||
|
||||
TC2 can also boot in a number of different modes, which are:
|
||||
|
||||
:mp_a7_only: Only the A7 cluster.
|
||||
:mp_a7_bootcluster: Both A7 and A15 clusters, but it boots on A7
|
||||
cluster.
|
||||
:mp_a15_only: Only the A15 cluster.
|
||||
:mp_a15_bootcluster: Both A7 and A15 clusters, but it boots on A15
|
||||
cluster.
|
||||
:iks_cpu: Only A7 cluster with only 2 cpus.
|
||||
:iks_a15: Only A15 cluster.
|
||||
:iks_a7: Same as iks_cpu
|
||||
:iks_ns_a15: Both A7 and A15 clusters.
|
||||
:iks_ns_a7: Both A7 and A15 clusters.
|
||||
|
||||
The difference between mp and iks is the scheduling policy.
|
||||
|
||||
TC2 takes the following runtime parameters:
|
||||
|
||||
:a7_cores: Number of active A7 cores.
|
||||
:a15_cores: Number of active A15 cores.
|
||||
:a7_governor: CPUFreq governor for the A7 cluster.
|
||||
:a15_governor: CPUFreq governor for the A15 cluster.
|
||||
:a7_min_frequency: Minimum CPU frequency for the A7 cluster.
|
||||
:a15_min_frequency: Minimum CPU frequency for the A15 cluster.
|
||||
:a7_max_frequency: Maximum CPU frequency for the A7 cluster.
|
||||
:a15_max_frequency: Maximum CPU frequency for the A15 cluster.
|
||||
:irq_affinity: Which cluster will receive IRQs.
|
||||
:cpuidle: Whether idle states should be enabled.
|
||||
:sysfile_values: A dict mapping a complete file path to the value that
|
||||
should be echo'd into it. By default, the file will be
|
||||
subsequently read to verify that the value was written
|
||||
into it with DeviceError raised otherwise. For write-only
|
||||
files, this check can be disabled by appending a ``!`` to
|
||||
the end of the file path.
|
||||
|
||||
"""
|
||||
|
||||
has_gpu = False
|
||||
a15_only_modes = A15_ONLY_MODES
|
||||
a7_only_modes = A7_ONLY_MODES
|
||||
not_configurable_modes = ['iks_a7', 'iks_cpu', 'iks_a15']
|
||||
|
||||
parameters = [
|
||||
Parameter('core_names', mandatory=False, override=True,
|
||||
description='This parameter will be ignored for TC2'),
|
||||
Parameter('core_clusters', mandatory=False, override=True,
|
||||
description='This parameter will be ignored for TC2'),
|
||||
]
|
||||
|
||||
runtime_parameters = [
|
||||
RuntimeParameter('irq_affinity', lambda d, x: d.set_irq_affinity(x.lower()), lambda: None),
|
||||
RuntimeParameter('cpuidle', lambda d, x: d.enable_idle_states() if boolean(x) else d.disable_idle_states(),
|
||||
lambda d: d.get_cpuidle())
|
||||
]
|
||||
|
||||
def get_mode(self):
|
||||
return self.config.mode
|
||||
|
||||
def set_mode(self, mode):
|
||||
if self._has_booted:
|
||||
raise DeviceError('Attempting to set boot mode when already booted.')
|
||||
valid_modes = MODES.keys()
|
||||
if mode is None:
|
||||
mode = self.config.default_mode
|
||||
if mode not in valid_modes:
|
||||
message = 'Invalid mode: {}; must be in {}'.format(mode, valid_modes)
|
||||
raise ConfigError(message)
|
||||
self.config.mode = mode
|
||||
|
||||
mode = property(get_mode, set_mode)
|
||||
|
||||
def _get_core_names(self):
|
||||
return MODES[self.mode]['cpus']
|
||||
|
||||
def _set_core_names(self, value):
|
||||
pass
|
||||
|
||||
core_names = property(_get_core_names, _set_core_names)
|
||||
|
||||
def _get_core_clusters(self):
|
||||
seen = set([])
|
||||
core_clusters = []
|
||||
cluster_id = -1
|
||||
for core in MODES[self.mode]['cpus']:
|
||||
if core not in seen:
|
||||
seen.add(core)
|
||||
cluster_id += 1
|
||||
core_clusters.append(cluster_id)
|
||||
return core_clusters
|
||||
|
||||
def _set_core_clusters(self, value):
|
||||
pass
|
||||
|
||||
core_clusters = property(_get_core_clusters, _set_core_clusters)
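# Illustrative example of the derivation above: in 'mp_a15_bootcluster' mode
# the cpus list is ['a15', 'a15', 'a7', 'a7', 'a7'], so core_clusters
# evaluates to [0, 0, 1, 1, 1] -- a new cluster id is assigned the first
# time each distinct core name is encountered.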
|
||||
|
||||
@property
|
||||
def cpu_cores(self):
|
||||
return MODES[self.mode]['cpus']
|
||||
|
||||
@property
|
||||
def max_a7_cores(self):
|
||||
return Counter(MODES[self.mode]['cpus'])['a7']
|
||||
|
||||
@property
|
||||
def max_a15_cores(self):
|
||||
return Counter(MODES[self.mode]['cpus'])['a15']
|
||||
|
||||
@property
|
||||
def a7_governor_tunables(self):
|
||||
return self.config.a7_governor_tunables
|
||||
|
||||
@property
|
||||
def a15_governor_tunables(self):
|
||||
return self.config.a15_governor_tunables
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(TC2Device, self).__init__()
|
||||
self.config = _TC2DeviceConfig(**kwargs)
|
||||
self.working_directory = self.config.working_directory
|
||||
self._serial = None
|
||||
self._has_booted = None
|
||||
|
||||
def boot(self, **kwargs): # NOQA
|
||||
mode = kwargs.get('os_mode', None)
|
||||
self._is_ready = False
|
||||
self._has_booted = False
|
||||
|
||||
self.mode = mode
|
||||
self.logger.debug('Booting in {} mode'.format(self.mode))
|
||||
|
||||
with open_serial_connection(timeout=self.config.serial_max_timeout,
|
||||
port=self.config.serial_device,
|
||||
baudrate=self.config.serial_baud) as target:
|
||||
if self.config.boot_firmware == 'bootmon':
|
||||
self._boot_using_bootmon(target)
|
||||
elif self.config.boot_firmware == 'uefi':
|
||||
self._boot_using_uefi(target)
|
||||
else:
|
||||
message = 'Unexpected boot firmware: {}'.format(self.config.boot_firmware)
|
||||
raise ConfigError(message)
|
||||
|
||||
try:
|
||||
target.sendline('')
|
||||
self.logger.debug('Waiting for the Android prompt.')
|
||||
target.expect(self.android_prompt, timeout=40) # pylint: disable=E1101
|
||||
except pexpect.TIMEOUT:
|
||||
# Try a second time before giving up.
|
||||
self.logger.debug('Did not get Android prompt, retrying...')
|
||||
target.sendline('')
|
||||
target.expect(self.android_prompt, timeout=10) # pylint: disable=E1101
|
||||
|
||||
self.logger.debug('Waiting for OS to initialize...')
|
||||
started_waiting_time = time.time()
|
||||
time.sleep(20) # we know it's not going to take less time than this.
|
||||
boot_completed, got_ip_address = False, False
|
||||
while True:
|
||||
try:
|
||||
if not boot_completed:
|
||||
target.sendline('getprop sys.boot_completed')
|
||||
boot_completed = target.expect(['0.*', '1.*'], timeout=10)
|
||||
if not got_ip_address:
|
||||
target.sendline('getprop dhcp.eth0.ipaddress')
|
||||
# regexes are processed in order, so ip regex has to
|
||||
# come first (as we only want to match new line if we
|
||||
# don't match the IP). We apply a "not" to make the logic
|
||||
# consistent with boot_completed.
|
||||
got_ip_address = not target.expect(['[1-9]\d*.\d+.\d+.\d+', '\n'], timeout=10)
|
||||
except pexpect.TIMEOUT:
|
||||
pass # We have our own timeout -- see below.
|
||||
if boot_completed and got_ip_address:
|
||||
break
|
||||
time.sleep(5)
|
||||
if (time.time() - started_waiting_time) > self.config.init_timeout:
|
||||
raise DeviceError('Timed out waiting for the device to initialize.')
|
||||
|
||||
self._has_booted = True
|
||||
|
||||
def connect(self):
|
||||
if not self._is_ready:
|
||||
if self.config.adb_name:
|
||||
self.adb_name = self.config.adb_name # pylint: disable=attribute-defined-outside-init
|
||||
else:
|
||||
with open_serial_connection(timeout=self.config.serial_max_timeout,
|
||||
port=self.config.serial_device,
|
||||
baudrate=self.config.serial_baud) as target:
|
||||
# Get IP address and push the Gator and PMU logger.
|
||||
target.sendline('su') # as of Android v5.0.2, Linux does not boot into root shell
|
||||
target.sendline('netcfg')
|
||||
ipaddr_re = re.compile('eth0 +UP +(.+)/.+', re.MULTILINE)
|
||||
target.expect(ipaddr_re)
|
||||
output = target.after
|
||||
match = re.search('eth0 +UP +(.+)/.+', output)
|
||||
if not match:
|
||||
raise DeviceError('Could not get adb IP address.')
|
||||
ipaddr = match.group(1)
|
||||
|
||||
# Connect to device using adb.
|
||||
target.expect(self.android_prompt) # pylint: disable=E1101
|
||||
self.adb_name = ipaddr + ":5555" # pylint: disable=W0201
|
||||
|
||||
if self.adb_name in adb_list_devices():
|
||||
adb_disconnect(self.adb_name)
|
||||
adb_connect(self.adb_name)
|
||||
self._is_ready = True
|
||||
self.execute("input keyevent 82", timeout=ADB_SHELL_TIMEOUT)
|
||||
self.execute("svc power stayon true", timeout=ADB_SHELL_TIMEOUT)
|
||||
|
||||
def disconnect(self):
|
||||
adb_disconnect(self.adb_name)
|
||||
self._is_ready = False
|
||||
|
||||
# TC2-specific methods. You should avoid calling these in
|
||||
# Workloads/Instruments as that would tie them to TC2 (and if that is
|
||||
# the case, then you should set the supported_devices parameter in the
|
||||
# Workload/Instrument accordingly). Most of these can be replaced with a
|
||||
# call to set_runtime_parameters.
|
||||
|
||||
def get_cpuidle(self):
|
||||
return self.get_sysfile_value('/sys/devices/system/cpu/cpu0/cpuidle/state1/disable')
|
||||
|
||||
def enable_idle_states(self):
|
||||
"""
|
||||
Fully enables idle states on TC2.
|
||||
See http://wiki.arm.com/Research/TC2SetupAndUsage ("Enabling Idle Modes" section)
|
||||
and http://wiki.arm.com/ASD/ControllingPowerManagementInLinaroKernels
|
||||
|
||||
"""
|
||||
# Enable C1 (cluster shutdown).
|
||||
self.set_sysfile_value('/sys/devices/system/cpu/cpu0/cpuidle/state1/disable', 0, verify=False)
|
||||
# Enable C0 on A15 cluster.
|
||||
self.set_sysfile_value('/sys/kernel/debug/idle_debug/enable_idle', 0, verify=False)
|
||||
# Enable C0 on A7 cluster.
|
||||
self.set_sysfile_value('/sys/kernel/debug/idle_debug/enable_idle', 1, verify=False)
|
||||
|
||||
def disable_idle_states(self):
|
||||
"""
|
||||
Disable idle states on TC2.
|
||||
See http://wiki.arm.com/Research/TC2SetupAndUsage ("Enabling Idle Modes" section)
|
||||
and http://wiki.arm.com/ASD/ControllingPowerManagementInLinaroKernels
|
||||
|
||||
"""
|
||||
# Disable C1 (cluster shutdown).
|
||||
self.set_sysfile_value('/sys/devices/system/cpu/cpu0/cpuidle/state1/disable', 1, verify=False)
|
||||
# Disable C0.
|
||||
self.set_sysfile_value('/sys/kernel/debug/idle_debug/enable_idle', 0xFF, verify=False)
|
||||
|
||||
def set_irq_affinity(self, cluster):
|
||||
"""
|
||||
Sets IRQ affinity to the specified cluster.
|
||||
|
||||
This method will only work if the device mode is mp_a7_bootcluster or
|
||||
mp_a15_bootcluster. This operation does not make sense if there is only one
|
||||
cluster active (all IRQs will obviously go to that), and it will not work for
|
||||
an IKS kernel because clusters are not exposed to sysfs.
|
||||
|
||||
:param cluster: must be either 'a15' or 'a7'.
|
||||
|
||||
"""
|
||||
if self.config.mode not in ('mp_a7_bootcluster', 'mp_a15_bootcluster'):
|
||||
raise ConfigError('Cannot set IRQ affinity with mode {}'.format(self.config.mode))
|
||||
if cluster == 'a7':
|
||||
self.execute('/sbin/set_irq_affinity.sh 0xc07', check_exit_code=False)
|
||||
elif cluster == 'a15':
|
||||
self.execute('/sbin/set_irq_affinity.sh 0xc0f', check_exit_code=False)
|
||||
else:
|
||||
raise ConfigError('cluster must be either "a15" or "a7"; got {}'.format(cluster))
|
||||
|
||||
def _boot_using_uefi(self, target):
|
||||
self.logger.debug('Booting using UEFI.')
|
||||
self._wait_for_vemsd_mount(target)
|
||||
self._setup_before_reboot()
|
||||
self._perform_uefi_reboot(target)
|
||||
|
||||
# Get to the UEFI menu.
|
||||
self.logger.debug('Waiting for UEFI default selection.')
|
||||
target.sendline('reboot')
|
||||
target.expect('The default boot selection will start in'.rstrip())
|
||||
time.sleep(1)
|
||||
target.sendline(''.rstrip())
|
||||
|
||||
# If delete every time is specified, try to delete entry.
|
||||
if self.config.always_delete_uefi_entry:
|
||||
self._delete_uefi_entry(target, entry='workload_automation_MP')
|
||||
self.config.always_delete_uefi_entry = False
|
||||
|
||||
# Specify argument to be passed specifying that psci is (or is not) enabled
|
||||
if self.config.psci_enable:
|
||||
psci_enable = ' psci=enable'
|
||||
else:
|
||||
psci_enable = ''
|
||||
|
||||
# Identify the workload automation entry.
|
||||
selection_pattern = r'\[([0-9]*)\] '
|
||||
|
||||
try:
|
||||
target.expect(re.compile(selection_pattern + 'workload_automation_MP'), timeout=5)
|
||||
wl_menu_item = target.match.group(1)
|
||||
except pexpect.TIMEOUT:
|
||||
self._create_uefi_entry(target, psci_enable, entry_name='workload_automation_MP')
|
||||
# At this point the board should be rebooted so we need to retry to boot
|
||||
self._boot_using_uefi(target)
|
||||
else: # Did not time out.
|
||||
try:
|
||||
# Identify the boot manager menu item
|
||||
target.expect(re.compile(selection_pattern + 'Boot Manager'))
|
||||
boot_manager_menu_item = target.match.group(1)
|
||||
|
||||
# Update FDT
|
||||
target.sendline(boot_manager_menu_item)
|
||||
target.expect(re.compile(selection_pattern + 'Update FDT path'), timeout=15)
|
||||
update_fdt_menu_item = target.match.group(1)
|
||||
target.sendline(update_fdt_menu_item)
|
||||
target.expect(re.compile(selection_pattern + 'NOR Flash .*'), timeout=15)
|
||||
bootmonfs_menu_item = target.match.group(1)
|
||||
target.sendline(bootmonfs_menu_item)
|
||||
target.expect('File path of the FDT blob:')
|
||||
target.sendline(self.config.dtb)
|
||||
|
||||
# Return to the main menu and boot from the workload automation entry
|
||||
target.expect(re.compile(selection_pattern + 'Return to main menu'), timeout=15)
|
||||
return_to_main_menu_item = target.match.group(1)
|
||||
target.sendline(return_to_main_menu_item)
|
||||
target.sendline(wl_menu_item)
|
||||
except pexpect.TIMEOUT:
|
||||
raise DeviceError('Timed out')
|
||||
|
||||
def _setup_before_reboot(self):
|
||||
if not self.config.disable_boot_configuration:
|
||||
self.logger.debug('Performing pre-boot setup.')
|
||||
substitution = {
|
||||
'SCC_0x010': self.config.SCC_0x010,
|
||||
'SCC_0x700': self.config.SCC_0x700,
|
||||
}
|
||||
with open(self.config.src_board_template_file, 'r') as fh:
|
||||
template_board_txt = string.Template(fh.read())
|
||||
with open(self.config.src_board_file, 'w') as wfh:
|
||||
wfh.write(template_board_txt.substitute(substitution))
|
||||
|
||||
with open(self.config.src_images_template_file, 'r') as fh:
|
||||
template_images_txt = string.Template(fh.read())
|
||||
with open(self.config.src_images_file, 'w') as wfh:
|
||||
wfh.write(template_images_txt.substitute({'bm_image': self.config.bm_image}))
|
||||
|
||||
shutil.copyfile(self.config.src_board_file,
|
||||
os.path.join(self.config.board_dir, self.config.board_file))
|
||||
shutil.copyfile(self.config.src_images_file,
|
||||
os.path.join(self.config.board_dir, self.config.images_file))
|
||||
os.system('sync') # make sure everything is flushed to microSD
|
||||
else:
|
||||
self.logger.debug('Boot configuration disabled; proceeding with existing board.txt and images.txt.')
|
||||
|
||||
def _delete_uefi_entry(self, target, entry): # pylint: disable=R0201
|
||||
"""
|
||||
This method deletes the UEFI entry specified as a parameter.
As a precondition, serial port input must have been parsed at most up to
the point before this entry is recognised (i.e. neither the entry nor the
Boot Manager menu item has been parsed yet).
|
||||
|
||||
"""
|
||||
try:
|
||||
selection_pattern = r'\[([0-9]+)\] *'
|
||||
|
||||
try:
|
||||
target.expect(re.compile(selection_pattern + entry), timeout=5)
|
||||
wl_menu_item = target.match.group(1)
|
||||
except pexpect.TIMEOUT:
|
||||
return # Entry does not exist, nothing to delete here...
|
||||
|
||||
# Identify and select boot manager menu item
|
||||
target.expect(selection_pattern + 'Boot Manager', timeout=15)
|
||||
bootmanager_item = target.match.group(1)
|
||||
target.sendline(bootmanager_item)
|
||||
|
||||
# Identify and select 'Remove entry'
|
||||
target.expect(selection_pattern + 'Remove Boot Device Entry', timeout=15)
|
||||
new_entry_item = target.match.group(1)
|
||||
target.sendline(new_entry_item)
|
||||
|
||||
# Delete entry
|
||||
target.expect(re.compile(selection_pattern + entry), timeout=5)
|
||||
wl_menu_item = target.match.group(1)
|
||||
target.sendline(wl_menu_item)
|
||||
|
||||
# Return to main menu
|
||||
target.expect(re.compile(selection_pattern + 'Return to main menu'), timeout=15)
|
||||
return_to_main_menu_item = target.match.group(1)
|
||||
target.sendline(return_to_main_menu_item)
|
||||
except pexpect.TIMEOUT:
|
||||
raise DeviceError('Timed out while deleting UEFI entry.')
|
||||
|
||||
def _create_uefi_entry(self, target, psci_enable, entry_name):
|
||||
"""
|
||||
Creates the default boot entry that is expected when booting in uefi mode.
|
||||
|
||||
"""
|
||||
self._wait_for_vemsd_mount(target)
|
||||
try:
|
||||
selection_pattern = '\[([0-9]+)\] *'
|
||||
|
||||
# Identify and select boot manager menu item.
|
||||
target.expect(selection_pattern + 'Boot Manager', timeout=15)
|
||||
bootmanager_item = target.match.group(1)
|
||||
target.sendline(bootmanager_item)
|
||||
|
||||
# Identify and select 'add new entry'.
|
||||
target.expect(selection_pattern + 'Add Boot Device Entry', timeout=15)
|
||||
new_entry_item = target.match.group(1)
|
||||
target.sendline(new_entry_item)
|
||||
|
||||
# Identify and select BootMonFs.
|
||||
target.expect(selection_pattern + 'NOR Flash .*', timeout=15)
|
||||
BootMonFs_item = target.match.group(1)
|
||||
target.sendline(BootMonFs_item)
|
||||
|
||||
# Specify the parameters of the new entry.
|
||||
target.expect('.+the kernel', timeout=5)
|
||||
target.sendline(self.config.kernel) # kernel path
|
||||
target.expect('Has FDT support\?.*\[y\/n\].*', timeout=5)
|
||||
time.sleep(0.5)
|
||||
target.sendline('y') # Has Fdt support? -> y
|
||||
target.expect('Add an initrd.*\[y\/n\].*', timeout=5)
|
||||
time.sleep(0.5)
|
||||
target.sendline('y') # add an initrd? -> y
|
||||
target.expect('.+the initrd.*', timeout=5)
|
||||
time.sleep(0.5)
|
||||
target.sendline(self.config.initrd) # initrd path
|
||||
target.expect('.+to the binary.*', timeout=5)
|
||||
time.sleep(0.5)
|
||||
_slow_sendline(target, self.config.kernel_arguments + psci_enable) # arguments to pass to binary
|
||||
time.sleep(0.5)
|
||||
target.expect('.+new Entry.+', timeout=5)
|
||||
_slow_sendline(target, entry_name) # Entry name
|
||||
target.expect('Choice.+', timeout=15)
|
||||
time.sleep(2)
|
||||
except pexpect.TIMEOUT:
|
||||
raise DeviceError('Timed out while creating UEFI entry.')
|
||||
self._perform_uefi_reboot(target)
|
||||
|
||||
def _perform_uefi_reboot(self, target):
|
||||
self._wait_for_vemsd_mount(target)
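# Creating reboot.txt on the VEMSD mount is what signals the motherboard
# micro-controller to power-cycle the board.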
|
||||
open(os.path.join(self.config.root_mount, 'reboot.txt'), 'a').close()
|
||||
|
||||
def _wait_for_vemsd_mount(self, target, timeout=100):
|
||||
attempts = 1 + self.config.reboot_attempts
|
||||
if os.path.exists(os.path.join(self.config.root_mount, 'config.txt')):
|
||||
return
|
||||
|
||||
self.logger.debug('Waiting for VEMSD to mount...')
|
||||
for i in xrange(attempts):
|
||||
if i: # Do not reboot on the first attempt.
|
||||
target.sendline('reboot')
|
||||
target.sendline('usb_on')
|
||||
for _ in xrange(timeout):
|
||||
time.sleep(1)
|
||||
if os.path.exists(os.path.join(self.config.root_mount, 'config.txt')):
|
||||
return
|
||||
|
||||
raise DeviceError('Timed out waiting for VEMSD to mount.')
|
||||
|
||||
def _boot_using_bootmon(self, target):
|
||||
"""
|
||||
Boots TC2 using the bootmon interface.
|
||||
"""
|
||||
self.logger.debug('Booting using bootmon.')
|
||||
|
||||
try:
|
||||
self._wait_for_vemsd_mount(target, timeout=20)
|
||||
except DeviceError:
|
||||
# OK, something's wrong. Reboot the board and try again.
|
||||
self.logger.debug('VEMSD not mounted, attempting to power cycle device.')
|
||||
target.sendline(' ')
|
||||
state = target.expect(['Cmd> ', self.config.bootmon_prompt, self.android_prompt]) # pylint: disable=E1101
|
||||
|
||||
if state == 0 or state == 1:
|
||||
# Reboot - Bootmon
|
||||
target.sendline('reboot')
|
||||
target.expect('Powering up system...')
|
||||
elif state == 2:
|
||||
target.sendline('reboot -n')
|
||||
target.expect('Powering up system...')
|
||||
else:
|
||||
raise DeviceError('Unexpected board state {}; should be 0, 1 or 2'.format(state))
|
||||
|
||||
self._wait_for_vemsd_mount(target)
|
||||
|
||||
self._setup_before_reboot()
|
||||
|
||||
# Reboot - Bootmon
|
||||
self.logger.debug('Rebooting into bootloader...')
|
||||
open(os.path.join(self.config.root_mount, 'reboot.txt'), 'a').close()
|
||||
target.expect('Powering up system...')
|
||||
target.expect(self.config.bootmon_prompt)
|
||||
|
||||
# Wait for VEMSD to mount
|
||||
self._wait_for_vemsd_mount(target)
|
||||
|
||||
#Boot Linux - Bootmon
|
||||
target.sendline('fl linux fdt ' + self.config.dtb)
|
||||
target.expect(self.config.bootmon_prompt)
|
||||
target.sendline('fl linux initrd ' + self.config.initrd)
|
||||
target.expect(self.config.bootmon_prompt)
|
||||
#Workaround TC2 bootmon serial issue for loading large initrd blob
|
||||
target.sendline(' ')
|
||||
target.expect(self.config.bootmon_prompt)
|
||||
target.sendline('fl linux boot ' + self.config.kernel + self.config.kernel_arguments)
|
||||
|
||||
|
||||
# Utility functions.
|
||||
|
||||
def _slow_sendline(target, line):
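# Sends the line one character at a time with a short delay, presumably to
# avoid characters being dropped by the slow serial console when a long
# line is sent in one go.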
|
||||
for c in line:
|
||||
target.send(c)
|
||||
time.sleep(0.1)
|
||||
target.sendline('')
|
@@ -1,96 +0,0 @@
|
||||
BOARD: HBI0249
|
||||
TITLE: V2P-CA15_A7 Configuration File
|
||||
|
||||
[DCCS]
|
||||
TOTALDCCS: 1 ;Total Number of DCCS
|
||||
M0FILE: dbb_v110.ebf ;DCC0 Filename
|
||||
M0MODE: MICRO ;DCC0 Programming Mode
|
||||
|
||||
[FPGAS]
|
||||
TOTALFPGAS: 0 ;Total Number of FPGAs
|
||||
|
||||
[TAPS]
|
||||
TOTALTAPS: 3 ;Total Number of TAPs
|
||||
T0NAME: STM32TMC ;TAP0 Device Name
|
||||
T0FILE: NONE ;TAP0 Filename
|
||||
T0MODE: NONE ;TAP0 Programming Mode
|
||||
T1NAME: STM32CM3 ;TAP1 Device Name
|
||||
T1FILE: NONE ;TAP1 Filename
|
||||
T1MODE: NONE ;TAP1 Programming Mode
|
||||
T2NAME: CORTEXA15 ;TAP2 Device Name
|
||||
T2FILE: NONE ;TAP2 Filename
|
||||
T2MODE: NONE ;TAP2 Programming Mode
|
||||
|
||||
[OSCCLKS]
|
||||
TOTALOSCCLKS: 9 ;Total Number of OSCCLKS
|
||||
OSC0: 50.0 ;CPUREFCLK0 A15 CPU (20:1 - 1.0GHz)
|
||||
OSC1: 50.0 ;CPUREFCLK1 A15 CPU (20:1 - 1.0GHz)
|
||||
OSC2: 40.0 ;CPUREFCLK0 A7 CPU (20:1 - 800MHz)
|
||||
OSC3: 40.0 ;CPUREFCLK1 A7 CPU (20:1 - 800MHz)
|
||||
OSC4: 40.0 ;HSBM AXI (40MHz)
|
||||
OSC5: 23.75 ;HDLCD (23.75MHz - TC PLL is in bypass)
|
||||
OSC6: 50.0 ;SMB (50MHz)
|
||||
OSC7: 50.0 ;SYSREFCLK (20:1 - 1.0GHz, ACLK - 500MHz)
|
||||
OSC8: 50.0 ;DDR2 (8:1 - 400MHz)
|
||||
|
||||
[SCC REGISTERS]
|
||||
TOTALSCCS: 33 ;Total Number of SCC registers
|
||||
|
||||
;SCC: 0x010 0x000003D0 ;Remap to NOR0
|
||||
SCC: 0x010 $SCC_0x010 ;Switch between NOR0/NOR1
|
||||
SCC: 0x01C 0xFF00FF00 ;CFGRW3 - SMC CS6/7 N/U
|
||||
SCC: 0x118 0x01CD1011 ;CFGRW17 - HDLCD PLL external bypass
|
||||
;SCC: 0x700 0x00320003 ;CFGRW48 - [25:24]Boot CPU [28]Boot Cluster (default CA7_0)
|
||||
SCC: 0x700 $SCC_0x700 ;CFGRW48 - [25:24]Boot CPU [28]Boot Cluster (default CA7_0)
|
||||
; Bootmon configuration:
|
||||
; [15]: A7 Event stream generation (default: disabled)
|
||||
; [14]: A15 Event stream generation (default: disabled)
|
||||
; [13]: Power down the non-boot cluster (default: disabled)
|
||||
; [12]: Use per-cpu mailboxes for power management (default: disabled)
|
||||
; [11]: A15 executes WFEs as nops (default: disabled)
|
||||
|
||||
SCC: 0x400 0x33330c00 ;CFGREG41 - A15 configuration register 0 (Default 0x33330c80)
|
||||
; [29:28] SPNIDEN
|
||||
; [25:24] SPIDEN
|
||||
; [21:20] NIDEN
|
||||
; [17:16] DBGEN
|
||||
; [13:12] CFGTE
|
||||
; [9:8] VINITHI_CORE
|
||||
; [7] IMINLN
|
||||
; [3:0] CLUSTER_ID
|
||||
|
||||
;Set the CPU clock PLLs
|
||||
SCC: 0x120 0x022F1010 ;CFGRW19 - CA15_0 PLL control - 20:1 (lock OFF)
|
||||
SCC: 0x124 0x0011710D ;CFGRW20 - CA15_0 PLL value
|
||||
SCC: 0x128 0x022F1010 ;CFGRW21 - CA15_1 PLL control - 20:1 (lock OFF)
|
||||
SCC: 0x12C 0x0011710D ;CFGRW22 - CA15_1 PLL value
|
||||
SCC: 0x130 0x022F1010 ;CFGRW23 - CA7_0 PLL control - 20:1 (lock OFF)
|
||||
SCC: 0x134 0x0011710D ;CFGRW24 - CA7_0 PLL value
|
||||
SCC: 0x138 0x022F1010 ;CFGRW25 - CA7_1 PLL control - 20:1 (lock OFF)
|
||||
SCC: 0x13C 0x0011710D ;CFGRW26 - CA7_1 PLL value
|
||||
|
||||
;Power management interface
|
||||
SCC: 0xC00 0x00000005 ;Control: [0]PMI_EN [1]DBG_EN [2]SPC_SYSCFG
|
||||
SCC: 0xC04 0x060E0356 ;Latency in uS max: [15:0]DVFS [31:16]PWRUP
|
||||
SCC: 0xC08 0x00000000 ;Reserved
|
||||
SCC: 0xC0C 0x00000000 ;Reserved
|
||||
|
||||
;CA15 performance values: 0xVVVFFFFF
|
||||
SCC: 0xC10 0x384061A8 ;CA15 PERFVAL0, 900mV, 20,000*20= 500MHz
|
||||
SCC: 0xC14 0x38407530 ;CA15 PERFVAL1, 900mV, 25,000*20= 600MHz
|
||||
SCC: 0xC18 0x384088B8 ;CA15 PERFVAL2, 900mV, 30,000*20= 700MHz
|
||||
SCC: 0xC1C 0x38409C40 ;CA15 PERFVAL3, 900mV, 35,000*20= 800MHz
|
||||
SCC: 0xC20 0x3840AFC8 ;CA15 PERFVAL4, 900mV, 40,000*20= 900MHz
|
||||
SCC: 0xC24 0x3840C350 ;CA15 PERFVAL5, 900mV, 45,000*20=1000MHz
|
||||
SCC: 0xC28 0x3CF0D6D8 ;CA15 PERFVAL6, 975mV, 50,000*20=1100MHz
|
||||
SCC: 0xC2C 0x41A0EA60 ;CA15 PERFVAL7, 1050mV, 55,000*20=1200MHz
|
||||
|
||||
;CA7 performance values: 0xVVVFFFFF
|
||||
SCC: 0xC30 0x3840445C ;CA7 PERFVAL0, 900mV, 10,000*20= 350MHz
|
||||
SCC: 0xC34 0x38404E20 ;CA7 PERFVAL1, 900mV, 15,000*20= 400MHz
|
||||
SCC: 0xC38 0x384061A8 ;CA7 PERFVAL2, 900mV, 20,000*20= 500MHz
|
||||
SCC: 0xC3C 0x38407530 ;CA7 PERFVAL3, 900mV, 25,000*20= 600MHz
|
||||
SCC: 0xC40 0x384088B8 ;CA7 PERFVAL4, 900mV, 30,000*20= 700MHz
|
||||
SCC: 0xC44 0x38409C40 ;CA7 PERFVAL5, 900mV, 35,000*20= 800MHz
|
||||
SCC: 0xC48 0x3CF0AFC8 ;CA7 PERFVAL6, 975mV, 40,000*20= 900MHz
|
||||
SCC: 0xC4C 0x41A0C350 ;CA7 PERFVAL7, 1050mV, 45,000*20=1000MHz
|
@@ -1,25 +0,0 @@
|
||||
TITLE: Versatile Express Images Configuration File
|
||||
|
||||
[IMAGES]
|
||||
TOTALIMAGES: 4 ;Number of Images (Max : 32)
|
||||
NOR0UPDATE: AUTO ;Image Update:NONE/AUTO/FORCE
|
||||
NOR0ADDRESS: BOOT ;Image Flash Address
|
||||
NOR0FILE: \SOFTWARE\$bm_image ;Image File Name
|
||||
|
||||
NOR1UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
|
||||
NOR1ADDRESS: 0x00000000 ;Image Flash Address
|
||||
NOR1FILE: \SOFTWARE\kern_iks.bin ;Image File Name
|
||||
NOR1LOAD: 0x80008000
|
||||
NOR1ENTRY: 0x80008000
|
||||
|
||||
NOR2UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
|
||||
NOR2ADDRESS: 0x00000000 ;Image Flash Address
|
||||
NOR2FILE: \SOFTWARE\iks.dtb ;Image File Name for booting in A7 cluster
|
||||
NOR2LOAD: 0x84000000
|
||||
NOR2ENTRY: 0x84000000
|
||||
|
||||
NOR3UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
|
||||
NOR3ADDRESS: 0x00000000 ;Image Flash Address
|
||||
NOR3FILE: \SOFTWARE\init_iks.bin ;Image File Name
|
||||
NOR3LOAD: 0x90100000
|
||||
NOR3ENTRY: 0x90100000
|
@@ -1,55 +0,0 @@
|
||||
TITLE: Versatile Express Images Configuration File
|
||||
[IMAGES]
|
||||
TOTALIMAGES: 9 ;Number of Images (Max: 32)
|
||||
NOR0UPDATE: AUTO ;Image Update:NONE/AUTO/FORCE
|
||||
NOR0ADDRESS: BOOT ;Image Flash Address
|
||||
NOR0FILE: \SOFTWARE\$bm_image ;Image File Name
|
||||
|
||||
NOR1UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
|
||||
NOR1ADDRESS: 0x0E000000 ;Image Flash Address
|
||||
NOR1FILE: \SOFTWARE\kern_mp.bin ;Image File Name
|
||||
NOR1LOAD: 0x80008000
|
||||
NOR1ENTRY: 0x80008000
|
||||
|
||||
NOR2UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
|
||||
NOR2ADDRESS: 0x0E800000 ;Image Flash Address
|
||||
NOR2FILE: \SOFTWARE\mp_a7.dtb ;Image File Name for booting in A7 cluster
|
||||
NOR2LOAD: 0x84000000
|
||||
NOR2ENTRY: 0x84000000
|
||||
|
||||
NOR3UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
|
||||
NOR3ADDRESS: 0x0E900000 ;Image Flash Address
|
||||
NOR3FILE: \SOFTWARE\mp_a15.dtb ;Image File Name
|
||||
NOR3LOAD: 0x84000000
|
||||
NOR3ENTRY: 0x84000000
|
||||
|
||||
NOR4UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
|
||||
NOR4ADDRESS: 0x0EA00000 ;Image Flash Address
|
||||
NOR4FILE: \SOFTWARE\mp_a7bc.dtb ;Image File Name
|
||||
NOR4LOAD: 0x84000000
|
||||
NOR4ENTRY: 0x84000000
|
||||
|
||||
NOR5UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
|
||||
NOR5ADDRESS: 0x0EB00000 ;Image Flash Address
|
||||
NOR5FILE: \SOFTWARE\mp_a15bc.dtb ;Image File Name
|
||||
NOR5LOAD: 0x84000000
|
||||
NOR5ENTRY: 0x84000000
|
||||
|
||||
NOR6UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
|
||||
NOR6ADDRESS: 0x0EC00000 ;Image Flash Address
|
||||
NOR6FILE: \SOFTWARE\init_mp.bin ;Image File Name
|
||||
NOR6LOAD: 0x85000000
|
||||
NOR6ENTRY: 0x85000000
|
||||
|
||||
NOR7UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
|
||||
NOR7ADDRESS: 0x0C000000 ;Image Flash Address
|
||||
NOR7FILE: \SOFTWARE\tc2_sec.bin ;Image File Name
|
||||
NOR7LOAD: 0
|
||||
NOR7ENTRY: 0
|
||||
|
||||
NOR8UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
|
||||
NOR8ADDRESS: 0x0D000000 ;Image Flash Address
|
||||
NOR8FILE: \SOFTWARE\tc2_uefi.bin ;Image File Name
|
||||
NOR8LOAD: 0
|
||||
NOR8ENTRY: 0
|
||||
|
@@ -1,35 +0,0 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
from wlauto import LinuxDevice, Parameter
|
||||
|
||||
|
||||
class Xe503c12Chormebook(LinuxDevice):
|
||||
|
||||
name = "XE503C12"
|
||||
description = 'A developer-unlocked Samsung XE503C12 running sshd.'
|
||||
platform = 'chromeos'
|
||||
|
||||
parameters = [
|
||||
Parameter('core_names', default=['a15', 'a15', 'a15', 'a15'], override=True),
|
||||
Parameter('core_clusters', default=[0, 0, 0, 0], override=True),
|
||||
Parameter('username', default='chronos', override=True),
|
||||
Parameter('password', default='', override=True),
|
||||
Parameter('password_prompt', default='Password:', override=True),
|
||||
Parameter('binaries_directory', default='/home/chronos/bin', override=True),
|
||||
]
|
||||
|
||||
abi = 'armeabi'
|
@@ -1,14 +0,0 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
@@ -1,99 +0,0 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import re
|
||||
|
||||
from wlauto import LinuxDevice, Parameter
|
||||
from wlauto.exceptions import DeviceError
|
||||
from wlauto.core.device import RuntimeParameter
|
||||
from wlauto.utils.misc import convert_new_lines
|
||||
from wlauto.utils.types import boolean
|
||||
|
||||
|
||||
class ChromeOsDevice(LinuxDevice):
|
||||
|
||||
name = "chromeos_test_image"
|
||||
description = """
|
||||
Chrome OS test image device. Use this if you are working on a Chrome OS device with a test
|
||||
image. An off-the-shelf device will not work with this device interface.
|
||||
|
||||
More information on how to build a Chrome OS test image can be found here:
|
||||
|
||||
https://www.chromium.org/chromium-os/developer-guide#TOC-Build-a-disk-image-for-your-board
|
||||
|
||||
"""
|
||||
|
||||
platform = 'chromeos'
|
||||
abi = 'armeabi'
|
||||
has_gpu = True
|
||||
default_timeout = 100
|
||||
|
||||
parameters = [
|
||||
Parameter('core_names', default=[], override=True),
|
||||
Parameter('core_clusters', default=[], override=True),
|
||||
Parameter('username', default='root', override=True),
|
||||
Parameter('password_prompt', default='Password:', override=True),
|
||||
Parameter('binaries_directory', default='/usr/local/bin', override=True),
|
||||
Parameter('working_directory', default='/home/root/wa-working', override=True),
|
||||
]
|
||||
|
||||
runtime_parameters = [
|
||||
RuntimeParameter('ui', 'get_ui_status', 'set_ui_status', value_name='status'),
|
||||
]
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(ChromeOsDevice, self).__init__(**kwargs)
|
||||
self.ui_status = None
|
||||
|
||||
def validate(self):
|
||||
# pylint: disable=access-member-before-definition,attribute-defined-outside-init
|
||||
if self.password is None and not self.keyfile:
|
||||
self.password = 'test0000'
|
||||
|
||||
def initialize(self, context, *args, **kwargs):
|
||||
if self.busybox == 'busybox':
|
||||
self.logger.debug('Busybox already installed on the device: replacing with wa version')
|
||||
self.uninstall('busybox')
|
||||
self.busybox = self.deploy_busybox(context)
|
||||
|
||||
def get_ui_status(self):
|
||||
return self.ui_status
|
||||
|
||||
def set_ui_status(self, status):
|
||||
self.ui_status = boolean(status)
|
||||
if self.ui_status is None:
|
||||
pass
|
||||
elif self.ui_status:
|
||||
try:
|
||||
self.execute('start ui')
|
||||
except DeviceError:
|
||||
pass
|
||||
else:
|
||||
try:
|
||||
self.execute('stop ui')
|
||||
except DeviceError:
|
||||
pass
|
||||
|
||||
def stop(self):
|
||||
if self.ui_status is None:
|
||||
pass
|
||||
elif not self.ui_status:
|
||||
try:
|
||||
self.execute('start ui')
|
||||
except DeviceError:
|
||||
pass
|
||||
else:
|
||||
pass
|
||||
self.ui_status = None
|
@@ -1,120 +0,0 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# Original implementation by Rene de Jong. Updated by Sascha Bischoff.
|
||||
|
||||
import logging
|
||||
|
||||
from wlauto import LinuxDevice, Parameter
|
||||
from wlauto.common.gem5.device import BaseGem5Device
|
||||
from wlauto.utils import types
|
||||
|
||||
|
||||
class Gem5LinuxDevice(BaseGem5Device, LinuxDevice):
|
||||
"""
|
||||
Implements gem5 Linux device.
|
||||
|
||||
This class allows a user to connect WA to a simulation using gem5. The
|
||||
connection to the device is made using the telnet connection of the
|
||||
simulator, and is used for all commands. The simulator does not have ADB
|
||||
support, and therefore we need to fall back to using standard shell
|
||||
commands.
|
||||
|
||||
Files are copied into the simulation using a VirtIO 9P device in gem5. Files
|
||||
are copied out of the simulated environment using the m5 writefile command
|
||||
within the simulated system.
|
||||
|
||||
When starting the workload run, the simulator is automatically started by
|
||||
Workload Automation, and a connection to the simulator is established. WA
|
||||
will then wait for the OS to boot on the simulated system (which can take
|
||||
hours), prior to executing any other commands on the device. It is also
|
||||
possible to resume from a checkpoint when starting the simulation. To do
|
||||
this, please append the relevant checkpoint commands from the gem5
|
||||
simulation script to the gem5_discription argument in the agenda.
|
||||
|
||||
Host system requirements:
|
||||
* VirtIO support. We rely on diod on the host system. This can be
|
||||
installed on ubuntu using the following command:
|
||||
|
||||
sudo apt-get install diod
|
||||
|
||||
Guest requirements:
|
||||
* VirtIO support. We rely on VirtIO to move files into the simulation.
|
||||
Please make sure that the following are set in the kernel
|
||||
configuration:
|
||||
|
||||
CONFIG_NET_9P=y
|
||||
|
||||
CONFIG_NET_9P_VIRTIO=y
|
||||
|
||||
CONFIG_9P_FS=y
|
||||
|
||||
CONFIG_9P_FS_POSIX_ACL=y
|
||||
|
||||
CONFIG_9P_FS_SECURITY=y
|
||||
|
||||
CONFIG_VIRTIO_BLK=y
|
||||
|
||||
* m5 binary. Please make sure that the m5 binary is on the device and
|
||||
can be found in the path.
|
||||
"""
|
||||
|
||||
name = 'gem5_linux'
|
||||
platform = 'linux'
|
||||
|
||||
parameters = [
|
||||
Parameter('core_names', default=[], override=True),
|
||||
Parameter('core_clusters', default=[], override=True),
|
||||
Parameter('host', default='localhost', override=True,
|
||||
description='Host name or IP address for the device.'),
|
||||
Parameter('login_prompt', kind=types.list_of_strs,
|
||||
default=['login:', 'AEL login:', 'username:'],
|
||||
mandatory=False),
|
||||
Parameter('login_password_prompt', kind=types.list_of_strs,
|
||||
default=['password:'], mandatory=False),
|
||||
]
|
||||
|
||||
# Overwritten from Device. For documentation, see corresponding method in
|
||||
# Device.
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
self.logger = logging.getLogger('Gem5LinuxDevice')
|
||||
LinuxDevice.__init__(self, **kwargs)
|
||||
BaseGem5Device.__init__(self)
|
||||
|
||||
def login_to_device(self):
|
||||
# Wait for the login prompt
|
||||
prompt = self.login_prompt + [self.sckt.UNIQUE_PROMPT]
|
||||
i = self.sckt.expect(prompt, timeout=10)
|
||||
# Check if we are already at a prompt, or if we need to log in.
|
||||
if i < len(prompt) - 1:
|
||||
self.sckt.sendline("{}".format(self.username))
|
||||
password_prompt = self.login_password_prompt + [r'# ', self.sckt.UNIQUE_PROMPT]
|
||||
j = self.sckt.expect(password_prompt, timeout=self.delay)
|
||||
if j < len(password_prompt) - 2:
|
||||
self.sckt.sendline("{}".format(self.password))
|
||||
self.sckt.expect([r'# ', self.sckt.UNIQUE_PROMPT], timeout=self.delay)
|
||||
|
||||
def capture_screen(self, filepath):
|
||||
if BaseGem5Device.capture_screen(self, filepath):
|
||||
return
|
||||
|
||||
# If we didn't manage to do the above, call the parent class.
|
||||
self.logger.warning("capture_screen: falling back to parent class implementation")
|
||||
LinuxDevice.capture_screen(self, filepath)
|
||||
|
||||
def initialize(self, context):
|
||||
self.resize_shell()
|
||||
self.deploy_m5(context, force=False)
|
@@ -1,37 +0,0 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
from wlauto import LinuxDevice, Parameter
|
||||
|
||||
|
||||
class GenericDevice(LinuxDevice):
|
||||
name = 'generic_linux'
|
||||
description = """
|
||||
A generic Linux device interface. Use this if you do not have an interface
|
||||
for your device.
|
||||
|
||||
This should allow basic WA functionality on most Linux devices with SSH access
|
||||
configured. Some additional configuration may be required for some WA extensions
|
||||
(e.g. configuring ``core_names`` and ``core_clusters``).
|
||||
|
||||
"""
|
||||
|
||||
has_gpu = True
|
||||
|
||||
parameters = [
|
||||
Parameter('core_names', default=[], override=True),
|
||||
Parameter('core_clusters', default=[], override=True),
|
||||
]
|
@@ -1,34 +0,0 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
from wlauto import LinuxDevice, Parameter
|
||||
|
||||
|
||||
class OdroidXU3LinuxDevice(LinuxDevice):
|
||||
|
||||
name = "odroidxu3_linux"
|
||||
description = 'HardKernel Odroid XU3 development board (Ubuntu image).'
|
||||
|
||||
core_modules = [
|
||||
'odroidxu3-fan',
|
||||
]
|
||||
|
||||
parameters = [
|
||||
Parameter('core_names', default=['a7', 'a7', 'a7', 'a7', 'a15', 'a15', 'a15', 'a15'], override=True),
|
||||
Parameter('core_clusters', default=[0, 0, 0, 0, 1, 1, 1, 1], override=True),
|
||||
]
|
||||
|
||||
abi = 'armeabi'
|
@@ -141,3 +141,20 @@ class WorkerThreadError(WAError):
|
||||
message = 'Exception of type {} occurred on thread {}:\n'.format(orig_name, thread)
|
||||
message += '{}\n{}: {}'.format(get_traceback(self.exc_info), orig_name, orig)
|
||||
super(WorkerThreadError, self).__init__(message)
|
||||
|
||||
|
||||
class SerializerSyntaxError(Exception):
|
||||
"""
|
||||
Error loading a serialized structure from/to a file handle.
|
||||
"""
|
||||
|
||||
def __init__(self, message, line=None, column=None):
|
||||
super(SerializerSyntaxError, self).__init__(message)
|
||||
self.line = line
|
||||
self.column = column
|
||||
|
||||
def __str__(self):
|
||||
linestring = ' on line {}'.format(self.line) if self.line else ''
|
||||
colstring = ' in column {}'.format(self.column) if self.column else ''
|
||||
message = 'Syntax Error{}: {}'
|
||||
return message.format(''.join([linestring, colstring]), self.message)
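# Illustrative (hypothetical) usage of the exception added above, not part
# of this change:
#
#     raise SerializerSyntaxError('unexpected token', line=3, column=7)
#
# str() on the resulting instance yields
# "Syntax Error on line 3 in column 7: unexpected token".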
|
||||
|
@@ -55,3 +55,4 @@ logObserver.start()
|
||||
|
||||
def start_logging(level, fmt='%(asctime)s %(levelname)-8s: %(message)s'):
|
||||
logging.basicConfig(level=getattr(logging, level), format=fmt)
|
||||
|
||||
|
7
wlauto/external/revent/Makefile
vendored
@@ -1,12 +1,7 @@
|
||||
# CROSS_COMPILE=aarch64-linux-gnu- make
|
||||
#
|
||||
CC=gcc
|
||||
|
||||
ifdef DEBUG
|
||||
CFLAGS=-static -lc -g
|
||||
else
|
||||
CFLAGS=-static -lc -O2
|
||||
endif
|
||||
CFLAGS=-static -lc
|
||||
|
||||
revent: revent.c
|
||||
$(CROSS_COMPILE)$(CC) $(CFLAGS) revent.c -o revent
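# Example invocations (illustrative): a native host build, or a cross build
# using the toolchain prefix shown in the comment at the top of this file:
#
#   make
#   CROSS_COMPILE=aarch64-linux-gnu- make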
|
||||
|
1911
wlauto/external/revent/revent.c
vendored
File diff suppressed because it is too large.
11
wlauto/external/sqlite/README
vendored
@@ -1,11 +0,0 @@
|
||||
For WA we use a slightly modified version of sqlite3 so that it can
|
||||
be built statically. We used the amalgamated sqlite3 version 3.12.2.
|
||||
which is under the public domain.
|
||||
|
||||
https://www.sqlite.org/download.html
|
||||
|
||||
Build command:
|
||||
gcc shell.c sqlite3.c -lpthread -ldl -static -O2 -fPIC -DPIC -DSQLITE_OMIT_LOAD_EXTENSION
|
||||
|
||||
You will need to apply the diff in static.patch
|
||||
|
Some files were not shown because too many files have changed in this diff.