commit a747ec7e4c2ea8a25bfc675f80042eb6600c7050 Author: Sergei Trofimov Date: Tue Mar 10 13:09:31 2015 +0000 Initial commit of open source Workload Automation. diff --git a/.gitignore b/.gitignore new file mode 100755 index 00000000..0f568f55 --- /dev/null +++ b/.gitignore @@ -0,0 +1,30 @@ +*.egg-info +*.pyc +*.bak +*.o +*.cmd +Module.symvers +modules.order +*~ +tags +build/ +dist/ +.ropeproject/ +wa_output/ +doc/source/api/ +doc/source/extensions/ +MANIFEST +wlauto/external/uiautomator/bin/ +wlauto/external/uiautomator/*.properties +wlauto/external/uiautomator/build.xml +*.orig +local.properties +wlauto/external/revent/libs/ +wlauto/external/revent/obj/ +wlauto/external/bbench_server/libs/ +wlauto/external/bbench_server/obj/ +pmu_logger.mod.c +.tmp_versions +obj/ +libs/armeabi +wlauto/workloads/*/uiauto/bin/ diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 00000000..9790e788
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,2 @@
+recursive-include scripts *
+recursive-include doc *
diff --git a/README.rst b/README.rst
new file mode 100644
index 00000000..a48285fc
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,73 @@
+Workload Automation
++++++++++++++++++++
+
+Workload Automation (WA) is a framework for executing workloads and collecting
+measurements on Android and Linux devices. WA includes automation for nearly 50
+workloads (mostly Android) and some common instrumentation (ftrace, ARM
+Streamline, hwmon). A number of output formats are supported.
+
+Workload Automation is designed primarily as a developer tool/framework to
+facilitate data-driven development by providing a method of collecting
+measurements from a device in a repeatable way.
+
+Workload Automation is highly extensible. Most of the concrete functionality is
+implemented via plug-ins, and it is easy to write new plug-ins to support new
+device types, workloads, instrumentation or output processing.
+
+
+Requirements
+============
+
+- Python 2.7
+- Linux (should work on other Unixes, but untested)
+- Latest Android SDK (ANDROID_HOME must be set) for Android devices, or
+- SSH for Linux devices
+
+
+Installation
+============
+
+To install::
+
+    python setup.py sdist
+    sudo pip install dist/wlauto-*.tar.gz
+
+Please refer to the `installation section <./doc/source/installation.rst>`_
+in the documentation for more details.
+
+
+Basic Usage
+===========
+
+Please see the `Quickstart <./doc/source/quickstart.rst>`_ section of the
+documentation; a minimal invocation example is also shown at the end of this
+README.
+
+
+Documentation
+=============
+
+Documentation in reStructuredText format may be found under ``doc/source``. To
+compile it into cross-linked HTML, make sure you have `Sphinx
+<http://sphinx-doc.org/>`_ installed, and then ::
+
+    cd doc
+    make html
+
+
+License
+=======
+
+Workload Automation is distributed under the `Apache v2.0 License
+<http://www.apache.org/licenses/LICENSE-2.0>`_. Workload Automation includes
+binaries distributed under different licenses (see LICENSE files in specific
+directories).
+
+
+Feedback, Contributions and Support
+===================================
+
+- Please use the GitHub Issue Tracker associated with this repository for
+  feedback.
+- ARM licensees may contact ARM directly via their partner managers.
+- We welcome code contributions via GitHub Pull requests. Please see the
+  "Contributing Code" section of the documentation for details.
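+
+
+Quick Example
+=============
+
+As a minimal illustration (the agenda file name here is hypothetical -- see the
+Quickstart for a proper walk-through), a run is started by pointing ``wa run``
+at an agenda file::
+
+    wa run my_agenda.yaml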
diff --git a/dev_scripts/README b/dev_scripts/README
new file mode 100644
index 00000000..6ea0e95c
--- /dev/null
+++ b/dev_scripts/README
@@ -0,0 +1,23 @@
+This directory contains scripts that aid the development of Workload Automation.
+They were written to work as part of the WA development environment and are not
+guaranteed to work if moved outside their current location. They should not be
+distributed as part of WA releases.
+
+Scripts
+-------
+
+:clean_install: Performs a clean install of WA from source. This will remove any
+                existing WA install (regardless of whether it was made from
+                source or through a tarball with pip).
+
+:clear_env: Clears ~/.workload_automation.
+
+:get_apk_versions: Prints out a table of APKs and their versions found under the
+                   path specified as the argument.
+
+:pep8: Runs the pep8 code checker (must be installed) over wlauto with the
+       correct settings for WA.
+
+:pylint: Runs pylint (must be installed) over wlauto with the correct settings
+         for WA.
+
diff --git a/dev_scripts/clean_install b/dev_scripts/clean_install
new file mode 100755
index 00000000..7d647e5a
--- /dev/null
+++ b/dev_scripts/clean_install
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+import os
+import sys
+import shutil
+import logging
+
+
+logging.basicConfig(level=logging.INFO)
+
+
+def get_installed_path():
+    # Search site-packages (and similar) entries on sys.path for an
+    # installed copy of the wlauto package.
+    paths = [p for p in sys.path if len(p) > 2]
+    for path in paths:
+        candidate = os.path.join(path, 'wlauto')
+        if os.path.isdir(candidate):
+            return candidate
+
+
+if __name__ == '__main__':
+    installed_path = get_installed_path()
+    if installed_path:
+        logging.info('Removing installed package from {}.'.format(installed_path))
+        shutil.rmtree(installed_path)
+    if os.path.isdir('build'):
+        logging.info('Removing local build directory.')
+        shutil.rmtree('build')
+    logging.info('Removing *.pyc files.')
+    for root, dirs, files in os.walk('wlauto'):
+        for file in files:
+            if file.lower().endswith('.pyc'):
+                os.remove(os.path.join(root, file))
+
+    os.system('python setup.py install')
+
diff --git a/dev_scripts/clear_env b/dev_scripts/clear_env
new file mode 100755
index 00000000..ff720d75
--- /dev/null
+++ b/dev_scripts/clear_env
@@ -0,0 +1,3 @@
+#!/bin/bash
+# Clear the Workload Automation user environment.
+rm -rf ~/.workload_automation/
diff --git a/dev_scripts/get_apk_versions b/dev_scripts/get_apk_versions
new file mode 100755
index 00000000..39b08772
--- /dev/null
+++ b/dev_scripts/get_apk_versions
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+import os
+import sys
+import logging
+import argparse
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
+
+from wlauto.exceptions import WAError
+from wlauto.utils.misc import write_table
+from distmanagement.apk import get_aapt_path, get_apk_versions
+
+
+if __name__ == '__main__':
+    try:
+        aapt = get_aapt_path()
+        parser = argparse.ArgumentParser()
+        parser.add_argument('path', metavar='PATH', help='Location to look for APKs.')
+        args = parser.parse_args()
+
+        versions = get_apk_versions(args.path, aapt)
+        write_table([v.to_tuple() for v in versions], sys.stdout,
+                    align='<<<>>', headers=['path', 'package', 'name', 'version code', 'version name'])
+    except WAError as e:
+        logging.error(e)
+        sys.exit(1)
diff --git a/dev_scripts/pep8 b/dev_scripts/pep8
new file mode 100755
index 00000000..25507f38
--- /dev/null
+++ b/dev_scripts/pep8
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+EXCLUDE=wlauto/external/,wlauto/tests
+EXCLUDE_COMMA=wlauto/core/bootstrap.py,wlauto/workloads/geekbench/__init__.py
+IGNORE=E501,E265,E266,W391
+
+if ! hash pep8 2>/dev/null; then
+    echo "pep8 not found in PATH"
+    echo "you can install it with \"sudo pip install pep8\""
+    exit 1
+fi
+
+if [[ "$1" == "" ]]; then
+    THIS_DIR="`dirname \"$0\"`"
+    pushd $THIS_DIR/.. > /dev/null
+    pep8 --exclude=$EXCLUDE,$EXCLUDE_COMMA --ignore=$IGNORE wlauto
+    pep8 --exclude=$EXCLUDE --ignore=$IGNORE,E241 $(echo "$EXCLUDE_COMMA" | sed 's/,/ /g')
+    popd > /dev/null
+else
+    pep8 --exclude=$EXCLUDE,$EXCLUDE_COMMA --ignore=$IGNORE $1
+fi
+
diff --git a/dev_scripts/pylint b/dev_scripts/pylint
new file mode 100755
index 00000000..487d3c95
--- /dev/null
+++ b/dev_scripts/pylint
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+target=$1
+
+compare_versions() {
+    if [[ $1 == $2 ]]; then
+        return 0
+    fi
+
+    local IFS=.
+    local i ver1=($1) ver2=($2)
+
+    for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do
+        ver1[i]=0
+    done
+
+    for ((i=0; i<${#ver1[@]}; i++)); do
+        if [[ -z ${ver2[i]} ]]; then
+            ver2[i]=0
+        fi
+        if ((10#${ver1[i]} > 10#${ver2[i]})); then
+            return 1
+        fi
+        if ((10#${ver1[i]} < 10#${ver2[i]})); then
+            return 2
+        fi
+    done
+
+    return 0
+}
+
+pylint_version=$(python -c 'from pylint.__pkginfo__ import version; print version')
+compare_versions $pylint_version "1.3.0"
+result=$?
+if [ "$result" == "2" ]; then
+    echo "ERROR: pylint version must be at least 1.3.0; found $pylint_version"
+    exit 1
+fi
+
+THIS_DIR="`dirname \"$0\"`"
+if [[ "$target" == "" ]]; then
+    pushd $THIS_DIR/.. > /dev/null
+    pylint --rcfile extras/pylintrc wlauto
+    popd > /dev/null
+else
+    pylint --rcfile $THIS_DIR/../extras/pylintrc $target
+fi
diff --git a/doc/Makefile b/doc/Makefile
new file mode 100644
index 00000000..64237c70
--- /dev/null
+++ b/doc/Makefile
@@ -0,0 +1,184 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+BUILDDIR      = build
+
+SPHINXAPI     = sphinx-apidoc
+SPHINXAPIOPTS =
+
+WAEXT         = ./build_extension_docs.py
+WAEXTOPTS     = source/extensions ../wlauto ../wlauto/external ../wlauto/tests
+
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source +ALLSPHINXAPIOPTS = -f $(SPHINXAPIOPTS) -o source/api ../wlauto +# the i18n builder cannot share the environment and doctrees with the others +I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source + +.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " texinfo to make Texinfo files" + @echo " info to make Texinfo files and run them through makeinfo" + @echo " gettext to make PO message catalogs" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + @echo " coverage to run documentation coverage checks" + +clean: + rm -rf $(BUILDDIR)/* + rm -rf source/api/* + rm -rf source/extensions/* + rm -rf source/instrumentation_method_map.rst + +coverage: + $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage + @echo + @echo "Build finished. The coverage reports are in $(BUILDDIR)/coverage." + +api: ../wlauto + rm -rf source/api/* + $(SPHINXAPI) $(ALLSPHINXAPIOPTS) + +waext: ../wlauto + rm -rf source/extensions + mkdir -p source/extensions + $(WAEXT) $(WAEXTOPTS) + + +sigtab: ../wlauto/core/instrumentation.py source/instrumentation_method_map.template + rm -rf source/instrumentation_method_map.rst + ./build_instrumentation_method_map.py source/instrumentation_method_map.rst + +html: api waext sigtab + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: api waext sigtab + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +singlehtml: api waext sigtab + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + +pickle: api waext sigtab + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +json: api waext sigtab + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: api waext sigtab + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." 
+ +qthelp: api waext sigtab + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/WorkloadAutomation2.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/WorkloadAutomation2.qhc" + +devhelp: api + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." + @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/WorkloadAutomation2" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/WorkloadAutomation2" + @echo "# devhelp" + +epub: api waext sigtab + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + +latex: api waext sigtab + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." + +latexpdf: api waext sigtab + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +text: api waext sigtab + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." + +man: api waext sigtab + $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man + @echo + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." + +texinfo: api waext sigtab + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." + @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." + +info: api waext sigtab + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." + +gettext: api waext sigtab + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." + +changes: api waext sigtab + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: api waext sigtab + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: api waext sigtab + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." diff --git a/doc/build_extension_docs.py b/doc/build_extension_docs.py new file mode 100755 index 00000000..bee61cdd --- /dev/null +++ b/doc/build_extension_docs.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python +# Copyright 2014-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +import os +import sys + +from wlauto import ExtensionLoader +from wlauto.utils.doc import get_rst_from_extension, underline +from wlauto.utils.misc import capitalize + + +GENERATE_FOR = ['workload', 'instrument', 'result_processor', 'device'] + + +def generate_extension_documentation(source_dir, outdir, ignore_paths): + loader = ExtensionLoader(keep_going=True) + loader.clear() + loader.update(paths=[source_dir], ignore_paths=ignore_paths) + for ext_type in loader.extension_kinds: + if not ext_type in GENERATE_FOR: + continue + outfile = os.path.join(outdir, '{}s.rst'.format(ext_type)) + with open(outfile, 'w') as wfh: + wfh.write('.. _{}s:\n\n'.format(ext_type)) + wfh.write(underline(capitalize('{}s'.format(ext_type)))) + exts = loader.list_extensions(ext_type) + for ext in sorted(exts, key=lambda x: x.name): + wfh.write(get_rst_from_extension(ext)) + + +if __name__ == '__main__': + generate_extension_documentation(sys.argv[2], sys.argv[1], sys.argv[3:]) diff --git a/doc/build_instrumentation_method_map.py b/doc/build_instrumentation_method_map.py new file mode 100755 index 00000000..a9438c92 --- /dev/null +++ b/doc/build_instrumentation_method_map.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python +# Copyright 2015-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import os
+import sys
+import string
+
+from wlauto.core.instrumentation import SIGNAL_MAP, PRIORITY_MAP
+from wlauto.utils.doc import format_simple_table
+
+
+CONVINIENCE_ALIASES = ['initialize', 'setup', 'start', 'stop', 'process_workload_result',
+                       'update_result', 'teardown', 'finalize']
+
+OUTPUT_TEMPLATE_FILE = os.path.join(os.path.dirname(__file__), 'source', 'instrumentation_method_map.template')
+
+
+def escape_trailing_underscore(value):
+    # Escape a trailing underscore so that reST does not interpret the
+    # name as a link target; return other values unchanged.
+    if value.endswith('_'):
+        return value[:-1] + '\_'
+    return value
+
+
+def generate_instrumentation_method_map(outfile):
+    signal_table = format_simple_table([(k, v) for k, v in SIGNAL_MAP.iteritems()],
+                                       headers=['method name', 'signal'], align='<<')
+    priority_table = format_simple_table([(escape_trailing_underscore(k), v) for k, v in PRIORITY_MAP.iteritems()],
+                                         headers=['prefix', 'priority'], align='<>')
+    with open(OUTPUT_TEMPLATE_FILE) as fh:
+        template = string.Template(fh.read())
+    with open(outfile, 'w') as wfh:
+        wfh.write(template.substitute(signal_names=signal_table, priority_prefixes=priority_table))
+
+
+if __name__ == '__main__':
+    generate_instrumentation_method_map(sys.argv[1])
diff --git a/doc/source/_static/.gitignore b/doc/source/_static/.gitignore
new file mode 100644
index 00000000..e69de29b
diff --git a/doc/source/_templates/.gitignore b/doc/source/_templates/.gitignore
new file mode 100644
index 00000000..e69de29b
diff --git a/doc/source/additional_topics.rst b/doc/source/additional_topics.rst
new file mode 100644
index 00000000..520b3170
--- /dev/null
+++ b/doc/source/additional_topics.rst
@@ -0,0 +1,101 @@
+Additional Topics
++++++++++++++++++
+
+Modules
+=======
+
+Modules are essentially plug-ins for Extensions. They provide a way of defining
+common and reusable functionality. An Extension can load zero or more modules
+during its creation. Loaded modules will then add their capabilities (see
+Capabilities_) to those of the Extension. When calling code tries to access an
+attribute of an Extension the Extension doesn't have, it will try to find the
+attribute among its loaded modules and will return that instead.
+
+.. note:: Modules are themselves extensions, and can therefore load their own
+          modules. *Do not* abuse this.
+
+For example, calling code may wish to reboot an unresponsive device by calling
+``device.hard_reset()``, but the ``Device`` in question does not have a
+``hard_reset`` method; however, the ``Device`` has loaded the ``netio_switch``
+module, which makes it possible to disable the power supply over the network
+(say this device is in a rack and is powered through such a switch). The module
+has the ``reset_power`` capability (see Capabilities_ below) and so implements
+``hard_reset``. This will get invoked when ``device.hard_reset()`` is called.
+
+.. note:: Modules can only extend Extensions with new attributes; they cannot
+          override existing functionality. In the example above, if the
+          ``Device`` had implemented ``hard_reset()`` itself, then *that* would
+          get invoked irrespective of which modules it has loaded.
+
+If two loaded modules have the same capability or implement the same method,
+then the last module to be loaded "wins" and its method will be invoked,
+effectively overriding the module that was loaded previously.
+
+Specifying Modules
+------------------
+
+Modules get loaded when an Extension is instantiated by the extension loader.
+There are two ways to specify which modules should be loaded for a device.
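+
+As a purely illustrative sketch (the ``device_config`` entry and its
+``modules`` parameter here are assumptions for the sake of example, not a
+definitive reference), requesting a module for a device from an agenda's config
+section might look like this:
+
+.. code-block:: yaml
+
+    config:
+        device: generic_android
+        device_config:
+            modules: [netio_switch]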
+ + +Capabilities +============ + +Capabilities define the functionality that is implemented by an Extension, +either within the Extension itself or through loadable modules. A capability is +just a label, but there is an implied contract. When an Extension claims to have +a particular capability, it promises to expose a particular set of +functionality through a predefined interface. + +Currently used capabilities are described below. + +.. note:: Since capabilities are basically random strings, the user can always + define their own; and it is then up to the user to define, enforce and + document the contract associated with their capability. Below, are the + "standard" capabilities used in WA. + + +.. note:: The method signatures in the descriptions below show the calling + signature (i.e. they're omitting the initial self parameter). + +active_cooling +-------------- + +Intended to be used by devices and device modules, this capability implies +that the device implements a controllable active cooling solution (e.g. +a programmable fan). The device/module must implement the following methods: + +start_active_cooling() + Active cooling is started (e.g. the fan is turned on) + +stop_active_cooling() + Active cooling is stopped (e.g. the fan is turned off) + + +reset_power +----------- + +Intended to be used by devices and device modules, this capability implies +that the device is capable of performing a hard reset by toggling power. The +device/module must implement the following method: + +hard_reset() + The device is restarted. This method cannot rely on the device being + responsive and must work even if the software on the device has crashed. + + +flash +----- + +Intended to be used by devices and device modules, this capability implies +that the device can be flashed with new images. The device/module must +implement the following method: + +flash(image_bundle=None, images=None) + ``image_bundle`` is a path to a "bundle" (e.g. a tarball) that contains + all the images to be flashed. Which images go where must also be defined + within the bundle. ``images`` is a dict mapping image destination (e.g. + partition name) to the path to that specific image. Both + ``image_bundle`` and ``images`` may be specified at the same time. If + there is overlap between the two, ``images`` wins and its contents will + be flashed in preference to the ``image_bundle``. diff --git a/doc/source/agenda.rst b/doc/source/agenda.rst new file mode 100644 index 00000000..5b5ac690 --- /dev/null +++ b/doc/source/agenda.rst @@ -0,0 +1,608 @@ +.. _agenda: + +====== +Agenda +====== + +An agenda specifies what is to be done during a Workload Automation run, +including which workloads will be run, with what configuration, which +instruments and result processors will be enabled, etc. Agenda syntax is +designed to be both succinct and expressive. + +Agendas are specified using YAML_ notation. It is recommended that you +familiarize yourself with the linked page. + +.. _YAML: http://en.wikipedia.org/wiki/YAML + +.. note:: Earlier versions of WA have supported CSV-style agendas. These were + there to facilitate transition from WA1 scripts. The format was more + awkward and supported only a limited subset of the features. Support + for it has now been removed. + + +Specifying which workloads to run +================================= + +The central purpose of an agenda is to specify what workloads to run. 
A
+minimalist agenda contains a single entry at the top level called "workloads"
+that maps onto a list of workload names to run:
+
+.. code-block:: yaml
+
+    workloads:
+        - dhrystone
+        - memcpy
+        - cyclictest
+
+This specifies a WA run consisting of the ``dhrystone``, ``memcpy`` and
+``cyclictest`` workloads, executed in that order, using the instruments and
+result processors specified in config.py (see the
+:ref:`configuration-specification` section).
+
+.. note:: If you're familiar with YAML, you will recognize the above as a single-key
+          associative array mapping onto a list. YAML has two notations for both
+          associative arrays and lists: block notation (seen above) and also
+          in-line notation. This means that the above agenda can also be
+          written in a single line as ::
+
+              workloads: [dhrystone, memcpy, cyclictest]
+
+          (with the list in-lined), or ::
+
+              {workloads: [dhrystone, memcpy, cyclictest]}
+
+          (with both the list and the associative array in-line). WA doesn't
+          care which of the notations is used, as they all get parsed into the
+          same structure by the YAML parser. You can use whatever format you
+          find easier/clearer.
+
+Multiple iterations
+-------------------
+
+There will normally be some variability in workload execution when running on a
+real device. In order to quantify it, multiple iterations of the same workload
+are usually performed. You can specify the number of iterations for each
+workload by adding an ``iterations`` field to the workload specifications (or
+"specs"):
+
+.. code-block:: yaml
+
+    workloads:
+        - name: dhrystone
+          iterations: 5
+        - name: memcpy
+          iterations: 5
+        - name: cyclictest
+          iterations: 5
+
+Now that we're specifying both the workload name and the number of iterations in
+each spec, we have to explicitly name each field of the spec.
+
+It is often the case that, as in the example above, you will want to run all
+workloads for the same number of iterations. Rather than having to specify it
+for each and every spec, you can do this with a single entry by adding a
+``global`` section to your agenda:
+
+.. code-block:: yaml
+
+    global:
+        iterations: 5
+    workloads:
+        - dhrystone
+        - memcpy
+        - cyclictest
+
+The global section can contain the same fields as a workload spec. The
+fields in the global section will get added to each spec. If the same field is
+defined both in the global section and in a spec, then the value in the spec
+will overwrite the global value. For example, suppose we wanted to run all our
+workloads for five iterations, except cyclictest, which we want to run for ten
+(e.g. because we know it to be particularly unstable). This can be specified
+like this:
+
+.. code-block:: yaml
+
+    global:
+        iterations: 5
+    workloads:
+        - dhrystone
+        - memcpy
+        - name: cyclictest
+          iterations: 10
+
+Again, because we are now specifying two fields for the cyclictest spec, we have
+to explicitly name them.
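+
+.. note:: The in-line YAML notation mentioned earlier works for specs too; for
+          instance, the first iterations example above could equivalently be
+          written as ::
+
+              workloads: [{name: dhrystone, iterations: 5}, {name: memcpy, iterations: 5}, {name: cyclictest, iterations: 5}]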
+
+Configuring workloads
+---------------------
+
+Some workloads accept configuration parameters that modify their behavior. These
+parameters are specific to a particular workload and can alter the workload in
+any number of ways, e.g. set the duration for which to run, or specify a media
+file to be used, etc. The vast majority of workload parameters will have some
+default value, so it is only necessary to specify the name of the workload in
+order for WA to run it. However, sometimes you want more control over how a
+workload runs.
+
+For example, by default, dhrystone will execute 10 million loops across four
+threads. Suppose your device has six cores available and you want the workload
+to load them all. You also want to increase the total number of loops
+accordingly, to 15 million. You can specify this using dhrystone's parameters:
+
+.. code-block:: yaml
+
+    global:
+        iterations: 5
+    workloads:
+        - name: dhrystone
+          params:
+              threads: 6
+              mloops: 15
+        - memcpy
+        - name: cyclictest
+          iterations: 10
+
+.. note:: You can find out what parameters a workload accepts by looking it up
+          in the :ref:`Workloads` section. You can also look it up using WA
+          itself with the "show" command::
+
+              wa show dhrystone
+
+          see the :ref:`Invocation` section for details.
+
+In addition to configuring the workload itself, we can also specify
+configuration for the underlying device. This can be done by setting runtime
+parameters in the workload spec. For example, suppose we want to ensure the
+maximum score for our benchmarks, at the expense of power consumption, by
+setting the cpufreq governor to "performance" on cpu0 (assuming all our cores
+are in the same DVFS domain, so setting the governor for cpu0 will affect all
+cores). This can be done like this:
+
+.. code-block:: yaml
+
+    global:
+        iterations: 5
+    workloads:
+        - name: dhrystone
+          runtime_params:
+              sysfile_values:
+                  /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
+          workload_params:
+              threads: 6
+              mloops: 15
+        - memcpy
+        - name: cyclictest
+          iterations: 10
+
+
+Here, we're specifying the ``sysfile_values`` runtime parameter for the device.
+The value for this parameter is a mapping (an associative array, in YAML) of
+file paths onto values that should be written into those files.
+``sysfile_values`` is the only runtime parameter that is available for any
+(Linux) device. Other runtime parameters will depend on the specifics of the
+device used (e.g. its CPU core configuration). I've renamed ``params`` to
+``workload_params`` for clarity, but that wasn't strictly necessary, as
+``params`` is interpreted as ``workload_params`` inside a workload spec.
+
+.. note:: The ``params`` field is interpreted differently depending on whether
+          it's in a workload spec or the global section. In a workload spec, it
+          translates to ``workload_params``; in the global section, it
+          translates to ``runtime_params``.
+
+Runtime parameters do not automatically reset at the end of workload spec
+execution, so all subsequent iterations will also be affected unless they
+explicitly change the parameter (in the example above, the performance governor
+will also be used for ``memcpy`` and ``cyclictest``). There are two ways around
+this: either set the ``reboot_policy`` WA setting (see the
+:ref:`configuration-specification` section) such that the device gets rebooted
+between spec executions, thus being returned to its initial state, or set the
+default runtime parameter values in the ``global`` section of the agenda so
+that they get set for every spec that doesn't explicitly override them.
+
+.. note:: In addition to ``runtime_params`` there are also ``boot_params`` that
+          work in a similar way, but they get passed to the device when it
+          reboots. At the moment, ``TC2`` is the only device that defines a boot
+          parameter, which is explained in the ``TC2`` documentation, so boot
+          parameters will not be mentioned further.
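+
+Returning to the second of those workarounds, a minimal sketch of defaulting a
+runtime parameter in ``global`` and overriding it for one spec might look like
+this (the ``ondemand`` governor value is an assumption about what the device
+supports, for illustration only):
+
+.. code-block:: yaml
+
+    global:
+        runtime_params:
+            sysfile_values:
+                /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: ondemand
+    workloads:
+        - memcpy    # runs with the global "ondemand" setting
+        - name: dhrystone
+          runtime_params:
+              sysfile_values:
+                  /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance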
+
+IDs and Labels
+--------------
+
+It is possible to list multiple specs with the same workload in an agenda. You
+may wish to do this if you want to run a workload with different parameter
+values or under different runtime configurations of the device. The workload
+name therefore does not uniquely identify a spec. To be able to distinguish
+between different specs (e.g. in reported results), each spec has an ID which is
+unique to all specs within an agenda (and therefore within a single WA run). If
+an ID isn't explicitly specified using the ``id`` field (note that the field
+name is in lower case), one will be automatically assigned to the spec at the
+beginning of the WA run based on the position of the spec within the list. The
+first spec *without an explicit ID* will be assigned ID ``1``, the second spec
+*without an explicit ID* will be assigned ID ``2``, and so forth.
+
+Numerical IDs aren't particularly easy to deal with, which is why it is
+recommended that, for non-trivial agendas, you manually set the IDs to something
+more meaningful (or use labels -- see below). An ID can be pretty much anything
+that will pass through the YAML parser. The only requirement is that it is
+unique to the agenda. However, it is usually better to keep them reasonably
+short (they don't need to be *globally* unique), and to stick with alphanumeric
+characters and underscores/dashes. While WA can handle other characters as well,
+getting too adventurous with your IDs may cause issues further down the line
+when processing WA results (e.g. when uploading them to a database that may have
+its own restrictions).
+
+In addition to IDs, you can also specify labels for your workload specs. These
+are similar to IDs but do not have the uniqueness restriction. If specified,
+labels will be used by some result processors instead of (or in addition to) the
+workload name. For example, the ``csv`` result processor will put the label in
+the "workload" column of the CSV file.
+
+It is up to you how you choose to use IDs and labels. WA itself doesn't expect
+any particular format (apart from uniqueness for IDs). Below is the earlier
+example updated to specify explicit IDs and to label the dhrystone spec to
+reflect the parameters used.
+
+.. code-block:: yaml
+
+    global:
+        iterations: 5
+    workloads:
+        - id: 01_dhry
+          name: dhrystone
+          label: dhrystone_15over6
+          runtime_params:
+              sysfile_values:
+                  /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
+          workload_params:
+              threads: 6
+              mloops: 15
+        - id: 02_memc
+          name: memcpy
+        - id: 03_cycl
+          name: cyclictest
+          iterations: 10
+
+
+Result Processors and Instrumentation
+=====================================
+
+Result Processors
+-----------------
+
+Result processors, as the name suggests, handle the processing of results
+generated from running workload specs. By default, WA enables a couple of basic
+result processors (e.g. one generates a csv file with all scores reported by
+workloads), which you can see in ``~/.workload_automation/config.py``. However,
+WA has a number of other, more specialized, result processors (e.g. for
+uploading to databases). You can list available result processors with the
+``wa list result_processors`` command. If you want to permanently enable a
+result processor, you can add it to your ``config.py``. You can also enable a
+result processor for a particular run by specifying it in the ``config`` section
+in the agenda. As the name suggests, the ``config`` section mirrors the
+structure of ``config.py`` (although using YAML rather than Python), and
+anything that can be specified in the latter can also be specified in the
+former.
+
+As with workloads, result processors may have parameters that define their
+behavior.
Parameters of result processors are specified a little differently,
+however. Result processor parameter values are listed in the config section,
+namespaced under the name of the result processor.
+
+For example, suppose we want to be able to easily query the results generated by
+the workload specs we've defined so far. We can use the ``sqlite`` result
+processor to have WA create an sqlite_ database file with the results. By
+default, this file will be generated in WA's output directory (at the same level
+as results.csv); but suppose we want to store the results in the same file for
+every run of the agenda we do. This can be done by specifying an alternative
+database file with the ``database`` parameter of the result processor:
+
+.. code-block:: yaml
+
+    config:
+        result_processors: [sqlite]
+        sqlite:
+            database: ~/my_wa_results.sqlite
+    global:
+        iterations: 5
+    workloads:
+        - id: 01_dhry
+          name: dhrystone
+          label: dhrystone_15over6
+          runtime_params:
+              sysfile_values:
+                  /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
+          workload_params:
+              threads: 6
+              mloops: 15
+        - id: 02_memc
+          name: memcpy
+        - id: 03_cycl
+          name: cyclictest
+          iterations: 10
+
+A couple of things to observe here:
+
+- There is no need to repeat the result processors listed in ``config.py``. The
+  processors listed in the ``result_processors`` entry in the agenda will be
+  used *in addition to* those defined in ``config.py``.
+- The database file is specified under the "sqlite" entry in the config section.
+  Note, however, that this entry alone is not enough to enable the result
+  processor; it must be listed in ``result_processors``, otherwise the "sqlite"
+  config entry will be ignored.
+- The database file must be specified as an absolute path; however, it may use
+  the user home specifier '~' and/or environment variables.
+
+.. _sqlite: http://www.sqlite.org/
+
+
+Instrumentation
+---------------
+
+WA can enable various "instruments" to be used during workload execution.
+Instruments can be quite diverse in their functionality, but the majority of
+instruments available in WA today are there to collect additional data (such as
+trace) from the device during workload execution. You can view the list of
+available instruments by using the ``wa list instruments`` command. As with
+result processors, a few are enabled by default in ``config.py``, and additional
+ones may be added in the same place, or specified in the agenda using the
+``instrumentation`` entry.
+
+For example, we can collect core utilisation statistics (for what proportion of
+workload execution N cores were utilised above a specified threshold) using the
+``coreutil`` instrument:
+
+.. code-block:: yaml
+
+    config:
+        instrumentation: [coreutil]
+        coreutil:
+            threshold: 80
+        result_processors: [sqlite]
+        sqlite:
+            database: ~/my_wa_results.sqlite
+    global:
+        iterations: 5
+    workloads:
+        - id: 01_dhry
+          name: dhrystone
+          label: dhrystone_15over6
+          runtime_params:
+              sysfile_values:
+                  /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
+          workload_params:
+              threads: 6
+              mloops: 15
+        - id: 02_memc
+          name: memcpy
+        - id: 03_cycl
+          name: cyclictest
+          iterations: 10
+
+Instrumentation isn't "free", and it is advisable not to have too many
+instruments enabled at once, as that might skew results. For example, you don't
+want to have power measurement enabled at the same time as event tracing, as the
+latter may prevent cores from going into idle states and thus affect the
+readings collected by the former.
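+
+For instance (the ``energy_probe`` instrument name here is illustrative --
+check ``wa list instruments`` for what is actually available on your install),
+a run intended for power measurement would keep the enabled list lean rather
+than adding tracing on top:
+
+.. code-block:: yaml
+
+    config:
+        instrumentation: [energy_probe]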
+
+Unlike result processors, instrumentation may be enabled (and disabled -- see
+below) on a per-spec basis. For example, suppose we want to collect
+/proc/meminfo from the device when we run the ``memcpy`` workload, but not for
+the other two. We can do that using the ``sysfs_extractor`` instrument, and we
+will only enable it for ``memcpy``:
+
+.. code-block:: yaml
+
+    config:
+        instrumentation: [coreutil]
+        coreutil:
+            threshold: 80
+        sysfs_extractor:
+            paths: [/proc/meminfo]
+        result_processors: [sqlite]
+        sqlite:
+            database: ~/my_wa_results.sqlite
+    global:
+        iterations: 5
+    workloads:
+        - id: 01_dhry
+          name: dhrystone
+          label: dhrystone_15over6
+          runtime_params:
+              sysfile_values:
+                  /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
+          workload_params:
+              threads: 6
+              mloops: 15
+        - id: 02_memc
+          name: memcpy
+          instrumentation: [sysfs_extractor]
+        - id: 03_cycl
+          name: cyclictest
+          iterations: 10
+
+As with ``config`` sections, the ``instrumentation`` entry in a spec only needs
+to list additional instruments; it does not need to repeat instruments specified
+elsewhere.
+
+.. note:: At present, it is only possible to enable/disable instrumentation on
+          a per-spec basis. It is *not* possible to provide configuration on a
+          per-spec basis in the current version of WA (e.g. in our example, it
+          is not possible to specify different ``sysfs_extractor`` paths for
+          different workloads). This restriction may be lifted in future
+          versions of WA.
+
+Disabling result processors and instrumentation
+-----------------------------------------------
+
+As seen above, extensions specified with the ``instrumentation`` and
+``result_processors`` clauses get added to those already specified previously.
+Just because an instrument specified in ``config.py`` is not listed in the
+``config`` section of the agenda does not mean it will be disabled. If you do
+want to disable an instrument, you can always remove/comment it out from
+``config.py``. However, that introduces a permanent configuration change to
+your environment (one that can be easily reverted, but may be just as easily
+forgotten). If you want to temporarily disable a result processor or an
+instrument for a particular run, you can do that in your agenda by prepending a
+tilde (``~``) to its name.
+
+For example, let's say we want to disable the ``cpufreq`` instrument enabled in
+our ``config.py`` (suppose we're going to send results via email and so want to
+reduce the total size of the output directory):
+
+.. code-block:: yaml
+
+    config:
+        instrumentation: [coreutil, ~cpufreq]
+        coreutil:
+            threshold: 80
+        sysfs_extractor:
+            paths: [/proc/meminfo]
+        result_processors: [sqlite]
+        sqlite:
+            database: ~/my_wa_results.sqlite
+    global:
+        iterations: 5
+    workloads:
+        - id: 01_dhry
+          name: dhrystone
+          label: dhrystone_15over6
+          runtime_params:
+              sysfile_values:
+                  /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
+          workload_params:
+              threads: 6
+              mloops: 15
+        - id: 02_memc
+          name: memcpy
+          instrumentation: [sysfs_extractor]
+        - id: 03_cycl
+          name: cyclictest
+          iterations: 10
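+
+The same tilde syntax applies to result processors. For example, assuming a
+``csv`` result processor is enabled in your ``config.py``, it could be switched
+off for just this run with:
+
+.. code-block:: yaml
+
+    config:
+        result_processors: [sqlite, ~csv]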
+
+
+Sections
+========
+
+It is a common requirement to be able to run the same set of workloads under
+different device configurations. E.g. you may want to investigate the impact of
+changing a particular setting to different values on the benchmark scores, or to
+quantify the impact of enabling a particular feature in the kernel. WA allows
+this by defining "sections" of configuration within an agenda.
+
+For example, suppose what we really want is to measure the impact of using the
+interactive cpufreq governor vs the performance governor on the three
+benchmarks. We could create another three workload spec entries similar to the
+ones we already have and change the sysfile value being set to "interactive".
+However, this introduces a lot of duplication; and what if we want to change
+the spec configuration? We would have to change it in multiple places, running
+the risk of forgetting one.
+
+A better way is to keep the three workload specs and define a section for each
+governor:
+
+.. code-block:: yaml
+
+    config:
+        instrumentation: [coreutil, ~cpufreq]
+        coreutil:
+            threshold: 80
+        sysfs_extractor:
+            paths: [/proc/meminfo]
+        result_processors: [sqlite]
+        sqlite:
+            database: ~/my_wa_results.sqlite
+    global:
+        iterations: 5
+    sections:
+        - id: perf
+          runtime_params:
+              sysfile_values:
+                  /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance
+        - id: inter
+          runtime_params:
+              sysfile_values:
+                  /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: interactive
+    workloads:
+        - id: 01_dhry
+          name: dhrystone
+          label: dhrystone_15over6
+          workload_params:
+              threads: 6
+              mloops: 15
+        - id: 02_memc
+          name: memcpy
+          instrumentation: [sysfs_extractor]
+        - id: 03_cycl
+          name: cyclictest
+          iterations: 10
+
+A section, just like a workload spec, needs to have a unique ID. Apart from
+that, a "section" is similar to the ``global`` section we've already seen --
+everything that goes into a section will be applied to each workload spec.
+Workload specs defined under the top-level ``workloads`` entry will be executed
+for each of the sections listed under ``sections``.
+
+.. note:: It is also possible to have a ``workloads`` entry within a section,
+          in which case, those workloads will only be executed for that specific
+          section.
+
+In order to maintain the uniqueness requirement of workload spec IDs, they will
+be namespaced under each section by prepending the section ID to the spec ID
+with an underscore. So in the agenda above, we no longer have a workload spec
+with ID ``01_dhry``; instead, there are two specs with IDs ``perf_01_dhry`` and
+``inter_01_dhry``.
+
+Note that the ``global`` section still applies to every spec in the agenda. So
+the precedence order is -- spec settings override section settings, which in
+turn override global settings.
+
+
+Other Configuration
+===================
+
+.. _configuration_in_agenda:
+
+As mentioned previously, the ``config`` section in an agenda can contain
+anything that can be defined in ``config.py`` (with Python syntax translated to
+the equivalent YAML). Certain configuration (e.g. ``run_name``) makes more
+sense to define in an agenda than a config file. Refer to the
+:ref:`configuration-specification` section for details.
+
+.. 
code-block:: yaml + + config: + project: governor_comparison + run_name: performance_vs_interactive + + device: generic_android + reboot_policy: never + + instrumentation: [coreutil, ~cpufreq] + coreutil: + threshold: 80 + sysfs_extractor: + paths: [/proc/meminfo] + result_processors: [sqlite] + sqlite: + database: ~/my_wa_results.sqlite + global: + iterations: 5 + sections: + - id: perf + runtime_params: + sysfile_values: + /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance + - id: inter + runtime_params: + sysfile_values: + /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: interactive + workloads: + - id: 01_dhry + name: dhrystone + label: dhrystone_15over6 + workload_params: + threads: 6 + mloops: 15 + - id: 02_memc + name: memcpy + instrumentation: [sysfs_extractor] + - id: 03_cycl + name: cyclictest + iterations: 10 + diff --git a/doc/source/changes.rst b/doc/source/changes.rst new file mode 100644 index 00000000..9d1dd58d --- /dev/null +++ b/doc/source/changes.rst @@ -0,0 +1,7 @@ +What's New in Workload Automation +================================= + +Version 2.3.0 +------------- + +- First publicly-released version. diff --git a/doc/source/conf.py b/doc/source/conf.py new file mode 100644 index 00000000..56c30053 --- /dev/null +++ b/doc/source/conf.py @@ -0,0 +1,270 @@ +# -*- coding: utf-8 -*- +# Copyright 2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# +# Workload Automation 2 documentation build configuration file, created by +# sphinx-quickstart on Mon Jul 15 09:00:46 2013. +# +# This file is execfile()d with the current directory set to its containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys, os +import warnings + +warnings.filterwarnings('ignore', "Module louie was already imported") + +this_dir = os.path.dirname(__file__) +sys.path.insert(0, os.path.join(this_dir, '../..')) +import wlauto + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +#sys.path.insert(0, os.path.abspath('.')) + +# -- General configuration ----------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.mathjax', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. 
+#source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'Workload Automation' +copyright = u'2013, ARM Ltd' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = wlauto.__version__ +# The full version, including alpha/beta/rc tags. +release = wlauto.__version__ + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ['**/*-example'] + +# The reST default role (used for this markup: `text`) to use for all documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + + +# -- Options for HTML output --------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'default' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. 
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'WorkloadAutomationdoc'


# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'WorkloadAutomation.tex', u'Workload Automation Documentation',
   u'WA Mailing List \\textless{}workload-automation@arm.com\\textgreater{}, Sergei Trofimov \\textless{}sergei.trofimov@arm.com\\textgreater{}, Vasilis Flouris \\textless{}vasilis.flouris@arm.com\\textgreater{}, Mohammed Binsabbar \\textless{}mohammed.binsabbar@arm.com\\textgreater{}', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'workloadautomation', u'Workload Automation Documentation',
     [u'WA Mailing List <workload-automation@arm.com>, Sergei Trofimov <sergei.trofimov@arm.com>, Vasilis Flouris <vasilis.flouris@arm.com>'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'WorkloadAutomation', u'Workload Automation Documentation',
   u'WA Mailing List <workload-automation@arm.com>, Sergei Trofimov <sergei.trofimov@arm.com>, Vasilis Flouris <vasilis.flouris@arm.com>', 'WorkloadAutomation', 'A framework for automating workload execution on mobile devices.',
   'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'


def setup(app):
    app.add_object_type('confval', 'confval',
                        objname='configuration value',
                        indextemplate='pair: %s; configuration value')
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
new file mode 100644
index 00000000..8551c672
--- /dev/null
+++ b/doc/source/configuration.rst
@@ -0,0 +1,188 @@
.. _configuration-specification:

=============
Configuration
=============

In addition to specifying run execution parameters through an agenda, the
behavior of WA can be modified through configuration file(s). The default
configuration file is ``~/.workload_automation/config.py`` (the location can be
changed by setting the ``WA_USER_DIRECTORY`` environment variable; see the
:ref:`envvars` section below). This file will be created when you first run WA
if it does not already exist. This file must always exist and will always be
loaded. You can add to or override the contents of that file on invocation of
Workload Automation by specifying an additional configuration file using the
``--config`` option.

The config file is just a Python source file, so it can contain any valid
Python code (though execution of arbitrary code through the config file is
discouraged). Variables with specific names will be picked up by the framework
and used to modify the behavior of Workload Automation.

.. note:: As of version 2.1.3 it is also possible to specify the following
          configuration in the agenda. See :ref:`configuration in an agenda
          <configuration_in_agenda>`.


.. _available_settings:

Available Settings
==================

.. note:: Extensions such as workloads, instrumentation or result processors
          may also pick up certain settings from this file, so the list below
          is not exhaustive. Please refer to the documentation for the specific
          extensions to see what settings they accept.

.. confval:: device

    This setting defines what specific Device subclass will be used to interact
    with the connected device. Obviously, this must match your setup.

.. confval:: device_config

    This must be a Python dict containing setting-value mapping for the
    configured :rst:dir:`device`. What settings and values are valid is
    specific to each device. Please refer to the documentation for your device.

.. confval:: reboot_policy

    This defines when during execution of a run the Device will be rebooted.
    The possible values are:

    ``"never"``
        The device will never be rebooted.
    ``"initial"``
        The device will be rebooted when the execution first starts, just
        before executing the first workload spec.
    ``"each_spec"``
        The device will be rebooted before running a new workload spec.
        Note: this acts the same as ``each_iteration`` when execution order is
        set to ``by_iteration``.
    ``"each_iteration"``
        The device will be rebooted before each new iteration.

    .. seealso::

        :doc:`execution_model`

.. confval:: execution_order

    Defines the order in which the agenda spec will be executed. At the moment,
    the following execution orders are supported:

    ``"by_iteration"``
        The first iteration of each workload spec is executed one after the
        other, so all workloads are executed before proceeding on to the second
        iteration. E.g. A1 B1 C1 A2 C2 A3. This is the default if no order is
        explicitly specified.

        In case of multiple sections, this will spread them out, such that
        specs from the same section are further apart. E.g.
        given sections X and Y, global specs A and B, and two iterations, this
        will run ::

            X.A1, Y.A1, X.B1, Y.B1, X.A2, Y.A2, X.B2, Y.B2

    ``"by_section"``
        Same as ``"by_iteration"``, however this will group specs from the same
        section together, so given sections X and Y, global specs A and B, and
        two iterations, this will run ::

            X.A1, X.B1, Y.A1, Y.B1, X.A2, X.B2, Y.A2, Y.B2

    ``"by_spec"``
        All iterations of the first spec are executed before moving on to the
        next spec. E.g. A1 A2 A3 B1 C1 C2. This may also be specified as
        ``"classic"``, as this was the way workloads were executed in earlier
        versions of WA.

    ``"random"``
        Execution order is entirely random.

    Added in version 2.1.5.

.. confval:: instrumentation

    This should be a list of instruments to be enabled during run execution.
    Values must be names of available instruments. Instruments are used to
    collect additional data, such as energy measurements or execution time,
    during runs.

    .. seealso::

        :doc:`api/wlauto.instrumentation`

.. confval:: result_processors

    This should be a list of result processors to be enabled during run
    execution. Values must be names of available result processors. Result
    processors define how data is output from WA.

    .. seealso::

        :doc:`api/wlauto.result_processors`

.. confval:: logging

    A dict that contains logging settings. At the moment only three settings
    are supported:

    ``"file format"``
        Controls how logging output appears in the run.log file in the output
        directory.
    ``"verbose format"``
        Controls how logging output appears on the console when the
        ``--verbose`` flag was used.
    ``"regular format"``
        Controls how logging output appears on the console when the
        ``--verbose`` flag was not used.

    All three values should be Python `old-style format strings`_ specifying
    which `log record attributes`_ should be displayed.

There are also a couple of settings that are used to provide additional
metadata for a run. These may get picked up by instruments or result processors
to attach context to results.

.. confval:: project

    A string naming the project for which data is being collected. This may be
    useful, e.g. when uploading data to a shared database that is populated
    from multiple projects.

.. confval:: project_stage

    A dict or a string that allows adding an additional identifier. This may
    be useful for long-running projects.

.. confval:: run_name

    A string that labels the WA run that is being performed. This would
    typically be set in the ``config`` section of an agenda (see
    :ref:`configuration in an agenda <configuration_in_agenda>`) rather than in
    the config file.

.. _old-style format strings: http://docs.python.org/2/library/stdtypes.html#string-formatting-operations
.. _log record attributes: http://docs.python.org/2/library/logging.html#logrecord-attributes
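
For illustration, a ``config.py`` drawing on the settings above might look like
the following. This is only a sketch; the device serial, database path, and
project/run names are placeholder values:

.. code-block:: python

    # Example ~/.workload_automation/config.py (illustrative values only).
    device = 'generic_android'
    device_config = dict(
        adb_name='0123456789ABCDEF',   # hypothetical device serial
    )

    reboot_policy = 'never'
    execution_order = 'by_iteration'

    instrumentation = ['coreutil']
    result_processors = ['sqlite']
    sqlite_database = '~/my_wa_results.sqlite'  # hypothetical processor setting

    # Metadata picked up by instruments and result processors.
    project = 'governor_comparison'
    run_name = 'performance_vs_interactive'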
.. _envvars:

Environment Variables
=====================

In addition to the standard configuration described above, WA behaviour can be
altered through environment variables. These can determine where WA looks for
various assets when it starts.

.. confval:: WA_USER_DIRECTORY

    This is the location WA will look for config.py, instrumentation, etc., and
    it will also be used for local caches. If this variable is not set, the
    default location is ``~/.workload_automation`` (this is created when WA
    is installed).

    .. note:: This location **must** be writable by the user who runs WA.


.. confval:: WA_EXTENSION_PATHS

    By default, WA will look for extensions in its own package and in
    subdirectories under ``WA_USER_DIRECTORY``. This environment variable can
    be used to specify a colon-separated list of additional locations WA should
    use to look for extensions.
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
new file mode 100644
index 00000000..d0696ce7
--- /dev/null
+++ b/doc/source/contributing.rst
@@ -0,0 +1,45 @@

Contributing Code
=================

We welcome code contributions via GitHub pull requests to the official WA
repository. To help with maintainability of the code line, we ask that the code
uses a coding style consistent with the rest of WA code, which is basically
`PEP8 <https://www.python.org/dev/peps/pep-0008/>`_ with line length and block
comment rules relaxed (the wrapper for the PEP8 checker inside ``dev_scripts``
will run it with the appropriate configuration).

We ask that the following checks are performed on the modified code prior to
submitting a pull request:

.. note:: You will need pylint and pep8 static checkers installed::

              pip install pep8
              pip install pylint

          It is recommended that you install via pip rather than through your
          distribution's package manager, because the latter is likely to
          contain out-of-date versions of these tools.

- ``./dev_scripts/pylint`` should be run without arguments and should produce
  no output (any output should be addressed by making appropriate changes in
  the code or adding a pylint ignore directive, if there is a good reason for
  keeping the code as is).
- ``./dev_scripts/pep8`` should be run without arguments and should produce no
  output (any output should be addressed by making appropriate changes in the
  code).
- If the modifications touch the core framework (anything under
  ``wlauto/core``), unit tests should be run using ``nosetests``, and they
  should all pass.

  - If significant additions have been made to the framework, unit
    tests should be added to cover the new functionality.

- If modifications have been made to documentation (this includes description
  attributes for Parameters and Extensions), documentation should be built to
  make sure there are no errors or warnings during the build process, and a
  visual inspection of new/updated sections in the resulting HTML should be
  performed to ensure everything renders as expected.

Once your contribution is ready, please follow the instructions in `GitHub
documentation `_ to create a pull request.
diff --git a/doc/source/conventions.rst b/doc/source/conventions.rst
new file mode 100644
index 00000000..c811f522
--- /dev/null
+++ b/doc/source/conventions.rst
@@ -0,0 +1,74 @@
===========
Conventions
===========

Interface Definitions
=====================

Throughout this documentation a number of stubbed-out class definitions will be
presented showing an interface defined by a base class that needs to be
implemented by the deriving classes. The following conventions will be used
when presenting such an interface:

  - Methods shown raising :class:`NotImplementedError` are abstract and *must*
    be overridden by subclasses.
  - Methods with ``pass`` in their body *may* be (but do not need to be)
    overridden by subclasses. If not overridden, these methods will default to
    the base class implementation, which may or may not be a no-op (the
    ``pass`` in the interface specification does not necessarily mean that the
    method does not have an actual implementation in the base class).

    .. note:: If you *do* override these methods you must remember to call the
              base class' version inside your implementation as well.

  - Attributes whose value is shown as ``None`` *must* be redefined by the
    subclasses with an appropriate value.
  - Attributes whose value is shown as something other than ``None`` (including
    empty strings/lists/dicts) *may* be (but do not need to be) overridden by
    subclasses. If not overridden, they will default to the value shown.

Keep in mind that the above convention applies only when showing interface
definitions and may not apply elsewhere in the documentation. Also, in the
interest of clarity, only the relevant parts of the base class definitions will
be shown; some members (such as internal methods) may be omitted.
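
To make these conventions concrete, here is what such a stubbed-out interface
might look like. This is a purely hypothetical class used only to demonstrate
the notation, not an actual WA interface:

.. code-block:: python

    class Interface(object):

        name = None               # must be redefined by subclasses

        supported_platforms = []  # may be overridden; defaults to the value shown

        def setup(self, context):
            pass  # may be overridden; the base implementation may do real work

        def run(self, context):
            raise NotImplementedError()  # abstract -- must be overridden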

Code Snippets
=============

Code snippets provided are intended to be valid Python code, and to be
complete. However, for the sake of clarity, in some cases only the relevant
parts will be shown, with some details omitted (details that may be necessary
to the validity of the code but not to understanding of the concept being
illustrated). In such cases, a commented ellipsis will be used to indicate that
parts of the code have been dropped. E.g. ::

    # ...

    def update_result(self, context):
        # ...
        context.result.add_metric('energy', 23.6, 'Joules', lower_is_better=True)

    # ...


Core Class Names
================

When core classes are referenced throughout the documentation, usually their
fully-qualified names are given e.g. :class:`wlauto.core.workload.Workload`.
This is done so that Sphinx_ can resolve them and provide a link. While
implementing extensions, however, you should *not* be importing anything
directly from under :mod:`wlauto.core`. Instead, classes you are meant to
instantiate or subclass have been aliased in the root :mod:`wlauto` package,
and should be imported from there, e.g. ::

    from wlauto import Workload

All examples given in the documentation follow this convention. Please note
that this only applies to the :mod:`wlauto.core` subpackage; all other classes
should be imported from their corresponding subpackages.

.. _Sphinx: http://sphinx-doc.org/


diff --git a/doc/source/daq_device_setup.rst b/doc/source/daq_device_setup.rst
new file mode 100644
index 00000000..8853fc2f
--- /dev/null
+++ b/doc/source/daq_device_setup.rst
@@ -0,0 +1,246 @@
.. _daq_setup:

DAQ Server Guide
================

NI-DAQ, or just "DAQ", is the Data Acquisition device developed by National
Instruments:

    http://www.ni.com/data-acquisition/

WA uses the DAQ to collect power measurements during workload execution. A
client/server solution for this is distributed as part of WA, though it is
distinct from WA and may be used separately (by invoking the client APIs from a
Python script, or used directly from the command line).

This solution is dependent on the NI-DAQmx driver for the DAQ device. At the
time of writing, only Windows versions of the driver are supported (there is an
old Linux version that works on some versions of RHEL and Centos, but it is
unsupported and won't work with recent Linux kernels). Because of this, the
server part of the solution will need to be run on a Windows machine (though it
should also work on Linux, if the driver becomes available).


.. _daq_wiring:

DAQ Device Wiring
-----------------

The server expects the device to be wired in a specific way in order to be able
to collect power measurements.
Two consecutive Analogue Input (AI) channels on the DAQ are used to form a
logical "port" (starting with AI/0 and AI/1 for port 0). Of these, the
lower/even channel (e.g. AI/0) is used to measure the voltage on the rail we're
interested in; the higher/odd channel (e.g. AI/1) is used to measure the
voltage drop across a known very small resistor on the same rail, which is then
used to calculate current. The logical wiring diagram looks like this::

        Port N
        ======
        |
        | AI/(N*2)+   <--- Vr -------------------------|
        |                                              |
        | AI/(N*2)-   <--- GND -------------------//   |
        |                                              |
        | AI/(N*2+1)+ <--- V  ------------|-------V    |
        |                   r             |            |
        | AI/(N*2+1)- <--- Vr --/\/\/\----|            |
        |                                 |            |
        |                                 |            |
        |                                 |------------|
        ======

        Where:
        V:   Voltage going into the resistor
        Vr:  Voltage between resistor and the SOC
        GND: Ground
        r:   The resistor across the rail with a known
             small value.


The physical wiring will depend on the specific DAQ device, as channel layout
varies between models.

.. note:: The current solution supports a variable number of ports, however it
          assumes that the ports are sequential and start at zero. E.g. if you
          want to measure power on three rails, you will need to wire ports 0-2
          (AI/0 to AI/5 channels on the DAQ) to do it. It is not currently
          possible to use any other configuration (e.g. ports 1, 2 and 5).


Setting up NI-DAQmx driver on a Windows Machine
-----------------------------------------------

 - The NI-DAQmx driver is pretty big in size, 1.5 GB. The driver name is
   'NI-DAQmx' and its version is '9.7.0f0'; you can obtain it from the National
   Instruments website by downloading NI Measurement & Automation Explorer (NI
   MAX) from: http://joule.ni.com/nidu/cds/view/p/id/3811/lang/en

   .. note:: During the installation process, you might be prompted to install
             .NET framework 4.

 - The installation process is quite long, 7-15 minutes.
 - Once installed, open NI MAX, which should be on your desktop; if not, type
   its name in the start->search box.
 - Connect the NI-DAQ device to your machine. You should see it appear under
   'Devices and Interfaces'. If not, press 'F5' to refresh the list.
 - Complete the device wiring as described in the :ref:`daq_wiring` section.
 - Quit NI MAX.


Setting up DAQ server
---------------------

The DAQ power measurement solution is implemented in the daqpower Python
library, the package for which can be found in WA's install location under
``wlauto/external/daq_server/daqpower-1.0.0.tar.gz`` (the version number in
your installation may be different).

 - Install the NI-DAQmx driver, as described in the previous section.
 - Install Python 2.7.
 - Download and install ``pip``, ``numpy`` and ``twisted`` Python packages.
   These packages have C extensions, and so you will need a native compiler set
   up if you want to install them from PyPI. As an easier alternative, you can
   find pre-built Windows installers for these packages here_ (the versions are
   likely to be older than what's on PyPI though).
 - Install the daqpower package using pip::

      pip install C:\Python27\Lib\site-packages\wlauto\external\daq_server\daqpower-1.0.0.tar.gz

   This should automatically download and install the ``PyDAQmx`` package as
   well (the Python bindings for the NI-DAQmx driver).
.. _here: http://www.lfd.uci.edu/~gohlke/pythonlibs/


Running DAQ server
------------------

Once you have installed the ``daqpower`` package and the required dependencies
as described above, you can start the server by executing ``run-daq-server``
from the command line. The server will start listening on the default port,
45677.

.. note:: There is a chance that pip will not add ``run-daq-server`` into your
          path. In that case, you can run the daq server as such:
          ``python C:\path to python\Scripts\run-daq-server``

You can optionally specify flags to control the behaviour of the server::

        usage: run-daq-server [-h] [-d DIR] [-p PORT] [--debug] [--verbose]

        optional arguments:
          -h, --help            show this help message and exit
          -d DIR, --directory DIR
                                Working directory
          -p PORT, --port PORT  port the server will listen on.
          --debug               Run in debug mode (no DAQ connected).
          --verbose             Produce verbose output.

.. note:: The server will use a working directory (by default, the directory
          the run-daq-server command was executed in, or the location specified
          with the -d flag) to store power traces before they are collected by
          the client. This directory must be read/write-able by the user
          running the server.


Collecting Power with WA
------------------------

.. note:: You do *not* need to install the ``daqpower`` package on the machine
          running WA, as it is already included in the WA install structure.
          However, you do need to make sure that the ``twisted`` package is
          installed.

You can enable the ``daq`` instrument in your agenda/config.py in order to get
WA to collect power measurements. At minimum, you will also need to specify the
resistor values for each port in your configuration, e.g.::

    resistor_values = [0.005, 0.005]  # in Ohms

This also specifies the number of logical ports (measurement sites) you want to
use, and, implicitly, the port numbers (ports 0 to N-1 will be used).

.. note:: "ports" here refers to the logical ports wired on the DAQ (see
          :ref:`daq_wiring`), not to be confused with the TCP port the server
          is listening on.

Unless you're running the DAQ server and WA on the same machine (unlikely
considering that WA is officially supported only on Linux and recent NI-DAQmx
drivers are only available on Windows), you will also need to specify the IP
address of the server::

    daq_server = '127.0.0.1'

There are a number of other settings that can optionally be specified in the
configuration (e.g. the labels to be used for DAQ ports). Please refer to the
:class:`wlauto.instrumentation.daq.Daq` documentation for details.
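
Putting this together, the DAQ-related part of a ``config.py`` might look
something like the following sketch, which combines the settings shown above
(the server address is, of course, a placeholder):

.. code-block:: python

    # Sketch of DAQ-related settings in config.py.
    instrumentation = ['daq']         # enable the daq instrument
    daq_server = '127.0.0.1'          # address of the machine running run-daq-server
    resistor_values = [0.005, 0.005]  # in Ohms; one entry per wired port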

Collecting Power from the Command Line
--------------------------------------

The ``daqpower`` package also comes with a client that may be used from the
command line. Unlike when collecting power with WA, you *will* need to install
the ``daqpower`` package. Once installed, you will be able to interact with a
running DAQ server by invoking ``send-daq-command``. The invocation syntax is ::

    send-daq-command --host HOST [--port PORT] COMMAND [OPTIONS]

Options are command-specific. COMMAND may be one of the following (and they
should generally be invoked in that order):

    :configure: Set up a new session, specifying the configuration values to
                be used. If there is already a configured session, it will
                be terminated. OPTIONS for this command are the DAQ
                configuration parameters listed in the DAQ instrument
                documentation with all ``_`` replaced by ``-`` and prefixed
                with ``--``, e.g. ``--resistor-values``.
    :start: Start collecting power measurements.
    :stop: Stop collecting power measurements.
    :get_data: Pull files containing power measurements from the server.
               There is one option for this command:
               ``--output-directory`` which specifies where the files will
               be pulled to; if this is not specified, they will be placed in
               the current directory.
    :close: Close the currently configured server session. This will get rid
            of the data files and configuration on the server, so it would
            no longer be possible to use "start" or "get_data" commands
            before a new session is configured.

A typical command line session would go like this:

.. code-block:: bash

    send-daq-command --host 127.0.0.1 configure --resistor-values 0.005 0.005
    # set up and kick off the use case you want to measure
    send-daq-command --host 127.0.0.1 start
    # wait for the use case to complete
    send-daq-command --host 127.0.0.1 stop
    send-daq-command --host 127.0.0.1 get_data
    # files called PORT_0.csv and PORT_1.csv will appear in the current directory
    # containing measurements collected during use case execution
    send-daq-command --host 127.0.0.1 close
    # the session is terminated and the csv files on the server have been
    # deleted. A new session may now be configured.

In addition to these "standard workflow" commands, the following commands are
also available:

    :list_devices: Returns a list of DAQ devices detected by the NI-DAQmx
                   driver. In case multiple devices are connected to the
                   server host, you can specify the device you want to use
                   with the ``--device-id`` option when configuring a session.
    :list_ports: Returns a list of ports that have been configured for the
                 current session, e.g. ``['PORT_0', 'PORT_1']``.
    :list_port_files: Returns a list of data files that have been generated
                      (unless something went wrong, there should be one for
                      each port).


Collecting Power from another Python Script
-------------------------------------------

You can invoke the above commands from a Python script using the
:py:func:`daqpower.client.execute_command` function, passing in a
:class:`daqpower.config.ServerConfiguration` and, in the case of the configure
command, a :class:`daqpower.config.DeviceConfiguration`. Please see the
implementation of the ``daq`` WA instrument for examples of how these APIs can
be used.
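
As a rough sketch of what that might look like -- note that the constructor
arguments and the exact ``execute_command`` signature below are assumptions
based on the description above, not the documented API; consult the ``daq``
instrument source for authoritative usage:

.. code-block:: python

    # Speculative sketch only -- argument names are assumptions.
    from daqpower.client import execute_command
    from daqpower.config import ServerConfiguration, DeviceConfiguration

    server_config = ServerConfiguration(host='127.0.0.1', port=45677)
    device_config = DeviceConfiguration(resistor_values=[0.005, 0.005])

    execute_command(server_config, 'configure', device_config)
    # ... set up and run the use case to be measured ...
    execute_command(server_config, 'start')
    # ... wait for the use case to complete ...
    execute_command(server_config, 'stop')
    execute_command(server_config, 'get_data')
    execute_command(server_config, 'close')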
diff --git a/doc/source/device_setup.rst b/doc/source/device_setup.rst
new file mode 100644
index 00000000..3f6e16ad
--- /dev/null
+++ b/doc/source/device_setup.rst
@@ -0,0 +1,407 @@
Setting Up A Device
===================

WA should work with most Android devices out-of-the box, as long as the device
is discoverable by ``adb`` (i.e. gets listed when you run ``adb devices``). For
USB-attached devices, that should be the case; for network devices, ``adb
connect`` would need to be invoked with the IP address of the device. If there
is only one device connected to the host running WA, then no further
configuration should be necessary (though you may want to :ref:`tweak some
Android settings <configuring-android>`).

If you have multiple devices connected, have a non-standard Android build (e.g.
on a development board), or want to use some of the more advanced WA
functionality, further configuration will be required.

Android
+++++++

General Device Setup
--------------------

You can specify the device interface by setting the ``device`` setting in
``~/.workload_automation/config.py``. Available interfaces can be viewed by
running the ``wa list devices`` command. If you don't see your specific device
listed (which is likely unless you're using one of the ARM-supplied platforms),
then you should use the ``generic_android`` interface (this is set in the
config by default).

.. code-block:: python

    device = 'generic_android'

The device interface may be configured through the ``device_config`` setting,
whose value is a ``dict`` mapping setting names to their values. You can find
the full list of available parameters by looking up your device interface in
the :ref:`devices` section of the documentation. Some of the most common
parameters you might want to change are outlined below.

.. confval:: adb_name

    If you have multiple Android devices connected to the host machine, you
    will need to set this to indicate to WA which device you want it to use.

.. confval:: working_directory

    WA needs a "working" directory on the device which it will use for
    collecting traces, caching assets it pushes to the device, etc. By default,
    it will create one under ``/sdcard`` which should be mapped and writable on
    standard Android builds. If this is not the case for your device, you will
    need to specify an alternative working directory (e.g. under
    ``/data/local``).

.. confval:: scheduler

    This specifies the scheduling mechanism (from the perspective of core
    layout) utilized by the device. For recent big.LITTLE devices, this should
    generally be "hmp" (ARM Heterogeneous Multi-Processing); some legacy
    development platforms might have Linaro IKS kernels, in which case it
    should be "iks". For homogeneous (single-cluster) devices, it should be
    "smp". Please see the ``scheduler`` parameter in the ``generic_android``
    device documentation for more details.

.. confval:: core_names

    This and ``core_clusters`` need to be set if you want to utilize some more
    advanced WA functionality (like setting of core-related runtime parameters
    such as governors, frequencies, etc). ``core_names`` should be a list of
    core names matching the order in which they are exposed in sysfs. For
    example, the ARM TC2 SoC is a 2x3 big.LITTLE system; its core_names would
    be ``['a7', 'a7', 'a7', 'a15', 'a15']``, indicating that cpu0-cpu2 in the
    cpufreq sysfs structure are A7's and cpu3 and cpu4 are A15's.

.. confval:: core_clusters

    If ``core_names`` is defined, this must also be defined. This is a list of
    integer values indicating the cluster the corresponding core in
    ``core_names`` belongs to. For example, for TC2, this would be
    ``[0, 0, 0, 1, 1]``, indicating that A7's are on cluster 0 and A15's are on
    cluster 1.

A typical ``device_config`` inside ``config.py`` may look something like


.. code-block:: python

    device_config = dict(
        adb_name='0123456789ABCDEF',
        working_directory='/sdcard/wa-working',
        core_names=['a7', 'a7', 'a7', 'a15', 'a15'],
        core_clusters=[0, 0, 0, 1, 1],
        # ...
    )

.. _configuring-android:

Configuring Android
-------------------

There are a few additional tasks you may need to perform once you have a device
booted into Android (especially if this is an initial boot of a fresh OS
deployment):

 - You have gone through FTU (first time usage) on the home screen and
   in the apps menu.
 - You have disabled the screen lock.
 - You have set sleep timeout to the highest possible value (30 mins on
   most devices).
 - You have disabled brightness auto-adjust and have set the brightness
   to a fixed level.
 - You have set the locale language to "English" (this is important for
   some workloads in which UI automation looks for specific text in UI
   elements).

TC2 Setup
---------

This section outlines how to set up the ARM TC2 development platform to work
with WA.

Pre-requisites
~~~~~~~~~~~~~~

You can obtain the full set of images for TC2 from Linaro:

https://releases.linaro.org/latest/android/vexpress-lsk.

For the easiest setup, follow the instructions on the "Firmware" and "Binary
Image Installation" tabs on that page.

.. note:: The default ``reboot_policy`` in ``config.py`` is to not reboot. With
          this, WA will assume that the device is already booted into Android
          prior to WA being invoked. If you want WA to do the initial boot of
          the TC2, you will have to change the reboot policy to at least
          ``initial``.


Setting Up Images
~~~~~~~~~~~~~~~~~

.. note:: Make sure that both DIP switches near the black reset button on TC2
          are up (this is counter to the Linaro guide that instructs to lower
          one of the switches).

.. note:: The TC2 must have an Ethernet connection.


If you have followed the setup instructions on the Linaro page, you should have
a USB stick or an SD card with the file system, and an internal microSD on the
board (VEMSD) with the firmware images. The default Linaro configuration is to
boot from the image on the boot partition in the file system you have just
created. This is not supported by WA, which expects the image to be in NOR
flash on the board. This requires you to copy the images from the boot
partition onto the internal microSD card.

Assuming the boot partition of the Linaro file system is mounted on
``/media/boot`` and the internal microSD is mounted on ``/media/VEMSD``, copy
the following images::

    cp /media/boot/zImage /media/VEMSD/SOFTWARE/kern_mp.bin
    cp /media/boot/initrd /media/VEMSD/SOFTWARE/init_mp.bin
    cp /media/boot/v2p-ca15-tc2.dtb /media/VEMSD/SOFTWARE/mp_a7bc.dtb

Optionally
##########

The default device tree configuration for the TC2 is to boot on the A7 cluster.
It is also possible to configure the device tree to boot on the A15 cluster, or
to boot with one of the clusters disabled (turning TC2 into an A7-only or
A15-only device). Please refer to the "Firmware" tab on the Linaro page linked
above for instructions on how to compile the appropriate device tree
configurations.

WA allows selecting between these configurations using the ``os_mode`` boot
parameter of the TC2 device interface. In order for this to work correctly,
device tree files for the A15-bootcluster, A7-only and A15-only configurations
should be copied into ``/media/VEMSD/SOFTWARE/`` as ``mp_a15bc.dtb``,
``mp_a7.dtb`` and ``mp_a15.dtb`` respectively.

This is entirely optional. If you're not planning on switching boot cluster
configuration, those files do not need to be present in VEMSD.

config.txt
##########

Also, make sure that the ``USB_REMOTE`` setting in ``/media/VEMSD/config.txt``
is set to ``TRUE`` (this will allow rebooting the device by writing reboot.txt
to VEMSD). ::

    USB_REMOTE: TRUE ;Selects remote command via USB


TC2-specific device_config settings
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

There are a few settings that may need to be set in ``device_config`` inside
your ``config.py`` which are specific to TC2:

.. note:: TC2 *does not* accept most "standard" android ``device_config``
          settings.
adb_name
    If you're running WA with reboots disabled (which is the default reboot
    policy), you will need to manually run ``adb connect`` with TC2's IP
    address and set this.

root_mount
    WA expects TC2's internal microSD to be mounted on the host under
    ``/media/VEMSD``. If this location is different, it needs to be specified
    using this setting.

boot_firmware
    WA defaults to trying to boot using UEFI, which will require some
    additional firmware from ARM that may not be provided with Linaro releases
    (see the UEFI and PSCI section below). If you do not have those images, you
    will need to set ``boot_firmware`` to ``bootmon``.

fs_medium
    TC2's file system can reside either on an SD card or on a USB stick. Boot
    configuration is different depending on this. By default, WA expects it
    to be on ``usb``; if you are using an SD card, you should set this to
    ``sd``.

bm_image
    The bootmon image that comes as part of TC2 firmware periodically gets
    updated. At the time of the release, ``bm_v519r.axf`` was used by
    ARM. If you are using a more recent image, you will need to set this
    indicating the image name (just the name of the actual file, *not* the
    path). Note: this setting only applies if using ``bootmon`` boot
    firmware.

serial_device
    WA will assume TC2 is connected on ``/dev/ttyS0`` by default. If the
    serial port is different, you will need to set this.


UEFI and PSCI
~~~~~~~~~~~~~

UEFI is a boot firmware alternative to bootmon. Currently, UEFI is coupled with
PSCI (Power State Coordination Interface). That means that in order to use
PSCI, UEFI has to be the boot firmware. Currently, the reverse dependency is
true as well (for TC2). Therefore, using UEFI requires enabling PSCI.

In case you intend to use UEFI/PSCI mode instead of bootmon, you will need two
additional files: tc2_sec.bin and tc2_uefi.bin. After obtaining those files,
place them inside the /media/VEMSD/SOFTWARE/ directory as such::

    cp tc2_sec.bin /media/VEMSD/SOFTWARE/
    cp tc2_uefi.bin /media/VEMSD/SOFTWARE/


Juno Setup
----------

.. note:: At the time of writing, the Android software stack on Juno was still
          very immature. Some workloads may not run, and there may be stability
          issues with the device.


The full software stack can be obtained from Linaro:

https://releases.linaro.org/14.08/members/arm/android/images/armv8-android-juno-lsk

Please follow the instructions on the "Binary Image Installation" tab on that
page. More up-to-date firmware and kernel may also be obtained by registered
members from ARM Connected Community: http://www.arm.com/community/ (though
this is not guaranteed to work with the Linaro file system).

UEFI
~~~~

Juno uses UEFI_ to boot the kernel image. UEFI supports multiple boot
configurations, and presents a menu on boot to select (in the default
configuration it will automatically boot the first entry in the menu if not
interrupted before a timeout). WA will look for a specific entry in the UEFI
menu (``'WA'`` by default, but that may be changed by setting ``uefi_entry`` in
the ``device_config``). When following the UEFI instructions on the above
Linaro page, please make sure to name the entry appropriately (or to correctly
set the ``uefi_entry``).

.. _UEFI: http://en.wikipedia.org/wiki/UEFI

There are two supported ways for Juno to discover kernel images through UEFI.
It can either load them from NOR flash on the board, or from the boot partition
on the file system.
The setup described on the Linaro page uses the boot partition method.

If WA does not find the UEFI entry it expects, it will create one. However, it
will assume that the kernel image resides in NOR flash, which means it will not
work with the Linaro file system. So if you're replicating the Linaro setup
exactly, you will need to create the entry manually, as outlined on the
above-linked page.

Rebooting
~~~~~~~~~

At the time of writing, normal Android reboot did not work properly on Juno
Android, causing the device to crash into an irrecoverable state. Therefore, WA
will perform a hard reset to reboot the device. It will attempt to do this by
toggling the DTR line on the serial connection to the device. In order for this
to work, you need to make sure that the SW1 configuration switch on the back
panel of the board (the right-most DIP switch) is toggled *down*.


Linux
+++++

General Device Setup
--------------------

You can specify the device interface by setting the ``device`` setting in
``~/.workload_automation/config.py``. Available interfaces can be viewed by
running the ``wa list devices`` command. If you don't see your specific device
listed (which is likely unless you're using one of the ARM-supplied platforms),
then you should use the ``generic_linux`` interface (this is set in the config
by default).

.. code-block:: python

    device = 'generic_linux'

The device interface may be configured through the ``device_config`` setting,
whose value is a ``dict`` mapping setting names to their values. You can find
the full list of available parameters by looking up your device interface in
the :ref:`devices` section of the documentation. Some of the most common
parameters you might want to change are outlined below.

Currently, the only supported method for talking to a Linux device is over SSH.
Device configuration must specify the parameters needed to establish the
connection.

.. confval:: host

    This should be either the DNS name or IP address of the device.

.. confval:: username

    The login name of the user on the device that WA will use. This user should
    have a home directory (unless an alternative working directory is specified
    using the ``working_directory`` config -- see below), and, for full
    functionality, the user should have sudo rights (WA will be able to use
    sudo-less accounts, but some instruments or workloads may not work).

.. confval:: password

    Password for the account on the device. Either this or a ``keyfile`` (see
    below) must be specified.

.. confval:: keyfile

    If key-based authentication is used, this may be used to specify the SSH
    identity file instead of the password.

.. confval:: property_files

    This is a list of paths that will be pulled for each WA run into the __meta
    subdirectory in the results. The intention is to collect meta-data about
    the device that may aid in reproducing the results later. The paths
    specified do not have to exist on the device (they will be ignored if they
    do not). The default list is ``['/proc/version', '/etc/debian_version',
    '/etc/lsb-release', '/etc/arch-release']``.


In addition, ``working_directory``, ``scheduler``, ``core_names``, and
``core_clusters`` can also be specified and have the same meaning as for
Android devices (see above).

A typical ``device_config`` inside ``config.py`` may look something like

.. code-block:: python

    device_config = dict(
        host='192.168.0.7',
        username='guest',
        password='guest',
        core_names=['a7', 'a7', 'a7', 'a15', 'a15'],
        core_clusters=[0, 0, 0, 1, 1],
        # ...
    )


Related Settings
++++++++++++++++

Reboot Policy
-------------

This indicates when during WA execution the device will be rebooted. By default
this is set to ``never``, indicating that WA will not reboot the device. Please
see the ``reboot_policy`` documentation in :ref:`configuration-specification`
for more details.

Execution Order
---------------

``execution_order`` defines the order in which WA will execute workloads.
``by_iteration`` (set by default) will execute the first iteration of each spec
first, followed by the second iteration of each spec (that defines more than
one iteration) and so forth. The alternative will loop through all iterations
for the first spec first, then move on to the second spec, etc. Again, please
see :ref:`configuration-specification` for more details.


Adding a new device interface
+++++++++++++++++++++++++++++

If you are working with a particularly unusual device (e.g. an early-stage
development board) or need to be able to handle some quirk of your Android
build, the configuration available in the ``generic_android`` interface may not
be enough for you. In that case, you may need to write a custom interface for
your device. A device interface is an ``Extension`` (a plug-in) type in WA and
is implemented similar to other extensions (such as workloads or instruments).
Please refer to the :ref:`adding_a_device` section for information on how this
may be done.
diff --git a/doc/source/execution_model.rst b/doc/source/execution_model.rst
new file mode 100644
index 00000000..3140583b
--- /dev/null
+++ b/doc/source/execution_model.rst
@@ -0,0 +1,115 @@
++++++++++++++++++
Framework Overview
++++++++++++++++++

Execution Model
===============

At the high level, the execution model looks as follows:

.. image:: wa-execution.png
   :scale: 50 %

After some initial setup, the framework initializes the device, loads and
initializes instrumentation, and begins executing jobs defined by the workload
specs in the agenda. Each job executes in four basic stages (a sketch of how
these stages map onto a workload implementation is shown after the list):

setup
    Initial setup for the workload is performed. E.g. required assets are
    deployed to the devices, required services or applications are launched,
    etc. Run time configuration of the device for the workload is also
    performed at this time.

run
    This is when the workload actually runs. This is defined as the part of the
    workload that is to be measured. Exactly what happens at this stage depends
    entirely on the workload.

result processing
    Results generated during the execution of the workload, if there are any,
    are collected, parsed and extracted metrics are passed up to the core
    framework.

teardown
    Final clean up is performed; e.g. applications may be closed, files
    generated during execution deleted, etc.
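
The following is a minimal sketch of how a workload implementation might map
onto these stages. The class and metric names are hypothetical;
``update_result`` corresponds to the result processing stage:

.. code-block:: python

    from wlauto import Workload


    class ExampleWorkload(Workload):  # hypothetical example

        name = 'example'

        def setup(self, context):
            # deploy assets and apply run time configuration
            pass

        def run(self, context):
            # the measured portion of the workload
            pass

        def update_result(self, context):
            # parse output and report metrics to the framework
            context.result.add_metric('example_metric', 42, 'units')

        def teardown(self, context):
            # clean up anything created during execution
            pass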
Signals are dispatched (see signal_dispatch_ below) at each stage of workload
execution, which installed instrumentation can hook into in order to collect
measurements, alter workload execution, etc. Instrumentation implementation
usually mirrors that of workloads, defining setup, teardown and result
processing stages for a particular instrument. Instead of a ``run``,
instruments usually implement a ``start`` and a ``stop`` which get triggered
just before and just after a workload run. However, the signal dispatch
mechanism gives a high degree of flexibility to instruments, allowing them to
hook into almost any stage of a WA run (apart from the very early
initialization).

Metrics and artifacts generated by workloads and instrumentation are
accumulated by the framework and are then passed to active result processors.
This happens after each individual workload execution and at the end of the
run. A result processor may choose to act at either or both of these points.


Control Flow
============

This section goes into more detail explaining the relationship between the
major components of the framework and how control passes between them during a
run. It will only go through the major transitions and interactions and will
not attempt to describe every single thing that happens.

.. note:: This is the control flow for the ``wa run`` command which is the main
          functionality of WA. Other commands are much simpler and most of what
          is described below does not apply to them.

#. ``wlauto.core.entry_point`` parses the command from the arguments and
   executes the run command (``wlauto.commands.run.RunCommand``).
#. The run command initializes the output directory and creates a
   ``wlauto.core.agenda.Agenda`` based on the command line arguments. Finally,
   it instantiates a ``wlauto.core.execution.Executor`` and passes it the
   Agenda.
#. The Executor uses the Agenda to create a
   ``wlauto.core.configuration.RunConfiguration`` that fully defines the
   configuration for the run (it will be serialised into the ``__meta``
   subdirectory under the output directory).
#. The Executor proceeds to instantiate and install instrumentation, result
   processors and the device interface, based on the RunConfiguration. The
   Executor also initialises a ``wlauto.core.execution.ExecutionContext``
   which is used to track the current state of the run execution and also
   serves as a means of communication between the core framework and the
   extensions.
#. Finally, the Executor instantiates a ``wlauto.core.execution.Runner``,
   initializes its job queue with workload specs from the RunConfiguration,
   and kicks it off.
#. The Runner performs the run time initialization of the device and goes
   through the workload specs (in the order defined by the ``execution_order``
   setting), running each spec according to the execution model described in
   the previous section. The Runner sends signals (see below) at appropriate
   points during execution.
#. At the end of the run, control is briefly passed back to the Executor, which
   outputs a summary for the run.


.. _signal_dispatch:

Signal Dispatch
===============

WA uses the `louie `_ (formerly, pydispatcher) library for signal dispatch.
Callbacks can be registered for signals emitted during the run. WA uses a
version of louie that has been modified to introduce priority to registered
callbacks (so that callbacks that are known to be slow can be registered with a
lower priority so that they do not interfere with other callbacks).

This mechanism is abstracted for instrumentation. Methods of a
:class:`wlauto.core.Instrument` subclass automatically get hooked to
appropriate signals based on their names when the instrument is "installed" for
the run. Priority can be specified by adding ``very_fast_``, ``fast_``,
``slow_`` or ``very_slow_`` prefixes to method names.

The full list of method names and the signals they map to may be viewed
:ref:`here <instrumentation_method_map>`.
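
For example, a minimal instrument might look like the following sketch. The
class and metric names are hypothetical, and ``from wlauto import Instrument``
assumes the root-package aliasing described in :doc:`conventions`; the signal
names in the comments follow the method map referenced above:

.. code-block:: python

    from wlauto import Instrument


    class ExampleInstrument(Instrument):  # hypothetical example

        name = 'example'

        def setup(self, context):
            # hooked to successful-workload-setup-signal
            pass

        def start(self, context):
            # hooked to before-workload-execution-signal
            pass

        def stop(self, context):
            # hooked to after-workload-execution-signal
            pass

        def slow_update_result(self, context):
            # hooked to after-iteration-result-update-signal; the ``slow_``
            # prefix registers this callback with lowered priority
            context.result.add_metric('example_metric', 42, 'units')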
The signal dispatching mechanism may also be used directly, for example to
dynamically register callbacks at runtime or to allow extensions other than
``Instruments`` to access stages of the run they are normally not aware of.

The sending of signals is the responsibility of the Runner. Signals get sent
during transitions between execution stages and when special events, such as
errors or device reboots, occur.

See Also
--------

.. toctree::
   :maxdepth: 1

   instrumentation_method_map
diff --git a/doc/source/index.rst b/doc/source/index.rst
new file mode 100644
index 00000000..46095f5d
--- /dev/null
+++ b/doc/source/index.rst
@@ -0,0 +1,138 @@
.. Workload Automation 2 documentation master file, created by
   sphinx-quickstart on Mon Jul 15 09:00:46 2013.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

Welcome to Documentation for Workload Automation
================================================

Workload Automation (WA) is a framework for running workloads on real hardware
devices. WA supports a number of output formats as well as additional
instrumentation (such as Streamline traces). A number of workloads are included
with the framework.


.. contents:: Contents


What's New
~~~~~~~~~~

.. toctree::
   :maxdepth: 1

   changes


Usage
~~~~~

This section lists general usage documentation. If you're new to WA2, it is
recommended you start with the :doc:`quickstart` page. This section also
contains installation and configuration guides.


.. toctree::
   :maxdepth: 2

   quickstart
   installation
   device_setup
   invocation
   agenda
   configuration


Extensions
~~~~~~~~~~

This section lists extensions that currently come with WA2. Each package below
represents a particular type of extension (e.g. a workload); each sub-package
of that package is a particular instance of that extension (e.g. the Andebench
workload). Clicking on a link will show what the individual extension does,
what configuration parameters it takes, etc.

For how to implement your own extensions, please refer to the guides in the
:ref:`in-depth` section.

.. raw:: html

+ +.. toctree:: + :maxdepth: 2 + + extensions/workloads + +.. raw:: html + + + +.. toctree:: + :maxdepth: 2 + + extensions/instruments + + +.. raw:: html + + + +.. toctree:: + :maxdepth: 2 + + extensions/result_processors + +.. raw:: html + + + +.. toctree:: + :maxdepth: 2 + + extensions/devices + +.. raw:: html + +
.. _in-depth:

In-depth
~~~~~~~~

This section contains more advanced topics, such as how to write your own
extensions and detailed descriptions of how WA functions under the hood.

.. toctree::
   :maxdepth: 2

   conventions
   writing_extensions
   execution_model
   resources
   additional_topics
   daq_device_setup
   revent
   contributing

API Reference
~~~~~~~~~~~~~

.. toctree::
   :maxdepth: 5

   api/modules


Indices and tables
~~~~~~~~~~~~~~~~~~

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

diff --git a/doc/source/installation.rst b/doc/source/installation.rst
new file mode 100644
index 00000000..0485ddcd
--- /dev/null
+++ b/doc/source/installation.rst
@@ -0,0 +1,144 @@
============
Installation
============

.. module:: wlauto

This page describes how to install Workload Automation 2.


Prerequisites
=============

Operating System
----------------

WA runs on a native Linux install. It was tested with Ubuntu 12.04, but any
recent Linux distribution should work. It should run on either a 32-bit or
64-bit OS, provided the correct version of Android (see below) was installed.
Officially, **other environments are not supported**. WA has been known to run
on Linux virtual machines and in Cygwin environments, though additional
configuration may be required in both cases (known issues include making sure
USB/serial connections are passed to the VM, and wrong python/pip binaries
being picked up in Cygwin). WA *should* work on other Unix-based systems such
as BSD or Mac OS X, but it has not been tested in those environments. WA *does
not* run on Windows (though it should be possible to get limited functionality
with minimal porting effort).


Android SDK
-----------

You need to have the Android SDK with at least one platform installed. To
install it, download the ADT Bundle from here_. Extract it and add
``<path to extracted location>/sdk/platform-tools`` and
``<path to extracted location>/sdk/tools`` to your ``PATH``. To test that
you've installed it properly, run ``adb version``; the output should be similar
to this::

    $ adb version
    Android Debug Bridge version 1.0.31
    $

.. _here: https://developer.android.com/sdk/index.html

Once that is working, run ::

    android update sdk

This will open up a dialog box listing available android platforms and
corresponding API levels, e.g. ``Android 4.3 (API 18)``. For WA, you will need
at least API level 18 (i.e. Android 4.3), though installing the latest is
usually the best bet.

Optionally (but recommended), you should also set ``ANDROID_HOME`` to point to
the install location of the SDK (i.e. ``<path to extracted location>/sdk``).


Python
------

Workload Automation 2 requires Python 2.7 (Python 3 is not supported at the
moment).


pip
---

pip is the recommended package manager for Python. It is not part of the
standard Python distribution and would need to be installed separately. On
Ubuntu and similar distributions, this may be done with APT::

    sudo apt-get install python-pip


Python Packages
---------------

.. note:: pip should automatically download and install missing dependencies,
          so if you're using pip, you can skip this section.
+
+Workload Automation 2 depends on the following additional libraries:
+
+ * pexpect
+ * docutils
+ * pySerial
+ * pyYAML
+ * python-dateutil
+
+You can install these with pip::
+
+    sudo pip install pexpect
+    sudo pip install pyserial
+    sudo pip install pyyaml
+    sudo pip install docutils
+    sudo pip install python-dateutil
+
+Some of these may also be available in your distro's repositories, e.g. ::
+
+    sudo apt-get install python-serial
+
+Distro package versions tend to be older, so pip installation is recommended.
+However, pip will always download and try to build the source, so in some
+situations distro binaries may provide an easier fallback. Please also note that
+distro package names may differ from pip package names.
+
+
+Optional Python Packages
+------------------------
+
+.. note:: unlike the mandatory dependencies in the previous section,
+          pip will *not* install these automatically, so you will have
+          to explicitly install them if/when you need them.
+
+In addition to the mandatory packages listed in the previous sections, some WA
+functionality (e.g. certain extensions) may have additional dependencies. Since
+they are not necessary to be able to use most of WA, they are not made mandatory
+to simplify initial WA installation. If you try to use an extension that has
+additional, unmet dependencies, WA will tell you before starting the run, and
+you can install them then. They are listed here for those that would rather
+install them upfront (e.g. if you're planning to use WA in an environment that
+may not always have Internet access).
+
+ * nose
+ * pandas
+ * PyDAQmx
+ * pymongo
+ * jinja2
+
+
+.. note:: Some packages have C extensions and will require Python development
+          headers to install. You can get those by installing the ``python-dev``
+          package via apt on Ubuntu (or the equivalent for your distribution).
+
+Installing
+==========
+
+Download the tarball and run pip::
+
+    sudo pip install wlauto-$version.tar.gz
+
+If the above succeeds, try ::
+
+    wa --version
+
+Hopefully, this should output something along the lines of "Workload Automation
+version $version".
diff --git a/doc/source/instrumentation_method_map.rst b/doc/source/instrumentation_method_map.rst
new file mode 100644
index 00000000..f68ecb59
--- /dev/null
+++ b/doc/source/instrumentation_method_map.rst
@@ -0,0 +1,73 @@
+Instrumentation Signal-Method Mapping
+=====================================
+
+.. _instrumentation_method_map:
+
+Instrument methods get automatically hooked up to signals based on their names. Mostly, the method
+name corresponds to the name of the signal; however, there are a few convenience aliases defined
+(listed first) to make it easier to relate instrumentation code to the workload execution model.
+
+======================================== =========================================
+method name                              signal
+======================================== =========================================
+initialize                               run-init-signal
+setup                                    successful-workload-setup-signal
+start                                    before-workload-execution-signal
+stop                                     after-workload-execution-signal
+process_workload_result                  successful-iteration-result-update-signal
+update_result                            after-iteration-result-update-signal
+teardown                                 after-workload-teardown-signal
+finalize                                 run-fin-signal
+on_run_start                             start-signal
+on_run_end                               end-signal
+on_workload_spec_start                   workload-spec-start-signal
+on_workload_spec_end                     workload-spec-end-signal
+on_iteration_start                       iteration-start-signal
+on_iteration_end                         iteration-end-signal
+before_initial_boot                      before-initial-boot-signal
+on_successful_initial_boot               successful-initial-boot-signal
+after_initial_boot                       after-initial-boot-signal
+before_first_iteration_boot              before-first-iteration-boot-signal
+on_successful_first_iteration_boot       successful-first-iteration-boot-signal
+after_first_iteration_boot               after-first-iteration-boot-signal
+before_boot                              before-boot-signal
+on_successful_boot                       successful-boot-signal
+after_boot                               after-boot-signal
+on_spec_init                             spec-init-signal
+on_run_init                              run-init-signal
+on_iteration_init                        iteration-init-signal
+before_workload_setup                    before-workload-setup-signal
+on_successful_workload_setup             successful-workload-setup-signal
+after_workload_setup                     after-workload-setup-signal
+before_workload_execution                before-workload-execution-signal
+on_successful_workload_execution         successful-workload-execution-signal
+after_workload_execution                 after-workload-execution-signal
+before_workload_result_update            before-iteration-result-update-signal
+on_successful_workload_result_update     successful-iteration-result-update-signal
+after_workload_result_update             after-iteration-result-update-signal
+before_workload_teardown                 before-workload-teardown-signal
+on_successful_workload_teardown          successful-workload-teardown-signal
+after_workload_teardown                  after-workload-teardown-signal
+before_overall_results_processing        before-overall-results-process-signal
+on_successful_overall_results_processing successful-overall-results-process-signal
+after_overall_results_processing         after-overall-results-process-signal
+on_error                                 error_logged
+on_warning                               warning_logged
+======================================== =========================================
+
+
+The names above may be prefixed with one of the pre-defined prefixes to set the priority of the
+Instrument method relative to other callbacks registered for the signal (within the same priority
+level, callbacks are invoked in the order they were registered). The table below shows the mapping
+of each prefix to the corresponding priority:
+
+=========== ===
+prefix      priority
+=========== ===
+very_fast\_ 20
+fast\_      10
+normal\_    0
+slow\_      -10
+very_slow\_ -20
+=========== ===
+
diff --git a/doc/source/instrumentation_method_map.template b/doc/source/instrumentation_method_map.template
new file mode 100644
index 00000000..48003245
--- /dev/null
+++ b/doc/source/instrumentation_method_map.template
@@ -0,0 +1,17 @@
+Instrumentation Signal-Method Mapping
+=====================================
+
+.. _instrumentation_method_map:
+
+Instrument methods get automatically hooked up to signals based on their names. Mostly, the method
+name corresponds to the name of the signal; however, there are a few convenience aliases defined
+(listed first) to make it easier to relate instrumentation code to the workload execution model.
+
+$signal_names
+
+The names above may be prefixed with one of the pre-defined prefixes to set the priority of the
+Instrument method relative to other callbacks registered for the signal (within the same priority
+level, callbacks are invoked in the order they were registered). The table below shows the mapping
+of each prefix to the corresponding priority:
+
+$priority_prefixes
diff --git a/doc/source/invocation.rst b/doc/source/invocation.rst
new file mode 100644
index 00000000..5c8ead92
--- /dev/null
+++ b/doc/source/invocation.rst
@@ -0,0 +1,135 @@
+.. _invocation:
+
+========
+Commands
+========
+
+Installing the wlauto package will add the ``wa`` command to your system,
+which you can run from anywhere. This has a number of sub-commands, which can
+be viewed by executing ::
+
+    wa -h
+
+Individual sub-commands are discussed in detail below.
+
+run
+---
+
+The most common sub-command you will use is ``run``. This will run the specified
+workload(s) and process the resulting output. It takes a single mandatory
+argument that specifies what you want WA to run. This could be either a
+workload name, or a path to an "agenda" file that allows you to specify multiple
+workloads as well as a lot of additional configuration (see the :ref:`agenda`
+section for details). Executing ::
+
+    wa run -h
+
+will display help for this sub-command that will look something like this::
+
+    usage: run [-d DIR] [-f] AGENDA
+
+    Execute automated workloads on a remote device and process the resulting
+    output.
+
+    positional arguments:
+      AGENDA                Agenda for this workload automation run. This defines
+                            which workloads will be executed, how many times, with
+                            which tunables, etc. See /usr/local/lib/python2.7
+                            /dist-packages/wlauto/agenda-example.csv for an
+                            example of how this file should be structured.
+
+    optional arguments:
+      -h, --help            show this help message and exit
+      -c CONFIG, --config CONFIG
+                            specify an additional config.py
+      -v, --verbose         The scripts will produce verbose output.
+      --version             Output the version of Workload Automation and exit.
+      --debug               Enable debug mode. Note: this implies --verbose.
+      -d DIR, --output-directory DIR
+                            Specify a directory where the output will be
+                            generated. If the directory already exists, the script
+                            will abort unless the -f option (see below) is used, in
+                            which case the contents of the directory will be
+                            overwritten. If this option is not specified, then
+                            wa_output will be used instead.
+      -f, --force           Overwrite output directory if it exists. By default,
+                            the script will abort in this situation to prevent
+                            accidental data loss.
+      -i ID, --id ID        Specify a workload spec ID from an agenda to run. If
+                            this is specified, only that particular spec will be
+                            run, and other workloads in the agenda will be
+                            ignored. This option may be used to specify multiple
+                            IDs.
+
+
+Output Directory
+~~~~~~~~~~~~~~~~
+
+The exact contents of the output directory will depend on the configuration options
+used and the instrumentation and output processors enabled. Typically, the output
+directory will contain a results file at the top level that lists all
+measurements that were collected (currently, csv and json formats are
+supported), along with a subdirectory for each iteration executed, with output
+for that specific iteration.
+
+At the top level, there will also be a run.log file containing the complete log
+output for the execution. The contents of this file are equivalent to what you
+would get in the console when using the --verbose option.
+
+Finally, there will be a __meta subdirectory.
This will contain a copy of the
+agenda file used to run the workloads along with any other device-specific
+configuration files used during execution.
+
+
+list
+----
+
+This lists all extensions of a particular type. For example ::
+
+    wa list workloads
+
+will list all workloads currently included in WA. The list will consist of
+extension names and short descriptions of the functionality they offer.
+
+
+show
+----
+
+This will show detailed information about an extension, including a more in-depth
+description and any parameters/configuration that are available. For example
+executing ::
+
+    wa show andebench
+
+will produce something like ::
+
+
+    andebench
+
+    AndEBench is an industry standard Android benchmark provided by The Embedded Microprocessor Benchmark Consortium
+    (EEMBC).
+
+    parameters:
+
+    number_of_threads
+        Number of threads that will be spawned by AndEBench.
+        type: int
+
+    single_threaded
+        If ``true``, AndEBench will run with a single thread. Note: this must not be specified if ``number_of_threads``
+        has been specified.
+        type: bool
+
+    http://www.eembc.org/andebench/about.php
+
+    From the website:
+
+    - Initial focus on CPU and Dalvik interpreter performance
+    - Internal algorithms concentrate on integer operations
+    - Compares the difference between native and Java performance
+    - Implements flexible multicore performance analysis
+    - Results displayed in Iterations per second
+    - Detailed log file for comprehensive engineering analysis
+
+
diff --git a/doc/source/quickstart.rst b/doc/source/quickstart.rst
new file mode 100644
index 00000000..7b9ec9b7
--- /dev/null
+++ b/doc/source/quickstart.rst
@@ -0,0 +1,162 @@
+==========
+Quickstart
+==========
+
+This section will show you how to quickly start running workloads using
+Workload Automation 2.
+
+
+Install
+=======
+
+.. note:: This is a quick summary. For more detailed instructions, please see
+          the :doc:`installation` section.
+
+Make sure you have Python 2.7 and a recent Android SDK with API level 18 or above
+installed on your system. For the SDK, make sure that either the ``ANDROID_HOME``
+environment variable is set, or that ``adb`` is in your ``PATH``.
+
+.. note:: A complete install of the Android SDK is required, as WA uses a
+          number of its utilities, not just adb.
+
+In addition to the base Python 2.7 install, you will also need to have ``pip``
+(Python's package manager) installed. This is usually a separate package.
+
+Once you have the pre-requisites and a tarball with the workload automation package,
+you can install it with pip::
+
+    sudo pip install wlauto-2.2.0dev.tar.gz
+
+This will install Workload Automation on your system, along with the Python
+packages it depends on.
+
+(Optional) Verify installation
+-------------------------------
+
+Once the tarball has been installed, try executing ::
+
+    wa -h
+
+You should see a help message outlining available subcommands.
+
+
+(Optional) APK files
+--------------------
+
+A large number of WA workloads are installed as APK files. These cannot be
+distributed with WA and so you will need to obtain them separately.
+
+For more details, please see the :doc:`installation` section.
+
+
+Configure Your Device
+=====================
+
+Out of the box, WA is configured to work with a generic Android device through
+``adb``.
If you only have one device listed when you execute ``adb devices``,
+and your device has a standard Android configuration, then no extra configuration
+is required (if your device is connected via network, you will have to manually execute
+``adb connect <device ip>`` so that it appears in the device listing).
+
+If you have multiple devices connected, you will need to tell WA which one you
+want it to use. You can do that by setting ``adb_name`` in the device configuration inside
+``~/.workload_automation/config.py``\ , e.g.
+
+.. code-block:: python
+
+    # ...
+
+    device_config = dict(
+        adb_name = 'abcdef0123456789',
+        # ...
+    )
+
+    # ...
+
+This should give you basic functionality. If your device has a non-standard
+Android configuration (e.g. it's a development board) or you need some advanced
+functionality (e.g. big.LITTLE tuning parameters), additional configuration may
+be required. Please see the :doc:`device_setup` section for more details.
+
+
+Running Your First Workload
+===========================
+
+The simplest way to run a workload is to specify it as a parameter to the WA ``run``
+sub-command::
+
+    wa run dhrystone
+
+You will see INFO output from WA as it executes each stage of the run. The
+output of a completed run should look something like this::
+
+    INFO     Initializing
+    INFO     Running workloads
+    INFO     Connecting to device
+    INFO     Initializing device
+    INFO     Running workload 1 dhrystone (iteration 1)
+    INFO         Setting up
+    INFO         Executing
+    INFO         Processing result
+    INFO         Tearing down
+    INFO     Processing overall results
+    INFO     Status available in wa_output/status.txt
+    INFO     Done.
+    INFO     Ran a total of 1 iterations: 1 OK
+    INFO     Results can be found in wa_output
+
+Once the run has completed, you will find a directory called ``wa_output``
+in the location where you have invoked ``wa run``. Within this directory,
+you will find a "results.csv" file which will contain results obtained for
+dhrystone, as well as a "run.log" file containing detailed log output for
+the run. You will also find a sub-directory called 'dhrystone_1_1' that
+contains the results for that iteration. Finally, you will find a copy of the
+agenda file in the ``wa_output/__meta`` subdirectory. The contents of
+iteration-specific subdirectories will vary from workload to workload, and,
+along with the contents of the main output directory, will depend on the
+instrumentation and result processors that were enabled for that run.
+
+The ``run`` sub-command takes a number of options that control its behavior;
+you can view those by executing ``wa run -h``. Please see the :doc:`invocation`
+section for details.
+
+
+Create an Agenda
+================
+
+Simply running a single workload is normally of little use. Typically, you would
+want to specify several workloads, set up the device state and, possibly, enable
+additional instrumentation. To do this, you would need to create an "agenda" for
+the run that outlines everything you want WA to do.
+
+Agendas are written using the YAML_ markup language. A simple agenda might look
+like this:
+
+.. code-block:: yaml
+
+    config:
+        instrumentation: [~execution_time]
+        result_processors: [json]
+    global:
+        iterations: 2
+    workloads:
+        - memcpy
+        - name: dhrystone
+          params:
+              mloops: 5
+              threads: 1
+
+This agenda
+
+- Specifies two workloads: memcpy and dhrystone.
+- Specifies that dhrystone should run in one thread and execute five million loops.
+- Specifies that each of the two workloads should be run twice.
+
+- Enables the json result processor, in addition to the result processors enabled in
+  the config.py.
+- Disables the execution_time instrument, if it is enabled in the config.py.
+
+There is a lot more that could be done with an agenda. Please see the :doc:`agenda`
+section for details.
+
+.. _YAML: http://en.wikipedia.org/wiki/YAML
+
diff --git a/doc/source/resources.rst b/doc/source/resources.rst
new file mode 100644
index 00000000..af944e6f
--- /dev/null
+++ b/doc/source/resources.rst
@@ -0,0 +1,45 @@
+Dynamic Resource Resolution
+===========================
+
+Introduced in version 2.1.3.
+
+The idea is to decouple resource identification from resource discovery.
+Workloads/instruments/devices/etc state *what* resources they need, and not
+*where* to look for them -- this instead is left to the resource resolver that
+is now part of the execution context. The actual discovery of resources is
+performed by resource getters that are registered with the resolver.
+
+A resource type is defined by a subclass of
+:class:`wlauto.core.resource.Resource`. An instance of this class describes a
+resource that is to be obtained. At minimum, a ``Resource`` instance has an
+owner (which is typically the object that is looking for the resource), but
+specific resource types may define other parameters that describe an instance of
+that resource (such as file names, URLs, etc).
+
+An object looking for a resource invokes a resource resolver with an instance of
+``Resource`` describing the resource it is after. The resolver goes through the
+getters registered for that resource type in priority order attempting to obtain
+the resource; once the resource is obtained, it is returned to the calling
+object. If none of the registered getters could find the resource, ``None`` is
+returned instead.
+
+The most common kind of object looking for resources is a ``Workload``, and
+since v2.1.3, the ``Workload`` class defines the
+:py:meth:`wlauto.core.workload.Workload.init_resources` method that may be
+overridden by subclasses to perform resource resolution. For example, a workload
+looking for an APK file would do so like this::
+
+    from wlauto import Workload
+    from wlauto.common.resources import ApkFile
+
+    class AndroidBenchmark(Workload):
+
+        # ...
+
+        def init_resources(self, context):
+            self.apk_file = context.resource.get(ApkFile(self))
+
+        # ...
+
+
+Currently available resource types are defined in :py:mod:`wlauto.common.resources`.
diff --git a/doc/source/revent.rst b/doc/source/revent.rst
new file mode 100644
index 00000000..e3b756ce
--- /dev/null
+++ b/doc/source/revent.rst
@@ -0,0 +1,97 @@
+.. _revent_files_creation:
+
+revent
+======
+
+The revent utility can be used to record and later play back a sequence of user
+input events, such as key presses and touch screen taps. This is an alternative
+to Android UI Automator for providing automation for workloads.
::
+
+
+    usage:
+        revent [record time file|replay file|info] [verbose]
+            record: stops after either return on stdin
+                    or time (in seconds)
+                    and stores in file
+            replay: replays eventlog from file
+            info: shows info about each event char device
+            any additional parameters make it verbose
+
+Recording
+---------
+
+To record, transfer the revent binary to the device, then invoke ``revent
+record``, giving it the time (in seconds) you want to record for, and the
+file you want to record to (WA expects these files to have a .revent
+extension)::
+
+    host$ adb push revent /data/local/revent
+    host$ adb shell
+    device# cd /data/local
+    device# ./revent record 1000 my_recording.revent
+
+The recording has now started, and button presses, taps, etc. you perform on the
+device will go into the .revent file. The recording will stop after the
+specified time period, and you can also stop it by hitting return in the adb
+shell.
+
+Replaying
+---------
+
+To replay a recorded file, run ``revent replay`` on the device, giving it the
+file you want to replay::
+
+    device# ./revent replay my_recording.revent
+
+
+Using revent With Workloads
+---------------------------
+
+Some workloads (pretty much all games) rely on recorded revents for their
+execution. :class:`wlauto.common.GameWorkload`-derived workloads expect two
+revent files -- one for performing the initial setup (navigating menus,
+selecting game modes, etc), and one for the actual execution of the game.
+Because revents are very device-specific\ [*]_, these two files would need to
+be recorded for each device.
+
+The files must be called ``<device name>.(setup|run).revent``, where
+``<device name>`` is the name of your device (as defined by the ``name``
+attribute of your device's class). WA will look for these files in two
+places: ``<install dir>/wlauto/workloads/<workload name>/revent_files``
+and ``~/.workload_automation/dependencies/<workload name>``. The first
+location is primarily intended for revent files that come with WA (and if
+you did a system-wide install, you'll need sudo to add files there), so it's
+probably easier to use the second location for the files you record. Also,
+if revent files for a workload exist in both locations, the files under
+``~/.workload_automation/dependencies`` will be used in favor of those
+installed with WA.
+
+For example, if you wanted to run the angrybirds workload on an "Acme" device, you
+would record the setup and run revent files using the method outlined in the section
+above and then pull them from the device into the following locations::
+
+    ~/.workload_automation/dependencies/angrybirds/Acme.setup.revent
+    ~/.workload_automation/dependencies/angrybirds/Acme.run.revent
+
+(you may need to create the intermediate directories if they don't already
+exist).
+
+.. [*] It's not just about screen resolution -- the event codes may be different
+       even if devices use the same screen.
+
+
+revent vs. UiAutomator
+----------------------
+
+In general, Android UI Automator is the preferred way of automating user input
+for workloads because, unlike revent, UI Automator does not depend on a
+particular screen resolution, and so is more portable across different devices.
+It also gives better control and can potentially be faster for long UI
+manipulations, as input events are scripted based on the available UI elements,
+rather than generated by human input.
+
+On the other hand, revent can be used to manipulate pretty much any workload,
+whereas UI Automator only works for Android UI elements (such as text boxes or
+radio buttons), which makes the latter useless for things like games. Recording
+a revent sequence is also faster than writing automation code (on the other hand,
+one would need to maintain a different revent log for each screen resolution).
diff --git a/doc/source/wa-execution.png b/doc/source/wa-execution.png
new file mode 100644
index 00000000..9bdea6fd
Binary files /dev/null and b/doc/source/wa-execution.png differ
diff --git a/doc/source/writing_extensions.rst b/doc/source/writing_extensions.rst
new file mode 100644
index 00000000..737a1166
--- /dev/null
+++ b/doc/source/writing_extensions.rst
@@ -0,0 +1,956 @@
+==================
+Writing Extensions
+==================
+
+Workload Automation offers several extension points (or plugin types). The most
+interesting of these are:
+
+:workloads: These are the tasks that get executed and measured on the device. These
+            can be benchmarks, high-level use cases, or pretty much anything else.
+:devices: These are interfaces to the physical devices (development boards or end-user
+          devices, such as smartphones) that use cases run on. Typically, each model of a
+          physical device would require its own interface class (though some functionality
+          may be reused by subclassing from an existing base).
+:instruments: Instruments allow collecting additional data from workload execution (e.g.
+              system traces). Instruments are not specific to a particular Workload. Instruments
+              can hook into any stage of workload execution.
+:result processors: These are used to format the results of workload execution once they have been
+                    collected. Depending on the callback used, these will run either after each
+                    iteration or at the end of the run, after all of the results have been
+                    collected.
+
+You create an extension by subclassing the appropriate base class, defining
+appropriate methods and attributes, and putting the .py file with the class into
+an appropriate subdirectory under ``~/.workload_automation`` (there is one for
+each extension type).
+
+
+Extension Basics
+================
+
+This sub-section covers things common to implementing extensions of all types.
+It is recommended you familiarize yourself with the information here before
+proceeding onto guidance for specific extension types.
+
+To create an extension, you basically subclass an appropriate base class and then
+implement the appropriate methods.
+
+The Context
+-----------
+
+The majority of methods in extensions accept a context argument. This is an
+instance of :class:`wlauto.core.execution.ExecutionContext`. It contains
+information about the current state of execution of WA and keeps track of things
+like which workload is currently running and the current iteration.
+
+Notable attributes of the context are:
+
+context.spec
+    the current workload specification being executed. This is an
+    instance of :class:`wlauto.core.configuration.WorkloadRunSpec`
+    and defines the workload and the parameters under which it is
+    being executed.
+
+context.workload
+    the ``Workload`` object that is currently being executed.
+
+context.current_iteration
+    The current iteration of the spec that is being executed. Note that this
+    is the iteration for that spec, i.e. the number of times that spec has
+    been run, *not* the total number of iterations that have been executed so
+    far.
+
+context.result
+    This is the result object for the current iteration.
    This is an instance
+    of :class:`wlauto.core.result.IterationResult`. It contains the status
+    of the iteration as well as the metrics and artifacts generated by the
+    workload and enabled instrumentation.
+
+context.device
+    The device interface object that can be used to interact with the
+    device. Note that workloads and instruments have their own device
+    attribute and they should be using that instead.
+
+In addition to these, the context also defines a few useful paths (see below).
+
+
+Paths
+-----
+
+You should avoid using hard-coded absolute paths in your extensions whenever
+possible, as they make your code too dependent on a particular environment and
+may mean having to make adjustments when moving to new (host and/or device)
+platforms. To help avoid hard-coded absolute paths, WA defines
+a number of standard locations. You should strive to define your paths relative
+to one of those.
+
+On the host
+~~~~~~~~~~~
+
+Host paths are available through the context object, which is passed to most
+extension methods.
+
+context.run_output_directory
+    This is the top-level output directory for all WA results (by default,
+    this will be "wa_output" in the directory in which WA was invoked).
+
+context.output_directory
+    This is the output directory for the current iteration. This will be an
+    iteration-specific subdirectory under the main results location. If
+    there is no current iteration (e.g. when processing overall run results)
+    this will point to the same location as ``run_output_directory``.
+
+context.host_working_directory
+    This is an additional location that may be used by extensions to store
+    non-iteration-specific intermediate files (e.g. configuration).
+
+Additionally, the global ``wlauto.settings`` object exposes one other location:
+
+settings.dependency_directory
+    this is the root directory for all extension dependencies (e.g. media
+    files, assets etc) that are not included within the extension itself.
+
+As per Python best practice, it is recommended that the methods and values in the
+``os.path`` standard library module are used for host path manipulation.
+
+On the device
+~~~~~~~~~~~~~
+
+Workloads and instruments have a ``device`` attribute, which is an interface to
+the device used by WA. It defines the following location:
+
+device.working_directory
+    This is the directory for all WA-related files on the device. All files
+    deployed to the device should be pushed to somewhere under this location
+    (the only exception being executables installed with the ``device.install``
+    method).
+
+Since there could be a mismatch between the path notation used by the host and the
+device, the ``os.path`` module should *not* be used for on-device path
+manipulation. Instead, the device has an equivalent module exposed through the
+``device.path`` attribute. This has all the same attributes and behaves the
+same way as ``os.path``, but is guaranteed to produce valid paths for the device,
+irrespective of the host's path notation.
+
+.. note:: result processors, unlike workloads and instruments, do not have their
+          own device attribute; however they can access the device through the
+          context.
+
+
+Parameters
+----------
+
+All extensions can be parameterized. Parameters are specified using the
+``parameters`` class attribute. This should be a list of
+:class:`wlauto.core.Parameter` instances. The following attributes can be
+specified on parameter creation:
+
+name
+    This is the only mandatory argument.
    The name will be used to create a
+    corresponding attribute in the extension instance, so it must be a valid
+    Python identifier.
+
+kind
+    This is the type of the value of the parameter. This can be any
+    callable. Normally, this should be a standard Python type, e.g. ``int``
+    or ``float``, or one of the types defined in :mod:`wlauto.utils.types`.
+    If not explicitly specified, this will default to ``str``.
+
+    .. note:: Irrespective of the ``kind`` specified, ``None`` is always a
+              valid value for a parameter. If you don't want to allow
+              ``None``, then set ``mandatory`` (see below) to ``True``.
+
+allowed_values
+    A list of the only allowed values for this parameter.
+
+    .. note:: For composite types, such as ``list_of_strings`` or
+              ``list_of_ints`` in :mod:`wlauto.utils.types`, each element of
+              the value will be checked against ``allowed_values`` rather
+              than the composite value itself.
+
+default
+    The default value to be used for this parameter if one has not been
+    specified by the user. Defaults to ``None``.
+
+mandatory
+    A ``bool`` indicating whether this parameter is mandatory. Setting this
+    to ``True`` will make ``None`` an illegal value for the parameter.
+    Defaults to ``False``.
+
+    .. note:: Specifying a ``default`` will mean that this setting will,
+              effectively, be ignored (unless the user sets the param to ``None``).
+
+    .. note:: Mandatory parameters are *bad*. If at all possible, you should
+              strive to provide a sensible ``default`` or to make do without
+              the parameter. Only when the param is absolutely necessary,
+              and there really is no sensible default that could be given
+              (e.g. something like login credentials), should you consider
+              making it mandatory.
+
+constraint
+    This is an additional constraint to be enforced on the parameter beyond
+    its type or fixed allowed values set. This should be a predicate (a function
+    that takes a single argument -- the user-supplied value -- and returns
+    a ``bool`` indicating whether the constraint has been satisfied).
+
+override
+    A parameter name must be unique not only within an extension but also
+    within that extension's class hierarchy. If you try to declare a parameter
+    with the same name as one that already exists, you will get an error. If you
+    do want to override a parameter from further up in the inheritance
+    hierarchy, you can indicate that by setting the ``override`` attribute to
+    ``True``.
+
+    When overriding, you do not need to specify every other attribute of the
+    parameter, just the ones you want to override. Values for the rest will
+    be taken from the parameter in the base class.
+
+
+Validation and cross-parameter constraints
+------------------------------------------
+
+An extension will get validated at some point after construction. When exactly
+this occurs depends on the extension type, but it *will* be validated before it
+is used.
+
+You can implement the ``validate`` method in your extension (that takes no arguments
+beyond the ``self``) to perform any additional *internal* validation in your
+extension. By "internal", I mean that you cannot make assumptions about the
+surrounding environment (e.g. that the device has been initialized).
+
+The contract for the ``validate`` method is that it should raise an exception
+(either ``wlauto.exceptions.ConfigError`` or an extension-specific exception type -- see
+further on this page) if some validation condition has not been, and cannot be, met.
+If the method returns without raising an exception, then the extension is in a
+valid internal state.
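+
+As an illustration, here is a minimal sketch of such a method (the instrument and
+its parameters are hypothetical, invented for this example)::
+
+    from wlauto import Instrument, Parameter
+    from wlauto.exceptions import ConfigError
+
+    class SamplingInstrument(Instrument):
+
+        name = 'sampling_instrument'
+
+        parameters = [
+            Parameter('sample_rate', kind=int, default=10),
+            Parameter('buffer_size', kind=int, default=None),
+        ]
+
+        def validate(self):
+            if self.sample_rate <= 0:
+                # A configuration problem the user can fix in the agenda/config.
+                raise ConfigError('sample_rate must be a positive integer.')
+            if self.buffer_size is None:
+                # A cross-parameter constraint: the default buffer size is
+                # derived from the sample rate, so it is resolved here rather
+                # than declared on the Parameter.
+                self.buffer_size = self.sample_rate * 10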
+
+Note that ``validate`` can be used not only to verify, but also to impose a
+valid internal state. In particular, this is where cross-parameter constraints can
+be resolved. If the ``default`` or ``allowed_values`` of one parameter depend on
+another parameter, there is no way to express that declaratively when specifying
+the parameters. In that case, the dependent attribute should be left unspecified
+on creation and should instead be set inside ``validate``.
+
+Logging
+-------
+
+Every extension class has its own logger that you can access through
+``self.logger`` inside the extension's methods. Generally, a :class:`Device` will log
+everything it is doing, so you shouldn't need to add much additional logging in
+your extension's methods. But you might want to log additional information, e.g.
+what settings your extension is using, what it is doing on the host, etc.
+Operations on the host will not normally be logged, so your extension should
+definitely log what it is doing on the host. One situation in particular where
+you should add logging is before doing something that might take a significant amount
+of time, such as downloading a file.
+
+
+Documenting
+-----------
+
+All extensions and their parameters should be documented. For extensions
+themselves, this is done through the ``description`` class attribute. The convention
+for an extension description is that the first paragraph should be a short
+summary description of what the extension does and why one would want to use it
+(among other things, this will get extracted and used by the ``wa list`` command).
+Subsequent paragraphs (separated by blank lines) can then provide a more
+detailed description, including any limitations and setup instructions.
+
+For parameters, the description is passed as an argument on creation. Please
+note that if ``default``, ``allowed_values``, or ``constraint`` are set in the
+parameter, they do not need to be explicitly mentioned in the description (WA's
+documentation utilities will automatically pull those). If the ``default`` is set
+in ``validate`` or additional cross-parameter constraints exist, this *should*
+be documented in the parameter description.
+
+Both extensions and their parameters should be documented using reStructuredText
+markup (standard markup for Python documentation). See:
+
+http://docutils.sourceforge.net/rst.html
+
+Aside from that, it is up to you how you document your extension. You should try
+to provide enough information so that someone unfamiliar with your extension is
+able to use it, e.g. you should document all settings and parameters your
+extension expects (including what the valid values are).
+
+
+Error Notification
+------------------
+
+When you detect an error condition, you should raise an appropriate exception to
+notify the user. The exception would typically be :class:`ConfigError` or
+(depending on the type of the extension)
+:class:`WorkloadError`/:class:`DeviceError`/:class:`InstrumentError`/:class:`ResultProcessorError`.
+All these errors are defined in the :mod:`wlauto.exceptions` module.
+
+:class:`ConfigError` should be raised where there is a problem in the configuration
+specified by the user (either through the agenda or config files). These errors
+are meant to be resolvable by simple adjustments to the configuration (and the
+error message should suggest what adjustments need to be made). For all other
+errors, such as missing dependencies, a mis-configured environment, problems
+performing operations, etc., the extension type-specific exceptions should be
+used.
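+
+For example, a workload might distinguish between the two cases along these
+lines (a sketch only -- the parameter and the on-device check are hypothetical)::
+
+    from wlauto.exceptions import ConfigError, WorkloadError
+
+    # ...
+
+    def validate(self):
+        if self.iterations < 1:
+            # The user can fix this by editing the agenda or config.
+            raise ConfigError('iterations must be at least 1.')
+
+    def setup(self, context):
+        if not self.device.file_exists(self.device_binary):
+            # Not a configuration problem, so use the workload-specific error.
+            raise WorkloadError('{} not found on the device.'.format(self.device_binary))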
+
+If the extension itself is capable of recovering from the error and carrying
+on, it may make more sense to log an ERROR or WARNING level message using the
+extension's logger and to continue operation.
+
+
+Utils
+-----
+
+Workload Automation defines a number of utilities collected under the
+:mod:`wlauto.utils` subpackage. These utilities were created to help with the
+implementation of the framework itself, but may also be useful when
+implementing extensions.
+
+
+Adding a Workload
+=================
+
+.. note:: You can use the ``wa create workload [name]`` command to generate a new workload
+          structure for you. This command can also create the boilerplate for
+          UI automation, if your workload needs it. See ``wa create -h`` for more
+          details.
+
+New workloads can be added by subclassing :class:`wlauto.core.workload.Workload`.
+
+
+The Workload class defines the following interface::
+
+    class Workload(Extension):
+
+        name = None
+
+        def init_resources(self, context):
+            pass
+
+        def setup(self, context):
+            raise NotImplementedError()
+
+        def run(self, context):
+            raise NotImplementedError()
+
+        def update_result(self, context):
+            raise NotImplementedError()
+
+        def teardown(self, context):
+            raise NotImplementedError()
+
+        def validate(self):
+            pass
+
+.. note:: Please see the :doc:`conventions` section for notes on how to interpret
+          this.
+
+The interface should be implemented as follows:
+
+    :name: This identifies the workload (e.g. it is used to specify the workload
+           in the agenda_).
+    :init_resources: This method may optionally be overridden to implement dynamic
+                     resource discovery for the workload.
+                     **Added in version 2.1.3**
+    :setup: Everything that needs to be in place for workload execution should
+            be done in this method. This includes copying files to the device,
+            starting up an application, configuring communications channels,
+            etc.
+    :run: This method should perform the actual task that is being measured.
+          When this method exits, the task is assumed to be complete.
+
+          .. note:: Instrumentation is kicked off just before calling this
+                    method and is disabled right after, so everything in this
+                    method is being measured. Therefore this method should
+                    contain the least code possible to perform the operations
+                    you are interested in measuring. Specifically, things like
+                    installing or starting applications, processing results, or
+                    copying files to/from the device should be done elsewhere if
+                    possible.
+
+    :update_result: This method gets invoked after the task execution has
+                    finished and should be used to extract metrics and add them
+                    to the result (see below).
+    :teardown: This could be used to perform any cleanup you may wish to do,
+               e.g. uninstalling applications, deleting files on the device, etc.
+
+    :validate: This method can be used to validate any assumptions your workload
+               makes about the environment (e.g. that required files are
+               present, environment variables are set, etc) and should raise
+               a :class:`wlauto.exceptions.WorkloadError` if that is not the
+               case. The base class implementation only makes sure that the
+               name attribute has been set.
+
+.. _agenda: agenda.html
+
+Workload methods (except for ``validate``) take a single argument that is a
+:class:`wlauto.core.execution.ExecutionContext` instance.
This object keeps
+track of the current execution state (such as the current workload, iteration
+number, etc), and contains, among other things, a
+:class:`wlauto.core.workload.WorkloadResult` instance that should be populated
+from the ``update_result`` method with the results of the execution. ::
+
+    # ...
+
+    def update_result(self, context):
+       # ...
+       context.result.add_metric('energy', 23.6, 'Joules', lower_is_better=True)
+
+    # ...
+
+Example
+-------
+
+This example shows a simple workload that times how long it takes to compress a
+file of a particular size on the device.
+
+.. note:: This is intended as an example of how to implement the Workload
+          interface. The methodology used to perform the actual measurement is
+          not necessarily sound, and this Workload should not be used to collect
+          real measurements.
+
+.. code-block:: python
+
+    import os
+    from wlauto import Workload, Parameter
+
+    class ZiptestWorkload(Workload):
+
+        name = 'ziptest'
+        description = '''
+                      Times how long it takes to gzip a file of a particular size on a device.
+
+                      This workload was created for illustration purposes only. It should not be
+                      used to collect actual measurements.
+
+                      '''
+
+        parameters = [
+            Parameter('file_size', kind=int, default=2000000,
+                      description='Size of the file (in bytes) to be gzipped.')
+        ]
+
+        def setup(self, context):
+            # Generate a file of the specified size containing random garbage.
+            host_infile = os.path.join(context.output_directory, 'infile')
+            command = 'openssl rand -base64 {} > {}'.format(self.file_size, host_infile)
+            os.system(command)
+            # Set up on-device paths
+            devpath = self.device.path  # os.path equivalent for the device
+            self.device_infile = devpath.join(self.device.working_directory, 'infile')
+            self.device_outfile = devpath.join(self.device.working_directory, 'outfile')
+            # Push the file to the device
+            self.device.push_file(host_infile, self.device_infile)
+
+        def run(self, context):
+            self.device.execute('cd {} && (time gzip {}) &>> {}'.format(self.device.working_directory,
+                                                                        self.device_infile,
+                                                                        self.device_outfile))
+
+        def update_result(self, context):
+            # Pull the results file to the host
+            host_outfile = os.path.join(context.output_directory, 'outfile')
+            self.device.pull_file(self.device_outfile, host_outfile)
+            # Extract metrics from the file's contents and update the result
+            # with them.
+            content = iter(open(host_outfile).read().strip().split())
+            for value, metric in zip(content, content):
+                mins, secs = map(float, value[:-1].split('m'))
+                context.result.add_metric(metric, secs + 60 * mins)
+
+        def teardown(self, context):
+            # Clean up on-device files.
+            self.device.delete_file(self.device_infile)
+            self.device.delete_file(self.device_outfile)
+
+
+
+.. _GameWorkload:
+
+Adding a revent-dependent Workload
+----------------------------------
+
+:class:`wlauto.common.game.GameWorkload` is the base class for all the workloads
+that depend on :ref:`revent_files_creation` files. It implements all the methods
+needed to push the files to the device and run them. A new GameWorkload can be
+added by subclassing :class:`wlauto.common.game.GameWorkload`:
+
+The GameWorkload class defines the following interface::
+
+    class GameWorkload(Workload):
+
+        name = None
+        package = None
+        activity = None
+
+The interface should be implemented as follows:
+
+    :name: This identifies the workload (e.g. it is used to specify the workload
+           in the agenda_).
+    :package: This is the name of the '.apk' package without its file extension.
+    :activity: The name of the main activity that runs the package.
+
+Example
+-------
+
+This example shows a simple GameWorkload that plays a game.
+
+.. code-block:: python
+
+    from wlauto.common.game import GameWorkload
+
+    class MyGame(GameWorkload):
+
+        name = 'mygame'
+        package = 'com.mylogo.mygame'
+        activity = 'myActivity.myGame'
+
+Convention for Naming revent Files for :class:`wlauto.common.game.GameWorkload`
+-------------------------------------------------------------------------------
+
+There is a convention for naming revent files which you should follow if you
+want to record your own revent files. Each revent file must start with the
+device name (case sensitive), followed by a dot '.', then the stage name,
+then '.revent'. All your custom revent files should reside at
+'~/.workload_automation/dependencies/WORKLOAD NAME/'. These are the currently
+supported stages:
+
+    :setup: This stage is where the game is loaded. It is a good place to
+            record events that modify the game settings and get it ready
+            to start.
+    :run: This stage is where the game actually starts. This will allow for
+          more accurate results if the revent file for this stage only
+          records the game being played.
+
+For instance, to add custom revent files for a device named mydevice and
+a workload named mygame, you create a new directory called mygame in
+'~/.workload_automation/dependencies/'. Then you add the revent files for
+the stages you want in ~/.workload_automation/dependencies/mygame/::
+
+    mydevice.setup.revent
+    mydevice.run.revent
+
+Any revent file in the dependencies will always take precedence over the revent
+file in the workload directory. So it is possible, for example, to just provide
+one revent for setup in the dependencies and use the run.revent that is in the
+workload directory.
+
+Adding an Instrument
+====================
+
+Instruments can be used to collect additional measurements during workload
+execution (e.g. collect power readings). An instrument can hook into almost any
+stage of workload execution. A typical instrument would implement a subset of
+the following interface::
+
+    class Instrument(Extension):
+
+        name = None
+        description = None
+
+        parameters = [
+        ]
+
+        def initialize(self, context):
+            pass
+
+        def setup(self, context):
+            pass
+
+        def start(self, context):
+            pass
+
+        def stop(self, context):
+            pass
+
+        def update_result(self, context):
+            pass
+
+        def teardown(self, context):
+            pass
+
+        def finalize(self, context):
+            pass
+
+This is similar to a Workload, except all methods are optional. In addition to
+the workload-like methods, instruments can define a number of other methods that
+will get invoked at various points during run execution. The most useful of
+these is perhaps ``initialize``, which gets invoked after the device has been
+initialised for the first time, and can be used to perform one-time setup (e.g.
+copying files to the device -- there is no point in doing that for each
+iteration). The full list of available methods can be found in the
+:ref:`Signals Documentation <instrumentation_method_map>`.
+
+
+Prioritization
+--------------
+
+Callbacks (e.g. ``setup()`` methods) for all instrumentation get executed at the
+same point during workload execution, one after another. The order in which the
+callbacks get invoked should be considered arbitrary and should not be relied
+on (e.g. you cannot expect that just because instrument A is listed before
+instrument B in the config, instrument A's callbacks will run first).
+
+In some cases (e.g.
in ``start()`` and ``stop()`` methods), it is important to
+ensure that a particular instrument's callbacks run as closely as possible to the
+workload's invocations in order to maintain accuracy of readings; or,
+conversely, that a callback is executed after the others, because it takes a
+long time and may throw off the accuracy of other instrumentation. You can do
+this by prepending ``fast_`` or ``slow_`` to your callbacks' names. For
+example::
+
+    class PreciseInstrument(Instrument):
+
+        # ...
+
+        def fast_start(self, context):
+            pass
+
+        def fast_stop(self, context):
+            pass
+
+        # ...
+
+``PreciseInstrument`` will be started after all other instrumentation (i.e.
+*just* before the workload runs), and it will be stopped before all other
+instrumentation (i.e. *just* after the workload runs). It is also possible to
+use the ``very_fast_`` and ``very_slow_`` prefixes when you want to be really
+sure that your callback will be the last/first to run.
+
+If more than one active instrument has specified fast (or slow) callbacks, then
+their execution order with respect to each other is not guaranteed. In general,
+having a lot of instrumentation enabled is necessarily going to affect the
+readings. The best way to ensure accuracy of measurements is to minimize the
+number of active instruments (perhaps doing several identical runs with
+different instruments enabled).
+
+Example
+-------
+
+Below is a simple instrument that measures the execution time of a workload::
+
+    import time
+
+    from wlauto import Instrument
+
+
+    class ExecutionTimeInstrument(Instrument):
+        """
+        Measure how long it took to execute the run() methods of a Workload.
+
+        """
+
+        name = 'execution_time'
+
+        def initialize(self, context):
+            self.start_time = None
+            self.end_time = None
+
+        def fast_start(self, context):
+            self.start_time = time.time()
+
+        def fast_stop(self, context):
+            self.end_time = time.time()
+
+        def update_result(self, context):
+            execution_time = self.end_time - self.start_time
+            context.result.add_metric('execution_time', execution_time, 'seconds')
+
+
+Adding a Result Processor
+=========================
+
+A result processor is responsible for processing the results. This may
+involve formatting and writing them to a file, uploading them to a database,
+generating plots, etc. WA comes with a few result processors that output
+results in a few common formats (such as csv or JSON).
+
+You can add your own result processors by creating a Python file in
+``~/.workload_automation/result_processors`` with a class that derives from
+:class:`wlauto.core.result.ResultProcessor`, which has the following interface::
+
+    class ResultProcessor(Extension):
+
+        name = None
+        description = None
+
+        parameters = [
+        ]
+
+        def initialize(self, context):
+            pass
+
+        def process_iteration_result(self, result, context):
+            pass
+
+        def export_iteration_result(self, result, context):
+            pass
+
+        def process_run_result(self, result, context):
+            pass
+
+        def export_run_result(self, result, context):
+            pass
+
+        def finalize(self, context):
+            pass
+
+
+The method names should be fairly self-explanatory. The difference between
+"process" and "export" methods is that export methods will be invoked after
+the process methods for all result processors have run. Process methods
+may generate additional artifacts (metrics, files, etc), while export methods
+should not -- they should only handle existing results (upload them to a
+database, archive on a filer, etc).
+
+The result object passed to iteration methods is an instance of
+:class:`wlauto.core.result.IterationResult`; the result object passed to run
+methods is an instance of :class:`wlauto.core.result.RunResult`. Please refer to
+their API documentation for details.
+
+Example
+-------
+
+Here is an example result processor that formats the results as a column-aligned
+table::
+
+    import os
+    from wlauto import ResultProcessor
+    from wlauto.utils.misc import write_table
+
+
+    class Table(ResultProcessor):
+
+        name = 'table'
+        description = 'Generates a text file containing a column-aligned table with run results.'
+
+        def process_run_result(self, result, context):
+            rows = []
+            for iteration_result in result.iteration_results:
+                for metric in iteration_result.metrics:
+                    rows.append([metric.name, str(metric.value), metric.units or '',
+                                 metric.lower_is_better and '-' or '+'])
+
+            outfile = os.path.join(context.output_directory, 'table.txt')
+            with open(outfile, 'w') as wfh:
+                write_table(rows, wfh)
+
+
+Adding a Resource Getter
+========================
+
+A resource getter is a new extension type added in version 2.1.3. A resource
+getter implements a method of acquiring resources of a particular type (such as
+APK files or additional workload assets). Resource getters are invoked in
+priority order until one returns the desired resource.
+
+If you want WA to look for resources somewhere it doesn't by default (e.g. you
+have a repository of APK files), you can implement a getter for the resource and
+register it with a higher priority than the standard WA getters, so that it gets
+invoked first.
+
+Instances of a resource getter should implement the following interface::
+
+    class ResourceGetter(Extension):
+
+        name = None
+        resource_type = None
+        priority = GetterPriority.environment
+
+        def get(self, resource, **kwargs):
+            raise NotImplementedError()
+
+The getter should define a name (as with all extensions), a resource
+type, which should be a string, e.g. ``'jar'``, and a priority (see `Getter
+Prioritization`_ below). In addition, the ``get`` method should be implemented. The
+first argument is an instance of :class:`wlauto.core.resource.Resource`
+representing the resource that should be obtained. Additional keyword
+arguments may be used by the invoker to provide additional information about
+the resource. This method should return an instance of the resource that
+has been discovered (what "instance" means depends on the resource, e.g. it
+could be a file path), or ``None`` if this getter was unable to discover
+that resource.
+
+Getter Prioritization
+---------------------
+
+A priority is an integer with higher numeric values indicating a higher
+priority. The following standard priority aliases are defined for getters:
+
+
+    :cached: The cached version of the resource. Look here first. This priority also implies
+             that the resource at this location is a "cache" and is not the only version of the
+             resource, so it may be cleared without losing access to the resource.
+    :preferred: Take this resource in favour of the environment resource.
+    :environment: Found somewhere under ~/.workload_automation/ or equivalent, or
+                  from environment variables, external configuration files, etc.
+                  These will override resources supplied with the package.
+    :package: Resource provided with the package.
+    :remote: Resource will be downloaded from a remote location (such as an HTTP server
+             or a samba share). Try this only if no other getter was successful.
+
+These priorities are defined as class members of
+:class:`wlauto.core.resource.GetterPriority`, e.g. ``GetterPriority.cached``.
+
+Most getters in WA will be registered with either ``environment`` or
+``package`` priorities. So if you want your getter to override the default, it
+should typically be registered as ``preferred``.
+
+You don't have to stick to standard priority levels (though you should, unless
+there is a good reason). Any integer is a valid priority. The standard priorities
+range from -20 to 20 in increments of 10.
+
+Example
+-------
+
+The following is an implementation of a getter for a workload APK file that
+looks for the file under
+``~/.workload_automation/dependencies/<workload name>``::
+
+    import os
+    import glob
+
+    from wlauto import ResourceGetter, GetterPriority, settings
+    from wlauto.exceptions import ResourceError
+    from wlauto.utils.misc import ensure_directory_exists as _d
+
+
+    class EnvironmentApkGetter(ResourceGetter):
+
+        name = 'environment_apk'
+        resource_type = 'apk'
+        priority = GetterPriority.environment
+
+        def get(self, resource, **kwargs):
+            resource_dir = _d(os.path.join(settings.dependency_directory, resource.owner.name))
+            version = kwargs.get('version')
+            found_files = glob.glob(os.path.join(resource_dir, '*.apk'))
+            if version:
+                found_files = [ff for ff in found_files if version.lower() in ff.lower()]
+            if len(found_files) == 1:
+                return found_files[0]
+            elif not found_files:
+                return None
+            else:
+                raise ResourceError('More than one .apk found in {} for {}.'.format(resource_dir,
+                                                                                    resource.owner.name))
+
+.. _adding_a_device:
+
+Adding a Device
+===============
+
+At the moment, only Android devices are supported. Most of the functionality for
+interacting with a device is implemented in
+:class:`wlauto.common.AndroidDevice` and is exposed through the ``generic_android``
+device interface, which should suffice for most purposes. The most common area
+where custom functionality may need to be implemented is during device
+initialization. Usually, once the device gets to the Android home screen, it's
+just like any other Android device (modulo things like differences between
+Android versions).
+
+If your device does not work with the ``generic_android`` interface and you need
+to write a custom interface to handle it, you would do that by subclassing
+``AndroidDevice`` and then just overriding the methods you need. Typically you
+will want to override one or more of the following:
+
+reset
+    Trigger a device reboot. The default implementation just sends ``adb
+    reboot`` to the device. If this command does not work, an alternative
+    implementation may need to be provided.
+
+hard_reset
+    This is a harsher reset that involves cutting the power to a device
+    (e.g. holding down the power button or removing the battery from a phone). The
+    default implementation is a no-op that just sets some internal flags. If
+    you're dealing with unreliable prototype hardware that can crash and
+    become unresponsive, you may want to implement this in order for WA to
+    be able to recover automatically.
+
+connect
+    When this method returns, the adb connection to the device has been
+    established. This gets invoked after a reset. The default implementation
+    just waits for the device to appear in the adb list of connected
+    devices. If this is not enough (e.g. your device is connected via
+    Ethernet and requires an explicit ``adb connect`` call), you may wish to
+    override this to perform the necessary actions before invoking
+    ``AndroidDevice``'s version.
+
+Please refer to the API documentation for :class:`wlauto.common.AndroidDevice`
+for the full list of its methods and their functionality.
+
+
+Other Extension Types
+=====================
+
+In addition to the extension types covered above, there are a few other, more
+specialized ones. They will not be covered in as much detail. Most of them
+expose relatively simple interfaces with only a couple of methods and it is
+expected that if the need arises to extend them, the API-level documentation
+that accompanies them, in addition to what has been outlined here, should
+provide enough guidance.
+
+:commands: This allows extending WA with additional sub-commands (to supplement
+           the existing ones outlined in the :ref:`invocation` section).
+:modules: Modules are "extensions for extensions". They can be loaded by other
+          extensions to expand their functionality (for example, a flashing
+          module may be loaded by a device in order to support flashing).
+
+
+Packaging Your Extensions
+=========================
+
+If you have written a bunch of extensions, and you want to make it easy to
+deploy them to new systems and/or to update them on existing systems, you can
+wrap them in a Python package. You can use the ``wa create package`` command to
+generate the appropriate boilerplate. This will create a ``setup.py`` and a
+directory for your package that you can place your extensions into.
+
+For example, if you have a workload inside ``my_workload.py`` and a result
+processor in ``my_result_processor.py``, and you want to package them as the
+``my_wa_exts`` package, first run the create command ::
+
+    wa create package my_wa_exts
+
+This will create a ``my_wa_exts`` directory which contains a
+``my_wa_exts/setup.py`` and a subdirectory ``my_wa_exts/my_wa_exts`` which is
+the package directory for your extensions (you can rename the top-level
+``my_wa_exts`` directory to anything you like -- it's just a "container" for the
+setup.py and the package directory). Once you have that, you can then copy your
+extensions into the package directory, creating
+``my_wa_exts/my_wa_exts/my_workload.py`` and
+``my_wa_exts/my_wa_exts/my_result_processor.py``. If you have a lot of
+extensions, you might want to organize them into subpackages, but only the
+top-level package directory is created by default, and it is OK to have
+everything in there.
+
+.. note:: When discovering extensions through this mechanism, WA traverses the
+          Python module/submodule tree, not the directory structure; therefore,
+          if you are going to create subdirectories under the top-level directory
+          created for you, it is important that you make sure they are valid
+          Python packages; i.e. each subdirectory must contain a __init__.py
+          (even if blank) in order for the code in that directory and its
+          subdirectories to be discoverable.
+
+At this stage, you may want to edit the ``params`` structure near the bottom of
+the ``setup.py`` to add correct author, license and contact information (see
+the "Writing the Setup Script" section in the standard Python documentation for
+details). You may also want to add a README and/or a COPYING file at the same
+level as the setup.py.
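+
+After editing, the relevant part of the generated ``setup.py`` might look
+something like this (the values are, of course, illustrative)::
+
+    params = dict(
+        name='my_wa_exts',
+        version='0.0.1',
+        packages=packages,
+        package_data=data_files,
+        url='http://example.com/my_wa_exts',
+        license='Apache v2',
+        maintainer='Your Name',
+        maintainer_email='your.name@example.com',
+        install_requires=[
+            'wlauto',
+        ],
+    )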
+Once you have the contents of your package sorted,
+you can generate the package by running ::
+
+    cd my_wa_exts
+    python setup.py sdist
+
+This will generate a ``my_wa_exts/dist/my_wa_exts-0.0.1.tar.gz`` package which
+can then be deployed on the target system with standard Python package
+management tools, e.g. ::
+
+    sudo pip install my_wa_exts-0.0.1.tar.gz
+
+As part of the installation process, the setup.py in the package will write the
+package's name into ``~/.workload_automation/packages``. This will tell WA that
+the package contains extensions and it will load them the next time it runs.
+
+.. note:: There are no uninstall hooks in ``setuptools``, so if you ever
+          uninstall your WA extensions package, you will have to manually remove
+          it from ``~/.workload_automation/packages``, otherwise WA will complain
+          about a missing package next time you try to run it.
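+
+To round off the example, here is what ``my_wa_exts/my_wa_exts/my_result_processor.py``
+might minimally contain -- it is just an ordinary extension module, with nothing
+package-specific about it (a do-nothing sketch)::
+
+    from wlauto import ResultProcessor
+
+
+    class MyResultProcessor(ResultProcessor):
+
+        name = 'my_result_processor'
+        description = 'A minimal result processor that does nothing (illustrative).'
+
+        def process_run_result(self, result, context):
+            pass  # real processing of the run result would go here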
diff --git a/extras/README b/extras/README
new file mode 100644
index 00000000..9dbb3499
--- /dev/null
+++ b/extras/README
@@ -0,0 +1,12 @@
+This directory is intended for miscellaneous extra stuff that may be useful while developing
+Workload Automation. It should *NOT* contain anything necessary for *using* Workload Automation.
+Whenever you add something to this directory, please also add a short description of what it is in
+this file.
+
+pylintrc
+    pylint configuration file set up for WA development (see comment at the top of the file
+    for how to use).
+
+walog.vim
+    Vim syntax file for WA logs; adds highlighting similar to what comes out
+    in the console. See comment in the file for how to enable it.
diff --git a/extras/pylintrc b/extras/pylintrc
new file mode 100644
index 00000000..99e2b8f1
--- /dev/null
+++ b/extras/pylintrc
@@ -0,0 +1,70 @@
+#
+# pylint configuration for Workload Automation.
+#
+# To install pylint, run
+#
+#    sudo apt-get install pylint
+#
+# then copy this file to ~/.pylintrc in order for pylint to pick it up
+# (or, alternatively, specify it with the --rcfile option on invocation).
+#
+# Note: If you're adding something to the disable setting, please also add the
+#       explanation of the code in the comment above it. Messages should only
+#       be added here if we really don't *ever* care about them. For ignoring
+#       messages on specific lines or in specific files, add the appropriate
+#       pylint disable clause in the source.
+#
+[MASTER]
+
+profile=no
+
+ignore=external
+
+[MESSAGES CONTROL]
+# Disable the following messages:
+#    C0301: Line too long (%s/%s)
+#    C0103: Invalid name "%s" (should match %s)
+#    C0111: Missing docstring
+#    W0142: Used * or ** magic
+#    R0903: Too few public methods
+#    R0904: Too many public methods
+#    R0922: Abstract class is only referenced 1 times
+#    W0511: TODO Note: this is disabled for a cleaner output, but should be reenabled
+#           occasionally (through command line argument) to make sure all
+#           TODO's are addressed, e.g. before a release.
+#    W0141: Used builtin function (map|filter)
+#    I0011: Locally disabling %s
+#    R0921: %s: Abstract class not referenced
+#           Note: this needs to be in the rc file due to a known bug in pylint:
+#           http://www.logilab.org/ticket/111138
+#    W1401: anomalous-backslash-in-string, due to:
+#           https://bitbucket.org/logilab/pylint/issue/272/anomalous-backslash-in-string-for-raw
+#    C0330: bad continuation, due to:
+#           https://bitbucket.org/logilab/pylint/issue/232/wrong-hanging-indentation-false-positive
+disable=C0301,C0103,C0111,W0142,R0903,R0904,R0922,W0511,W0141,I0011,R0921,W1401,C0330
+
+[FORMAT]
+max-module-lines=4000
+
+[DESIGN]
+
+# We have DeviceConfig classes that are basically just repositories of configuration
+# settings.
+max-args=30
+max-attributes=30
+
+
+[SIMILARITIES]
+
+min-similarity-lines=10
+
+[REPORTS]
+
+output-format=colorized
+
+reports=no
+
+[IMPORTS]
+
+# Parts of the string module are not deprecated. Throws too many false positives.
+deprecated-modules=
diff --git a/extras/walog.vim b/extras/walog.vim
new file mode 100644
index 00000000..a9b79617
--- /dev/null
+++ b/extras/walog.vim
@@ -0,0 +1,21 @@
+" Copy this into ~/.vim/syntax/ and add the following to your ~/.vimrc:
+"    au BufRead,BufNewFile run.log set filetype=walog
+"
+if exists("b:current_syntax")
+    finish
+endif
+
+syn region debugPreamble start='\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d DEBUG' end=':'
+syn region infoPreamble start='\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d INFO' end=':'
+syn region warningPreamble start='\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d WARNING' end=':'
+syn region errorPreamble start='\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d ERROR' end=':'
+syn region critPreamble start='\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d CRITICAL' end=':'
+
+hi debugPreamble guifg=Blue ctermfg=DarkBlue
+hi infoPreamble guifg=Green ctermfg=DarkGreen
+hi warningPreamble guifg=Yellow ctermfg=178
+hi errorPreamble guifg=Red ctermfg=DarkRed
+hi critPreamble guifg=Red ctermfg=DarkRed cterm=bold gui=bold
+
+let b:current_syntax='walog'
+
diff --git a/scripts/create_workload b/scripts/create_workload
new file mode 100644
index 00000000..c32f9ce2
--- /dev/null
+++ b/scripts/create_workload
@@ -0,0 +1,17 @@
+#!/bin/bash
+# $Copyright:
+# ----------------------------------------------------------------
+# This confidential and proprietary software may be used only as
+# authorised by a licensing agreement from ARM Limited
+# (C) COPYRIGHT 2013 ARM Limited
+# ALL RIGHTS RESERVED
+# The entire notice above must be reproduced on all authorised
+# copies and copies may only be made to the extent permitted
+# by a licensing agreement from ARM Limited.
+# ----------------------------------------------------------------
+# File: create_workload
+# ----------------------------------------------------------------
+# $
+#
+wa create workload "$@"
+
diff --git a/scripts/list_extensions b/scripts/list_extensions
new file mode 100644
index 00000000..08b65aad
--- /dev/null
+++ b/scripts/list_extensions
@@ -0,0 +1,16 @@
+#!/bin/bash
+# $Copyright:
+# ----------------------------------------------------------------
+# This confidential and proprietary software may be used only as
+# authorised by a licensing agreement from ARM Limited
+# (C) COPYRIGHT 2013 ARM Limited
+# ALL RIGHTS RESERVED
+# The entire notice above must be reproduced on all authorised
+# copies and copies may only be made to the extent permitted
+# by a licensing agreement from ARM Limited.
+# ----------------------------------------------------------------
+# File: list_extensions
+# ----------------------------------------------------------------
+# $
+#
+wa list "$@"
diff --git a/scripts/run_workloads b/scripts/run_workloads
new file mode 100644
index 00000000..616f076c
--- /dev/null
+++ b/scripts/run_workloads
@@ -0,0 +1,17 @@
+#!/bin/bash
+# $Copyright:
+# ----------------------------------------------------------------
+# This confidential and proprietary software may be used only as
+# authorised by a licensing agreement from ARM Limited
+# (C) COPYRIGHT 2013 ARM Limited
+# ALL RIGHTS RESERVED
+# The entire notice above must be reproduced on all authorised
+# copies and copies may only be made to the extent permitted
+# by a licensing agreement from ARM Limited.
+# ----------------------------------------------------------------
+# File: run_workloads
+# ----------------------------------------------------------------
+# $
+#
+wa run "$@"
+
diff --git a/scripts/wa b/scripts/wa
new file mode 100644
index 00000000..a7942e5a
--- /dev/null
+++ b/scripts/wa
@@ -0,0 +1,17 @@
+#!/usr/bin/env python
+# $Copyright:
+# ----------------------------------------------------------------
+# This confidential and proprietary software may be used only as
+# authorised by a licensing agreement from ARM Limited
+# (C) COPYRIGHT 2013 ARM Limited
+# ALL RIGHTS RESERVED
+# The entire notice above must be reproduced on all authorised
+# copies and copies may only be made to the extent permitted
+# by a licensing agreement from ARM Limited.
+# ----------------------------------------------------------------
+# File: wa
+# ----------------------------------------------------------------
+# $
+#
+from wlauto.core.entry_point import main
+main()
diff --git a/setup.py b/setup.py
new file mode 100644
index 00000000..4eb13f98
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,96 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+import sys
+import warnings
+from itertools import chain
+
+try:
+    from setuptools import setup
+except ImportError:
+    from distutils.core import setup
+
+sys.path.insert(0, './wlauto/core/')
+from version import get_wa_version
+
+# happens when falling back to distutils
+warnings.filterwarnings('ignore', "Unknown distribution option: 'install_requires'")
+warnings.filterwarnings('ignore', "Unknown distribution option: 'extras_require'")
+
+try:
+    os.remove('MANIFEST')
+except OSError:
+    pass
+
+packages = []
+data_files = {}
+source_dir = os.path.dirname(__file__)
+for root, dirs, files in os.walk('wlauto'):
+    rel_dir = os.path.relpath(root, source_dir)
+    data = []
+    if '__init__.py' in files:
+        for f in files:
+            if os.path.splitext(f)[1] not in ['.py', '.pyc', '.pyo']:
+                data.append(f)
+        package_name = rel_dir.replace(os.sep, '.')
+        package_dir = root
+        packages.append(package_name)
+        data_files[package_name] = data
+    else:
+        # use previous package name
+        filepaths = [os.path.join(root, f) for f in files]
+        data_files[package_name].extend([os.path.relpath(f, package_dir) for f in filepaths])
+
+scripts = [os.path.join('scripts', s) for s in os.listdir('scripts')]
+
+params = dict(
+    name='wlauto',
+    description='A framework for automating workload execution and measurement collection on ARM devices.',
+    version=get_wa_version(),
+    packages=packages,
+    package_data=data_files,
+    scripts=scripts,
+    url='N/A',
+    license='Apache v2',
+    maintainer='ARM Architecture & Technology Device Lab',
+    maintainer_email='workload-automation@arm.com',
+    install_requires=[
+        'python-dateutil',  # converting between UTC and local time.
+        'pexpect>=3.3',  # Send/receive to/from device
+        'pyserial',  # Serial port interface
+        'colorama',  # Printing with colors
+        'pyYAML',  # YAML-formatted agenda parsing
+    ],
+    extras_require={
+        'other': ['jinja2', 'pandas>=0.13.1'],
+        'test': ['nose'],
+        'mongodb': ['pymongo'],
+        'doc': ['sphinx'],
+    },
+    # https://pypi.python.org/pypi?%3Aaction=list_classifiers
+    classifiers=[
+        'Development Status :: 4 - Beta',
+        'Environment :: Console',
+        'License :: OSI Approved :: Apache Software License',
+        'Operating System :: POSIX :: Linux',
+        'Programming Language :: Python :: 2.7',
+    ],
)

+# flatten the per-extra lists into a single list for the 'everything' extra
+all_extras = list(chain.from_iterable(params['extras_require'].itervalues()))
+params['extras_require']['everything'] = all_extras
+
+setup(**params)
diff --git a/wlauto/__init__.py b/wlauto/__init__.py
new file mode 100644
index 00000000..0e31686c
--- /dev/null
+++ b/wlauto/__init__.py
@@ -0,0 +1,36 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from wlauto.core.bootstrap import settings  # NOQA
+from wlauto.core.device import Device, RuntimeParameter, CoreParameter  # NOQA
+from wlauto.core.command import Command  # NOQA
+from wlauto.core.workload import Workload  # NOQA
+from wlauto.core.extension import Module, Parameter, Artifact, Alias  # NOQA
+from wlauto.core.extension_loader import ExtensionLoader  # NOQA
+from wlauto.core.instrumentation import Instrument  # NOQA
+from wlauto.core.result import ResultProcessor, IterationResult  # NOQA
+from wlauto.core.resource import ResourceGetter, Resource, GetterPriority, NO_ONE  # NOQA
+from wlauto.core.exttype import get_extension_type  # NOQA Note: MUST be imported after other core imports.
+
+from wlauto.common.resources import File, ExtensionAsset, Executable
+from wlauto.common.linux.device import LinuxDevice  # NOQA
+from wlauto.common.android.device import AndroidDevice, BigLittleDevice  # NOQA
+from wlauto.common.android.resources import ApkFile, JarFile
+from wlauto.common.android.workload import (UiAutomatorWorkload, ApkWorkload, AndroidBenchmark,  # NOQA
+                                            AndroidUiAutoBenchmark, GameWorkload)  # NOQA
+
+from wlauto.core.version import get_wa_version
+
+__version__ = get_wa_version()
diff --git a/wlauto/agenda-example-biglittle.yaml b/wlauto/agenda-example-biglittle.yaml
new file mode 100644
index 00000000..eea89213
--- /dev/null
+++ b/wlauto/agenda-example-biglittle.yaml
@@ -0,0 +1,79 @@
+# This agenda specifies configuration that may be used for regression runs
+# on big.LITTLE systems. This agenda will work with a TC2 device configured as
+# described in the documentation.
+config:
+    device: tc2
+    run_name: big.LITTLE_regression
+global:
+    iterations: 5
+sections:
+    - id: mp_a15only
+      boot_parameters:
+          os_mode: mp_a15_only
+      runtime_parameters:
+          a15_governor: interactive
+          a15_governor_tunables:
+              above_hispeed_delay: 20000
+    - id: mp_a7bc
+      boot_parameters:
+          os_mode: mp_a7_bootcluster
+      runtime_parameters:
+          a7_governor: interactive
+          a7_min_frequency: 500000
+          a7_governor_tunables:
+              above_hispeed_delay: 20000
+          a15_governor: interactive
+          a15_governor_tunables:
+              above_hispeed_delay: 20000
+    - id: mp_a15bc
+      boot_parameters:
+          os_mode: mp_a15_bootcluster
+      runtime_parameters:
+          a7_governor: interactive
+          a7_min_frequency: 500000
+          a7_governor_tunables:
+              above_hispeed_delay: 20000
+          a15_governor: interactive
+          a15_governor_tunables:
+              above_hispeed_delay: 20000
+workloads:
+    - id: b01
+      name: andebench
+      workload_parameters:
+          number_of_threads: 5
+    - id: b02
+      name: andebench
+      label: andebenchst
+      workload_parameters:
+          number_of_threads: 1
+    - id: b03
+      name: antutu
+      label: antutu4.0.3
+      workload_parameters:
+          version: 4.0.3
+    - id: b04
+      name: benchmarkpi
+    - id: b05
+      name: caffeinemark
+    - id: b06
+      name: cfbench
+    - id: b07
+      name: geekbench
+      label: geekbench3
+      workload_parameters:
+          version: 3
+    - id: b08
+      name: linpack
+    - id: b09
+      name: quadrant
+    - id: b10
+      name: smartbench
+    - id: b11
+      name: sqlite
+    - id: b12
+      name: vellamo
+
+    - id: w01
+      name: bbench_with_audio
+    - id: w02
+      name: audio
diff --git a/wlauto/agenda-example-tutorial.yaml b/wlauto/agenda-example-tutorial.yaml
new file mode 100644
index 00000000..6eb2b9a1
--- /dev/null
+++ b/wlauto/agenda-example-tutorial.yaml
@@ -0,0 +1,43 @@
+# This is an agenda that is built up during the explanation of the agenda features
+# in the documentation. This should work out of the box on most rooted Android
+# devices.
+config: + project: governor_comparison + run_name: performance_vs_interactive + + device: generic_android + reboot_policy: never + + instrumentation: [coreutil, cpufreq] + coreutil: + threshold: 80 + sysfs_extractor: + paths: [/proc/meminfo] + result_processors: [sqlite] + sqlite: + database: ~/my_wa_results.sqlite +global: + iterations: 5 +sections: + - id: perf + runtime_params: + sysfile_values: + /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance + - id: inter + runtime_params: + sysfile_values: + /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: interactive +workloads: + - id: 01_dhry + name: dhrystone + label: dhrystone_15over6 + workload_params: + threads: 6 + mloops: 15 + - id: 02_memc + name: memcpy + instrumentation: [sysfs_extractor] + - id: 03_cycl + name: cyclictest + iterations: 10 + diff --git a/wlauto/commands/__init__.py b/wlauto/commands/__init__.py new file mode 100644 index 00000000..16224d6f --- /dev/null +++ b/wlauto/commands/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2014-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + diff --git a/wlauto/commands/create.py b/wlauto/commands/create.py new file mode 100644 index 00000000..6db925c6 --- /dev/null +++ b/wlauto/commands/create.py @@ -0,0 +1,300 @@ +# Copyright 2013-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +import os +import stat +import string +import textwrap +import argparse +import shutil +import getpass + +from wlauto import ExtensionLoader, Command, settings +from wlauto.exceptions import CommandError +from wlauto.utils.cli import init_argument_parser +from wlauto.utils.misc import (capitalize, check_output, + ensure_file_directory_exists as _f, ensure_directory_exists as _d) +from wlauto.utils.types import identifier +from wlauto.utils.doc import format_body + + +__all__ = ['create_workload'] + + +TEMPLATES_DIR = os.path.join(os.path.dirname(__file__), 'templates') + +UIAUTO_BUILD_SCRIPT = """#!/bin/bash + +class_dir=bin/classes/com/arm/wlauto/uiauto +base_class=`python -c "import os, wlauto; print os.path.join(os.path.dirname(wlauto.__file__), 'common', 'android', 'BaseUiAutomation.class')"` +mkdir -p $$class_dir +cp $$base_class $$class_dir + +ant build + +if [[ -f bin/${package_name}.jar ]]; then + cp bin/${package_name}.jar .. 
+fi +""" + + +class CreateSubcommand(object): + + name = None + help = None + usage = None + description = None + epilog = None + formatter_class = None + + def __init__(self, logger, subparsers): + self.logger = logger + self.group = subparsers + parser_params = dict(help=(self.help or self.description), usage=self.usage, + description=format_body(textwrap.dedent(self.description), 80), + epilog=self.epilog) + if self.formatter_class: + parser_params['formatter_class'] = self.formatter_class + self.parser = subparsers.add_parser(self.name, **parser_params) + init_argument_parser(self.parser) # propagate top-level options + self.initialize() + + def initialize(self): + pass + + +class CreateWorkloadSubcommand(CreateSubcommand): + + name = 'workload' + description = '''Create a new workload. By default, a basic workload template will be + used but you can use options to specify a different template.''' + + def initialize(self): + self.parser.add_argument('name', metavar='NAME', + help='Name of the workload to be created') + self.parser.add_argument('-p', '--path', metavar='PATH', default=None, + help='The location at which the workload will be created. If not specified, ' + + 'this defaults to "~/.workload_automation/workloads".') + self.parser.add_argument('-f', '--force', action='store_true', + help='Create the new workload even if a workload with the specified ' + + 'name already exists.') + + template_group = self.parser.add_mutually_exclusive_group() + template_group.add_argument('-A', '--android-benchmark', action='store_true', + help='Use android benchmark template. This template allows you to specify ' + + ' an APK file that will be installed and run on the device. You should ' + + ' place the APK file into the workload\'s directory at the same level ' + + 'as the __init__.py.') + template_group.add_argument('-U', '--ui-automation', action='store_true', + help='Use UI automation template. This template generates a UI automation ' + + 'Android project as well as the Python class. This a more general ' + + 'version of the android benchmark template that makes no assumptions ' + + 'about the nature of your workload, apart from the fact that you need ' + + 'UI automation. If you need to install an APK, start an app on device, ' + + 'etc., you will need to do that explicitly in your code.') + template_group.add_argument('-B', '--android-uiauto-benchmark', action='store_true', + help='Use android uiauto benchmark template. This generates a UI automation ' + + 'project as well as a Python class. This template should be used ' + + 'if you have a APK file that needs to be run on the device. You ' + + 'should place the APK file into the workload\'s directory at the ' + + 'same level as the __init__.py.') + + def execute(self, args): # pylint: disable=R0201 + where = args.path or 'local' + check_name = not args.force + + if args.android_benchmark: + kind = 'android' + elif args.ui_automation: + kind = 'uiauto' + elif args.android_uiauto_benchmark: + kind = 'android_uiauto' + else: + kind = 'basic' + + try: + create_workload(args.name, kind, where, check_name) + except CommandError, e: + print "ERROR:", e + + +class CreatePackageSubcommand(CreateSubcommand): + + name = 'package' + description = '''Create a new empty Python package for WA extensions. 
On installation,
+                   this package will "advertise" itself to WA so that extensions within it will
+                   be loaded by WA when it runs.'''

+    def initialize(self):
+        self.parser.add_argument('name', metavar='NAME',
+                                 help='Name of the package to be created')
+        self.parser.add_argument('-p', '--path', metavar='PATH', default=None,
+                                 help='The location at which the new package will be created. If not specified, ' +
+                                      'the current working directory will be used.')
+        self.parser.add_argument('-f', '--force', action='store_true',
+                                 help='Create the new package even if a file or directory with the same name '
+                                      'already exists at the specified location.')
+
+    def execute(self, args):  # pylint: disable=R0201
+        package_dir = args.path or os.path.abspath('.')
+        template_path = os.path.join(TEMPLATES_DIR, 'setup.template')
+        self.create_extensions_package(package_dir, args.name, template_path, args.force)
+
+    def create_extensions_package(self, location, name, setup_template_path, overwrite=False):
+        package_path = os.path.join(location, name)
+        if os.path.exists(package_path):
+            if overwrite:
+                self.logger.info('overwriting existing "{}"'.format(package_path))
+                shutil.rmtree(package_path)
+            else:
+                raise CommandError('Location "{}" already exists.'.format(package_path))
+        actual_package_path = os.path.join(package_path, name)
+        os.makedirs(actual_package_path)
+        setup_text = render_template(setup_template_path, {'package_name': name, 'user': getpass.getuser()})
+        with open(os.path.join(package_path, 'setup.py'), 'w') as wfh:
+            wfh.write(setup_text)
+        touch(os.path.join(actual_package_path, '__init__.py'))
+
+
+class CreateCommand(Command):
+
+    name = 'create'
+    description = '''Used to create various WA-related objects (see positional arguments list for what
+                     objects may be created).\n\nUse "wa create <object> -h" for object-specific arguments.'''
+    formatter_class = argparse.RawDescriptionHelpFormatter
+    subcmd_classes = [CreateWorkloadSubcommand, CreatePackageSubcommand]
+
+    def initialize(self):
+        subparsers = self.parser.add_subparsers(dest='what')
+        self.subcommands = []  # pylint: disable=W0201
+        for subcmd_cls in self.subcmd_classes:
+            subcmd = subcmd_cls(self.logger, subparsers)
+            self.subcommands.append(subcmd)
+
+    def execute(self, args):
+        for subcmd in self.subcommands:
+            if subcmd.name == args.what:
+                subcmd.execute(args)
+                break
+        else:
+            raise CommandError('Not a valid create parameter: {}'.format(args.what))
+
+
+def create_workload(name, kind='basic', where='local', check_name=True, **kwargs):
+    if check_name:
+        extloader = ExtensionLoader(packages=settings.extension_packages, paths=settings.extension_paths)
+        if name in [wl.name for wl in extloader.list_workloads()]:
+            raise CommandError('Workload with name "{}" already exists.'.format(name))
+
+    class_name = get_class_name(name)
+    if where == 'local':
+        workload_dir = _d(os.path.join(settings.environment_root, 'workloads', name))
+    else:
+        workload_dir = _d(os.path.join(where, name))
+
+    if kind == 'basic':
+        create_basic_workload(workload_dir, name, class_name, **kwargs)
+    elif kind == 'uiauto':
+        create_uiautomator_workload(workload_dir, name, class_name, **kwargs)
+    elif kind == 'android':
+        create_android_benchmark(workload_dir, name, class_name, **kwargs)
+    elif kind == 'android_uiauto':
+        create_android_uiauto_benchmark(workload_dir, name, class_name, **kwargs)
+    else:
+        raise CommandError('Unknown workload type: {}'.format(kind))
+
+    print 'Workload created in {}'.format(workload_dir)
+
+
+def create_basic_workload(path, name, class_name):
+ source_file = os.path.join(path, '__init__.py') + with open(source_file, 'w') as wfh: + wfh.write(render_template('basic_workload', {'name': name, 'class_name': class_name})) + + +def create_uiautomator_workload(path, name, class_name): + uiauto_path = _d(os.path.join(path, 'uiauto')) + create_uiauto_project(uiauto_path, name) + source_file = os.path.join(path, '__init__.py') + with open(source_file, 'w') as wfh: + wfh.write(render_template('uiauto_workload', {'name': name, 'class_name': class_name})) + + +def create_android_benchmark(path, name, class_name): + source_file = os.path.join(path, '__init__.py') + with open(source_file, 'w') as wfh: + wfh.write(render_template('android_benchmark', {'name': name, 'class_name': class_name})) + + +def create_android_uiauto_benchmark(path, name, class_name): + uiauto_path = _d(os.path.join(path, 'uiauto')) + create_uiauto_project(uiauto_path, name) + source_file = os.path.join(path, '__init__.py') + with open(source_file, 'w') as wfh: + wfh.write(render_template('android_uiauto_benchmark', {'name': name, 'class_name': class_name})) + + +def create_uiauto_project(path, name, target='1'): + sdk_path = get_sdk_path() + android_path = os.path.join(sdk_path, 'tools', 'android') + package_name = 'com.arm.wlauto.uiauto.' + name.lower() + + # ${ANDROID_HOME}/tools/android create uitest-project -n com.arm.wlauto.uiauto.linpack -t 1 -p ../test2 + command = '{} create uitest-project --name {} --target {} --path {}'.format(android_path, + package_name, + target, + path) + check_output(command, shell=True) + + build_script = os.path.join(path, 'build.sh') + with open(build_script, 'w') as wfh: + template = string.Template(UIAUTO_BUILD_SCRIPT) + wfh.write(template.substitute({'package_name': package_name})) + os.chmod(build_script, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) + + source_file = _f(os.path.join(path, 'src', + os.sep.join(package_name.split('.')[:-1]), + 'UiAutomation.java')) + with open(source_file, 'w') as wfh: + wfh.write(render_template('UiAutomation.java', {'name': name, 'package_name': package_name})) + + +# Utility functions + +def get_sdk_path(): + sdk_path = os.getenv('ANDROID_HOME') + if not sdk_path: + raise CommandError('Please set ANDROID_HOME environment variable to point to ' + + 'the locaton of Android SDK') + return sdk_path + + +def get_class_name(name, postfix=''): + name = identifier(name) + return ''.join(map(capitalize, name.split('_'))) + postfix + + +def render_template(name, params): + filepath = os.path.join(TEMPLATES_DIR, name) + with open(filepath) as fh: + text = fh.read() + template = string.Template(text) + return template.substitute(params) + + +def touch(path): + with open(path, 'w') as wfh: # pylint: disable=unused-variable + pass diff --git a/wlauto/commands/list.py b/wlauto/commands/list.py new file mode 100644 index 00000000..0ffba3fa --- /dev/null +++ b/wlauto/commands/list.py @@ -0,0 +1,59 @@ +# Copyright 2014-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + +from wlauto import ExtensionLoader, Command, settings +from wlauto.utils.formatter import DescriptionListFormatter +from wlauto.utils.doc import get_summary + + +class ListCommand(Command): + + name = 'list' + description = 'List available WA extensions with a short description of each.' + + def initialize(self): + extension_types = ['{}s'.format(ext.name) for ext in settings.extensions] + self.parser.add_argument('kind', metavar='KIND', + help=('Specify the kind of extension to list. Must be ' + 'one of: {}'.format(', '.join(extension_types))), + choices=extension_types) + self.parser.add_argument('-n', '--name', help='Filter results by the name specified') + + def execute(self, args): + filters = {} + if args.name: + filters['name'] = args.name + + ext_loader = ExtensionLoader(packages=settings.extension_packages, paths=settings.extension_paths) + results = ext_loader.list_extensions(args.kind[:-1]) + if filters: + filtered_results = [] + for result in results: + passed = True + for k, v in filters.iteritems(): + if getattr(result, k) != v: + passed = False + break + if passed: + filtered_results.append(result) + else: # no filters specified + filtered_results = results + + if filtered_results: + output = DescriptionListFormatter() + for result in sorted(filtered_results, key=lambda x: x.name): + output.add_item(get_summary(result), result.name) + print output.format_data() diff --git a/wlauto/commands/run.py b/wlauto/commands/run.py new file mode 100644 index 00000000..192d013a --- /dev/null +++ b/wlauto/commands/run.py @@ -0,0 +1,87 @@ +# Copyright 2014-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +import os +import sys +import shutil + +import wlauto +from wlauto import Command, settings +from wlauto.core.agenda import Agenda +from wlauto.core.execution import Executor +from wlauto.utils.log import add_log_file + + +class RunCommand(Command): + + name = 'run' + description = 'Execute automated workloads on a remote device and process the resulting output.' + + def initialize(self): + self.parser.add_argument('agenda', metavar='AGENDA', + help='Agenda for this workload automation run. This defines which workloads will ' + + 'be executed, how many times, with which tunables, etc. ' + + 'See example agendas in {} '.format(os.path.dirname(wlauto.__file__)) + + 'for an example of how this file should be structured.') + self.parser.add_argument('-d', '--output-directory', metavar='DIR', default=None, + help='Specify a directory where the output will be generated. If the directory' + + 'already exists, the script will abort unless -f option (see below) is used,' + + 'in which case the contents of the directory will be overwritten. If this option' + + 'is not specified, then {} will be used instead.'.format(settings.output_directory)) + self.parser.add_argument('-f', '--force', action='store_true', + help='Overwrite output directory if it exists. 
By default, the script will abort in this' + + 'situation to prevent accidental data loss.') + self.parser.add_argument('-i', '--id', action='append', dest='only_run_ids', metavar='ID', + help='Specify a workload spec ID from an agenda to run. If this is specified, only that particular ' + + 'spec will be run, and other workloads in the agenda will be ignored. This option may be used to ' + + 'specify multiple IDs.') + + def execute(self, args): # NOQA + self.set_up_output_directory(args) + add_log_file(settings.log_file) + + if os.path.isfile(args.agenda): + agenda = Agenda(args.agenda) + settings.agenda = args.agenda + shutil.copy(args.agenda, settings.meta_directory) + else: + self.logger.debug('{} is not a file; assuming workload name.'.format(args.agenda)) + agenda = Agenda() + agenda.add_workload_entry(args.agenda) + + file_name = 'config_{}.py' + for file_number, path in enumerate(settings.get_config_paths(), 1): + shutil.copy(path, os.path.join(settings.meta_directory, file_name.format(file_number))) + + executor = Executor() + executor.execute(agenda, selectors={'ids': args.only_run_ids}) + + def set_up_output_directory(self, args): + if args.output_directory: + settings.output_directory = args.output_directory + self.logger.debug('Using output directory: {}'.format(settings.output_directory)) + if os.path.exists(settings.output_directory): + if args.force: + self.logger.info('Removing existing output directory.') + shutil.rmtree(settings.output_directory) + else: + self.logger.error('Output directory {} exists.'.format(settings.output_directory)) + self.logger.error('Please specify another location, or use -f option to overwrite.\n') + sys.exit(1) + + self.logger.info('Creating output directory.') + os.makedirs(settings.output_directory) + os.makedirs(settings.meta_directory) diff --git a/wlauto/commands/show.py b/wlauto/commands/show.py new file mode 100644 index 00000000..12515b73 --- /dev/null +++ b/wlauto/commands/show.py @@ -0,0 +1,101 @@ +# Copyright 2014-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +import sys +import subprocess +from cStringIO import StringIO + +from terminalsize import get_terminal_size # pylint: disable=import-error +from wlauto import Command, ExtensionLoader, settings +from wlauto.utils.doc import (get_summary, get_description, get_type_name, format_column, format_body, + format_paragraph, indent, strip_inlined_text) +from wlauto.utils.misc import get_pager + + +class ShowCommand(Command): + + name = 'show' + + description = """ + Display documentation for the specified extension (workload, instrument, etc.). 
+ """ + + def initialize(self): + self.parser.add_argument('name', metavar='EXTENSION', + help='''The name of the extension for which information will + be shown.''') + + def execute(self, args): + ext_loader = ExtensionLoader(packages=settings.extension_packages, paths=settings.extension_paths) + extension = ext_loader.get_extension_class(args.name) + out = StringIO() + term_width, term_height = get_terminal_size() + format_extension(extension, out, term_width) + text = out.getvalue() + pager = get_pager() + if len(text.split('\n')) > term_height and pager: + sp = subprocess.Popen(pager, stdin=subprocess.PIPE) + sp.communicate(text) + else: + sys.stdout.write(text) + + +def format_extension(extension, out, width): + format_extension_name(extension, out) + out.write('\n') + format_extension_summary(extension, out, width) + out.write('\n') + if extension.parameters: + format_extension_parameters(extension, out, width) + out.write('\n') + format_extension_description(extension, out, width) + + +def format_extension_name(extension, out): + out.write('\n{}\n'.format(extension.name)) + + +def format_extension_summary(extension, out, width): + out.write('{}\n'.format(format_body(strip_inlined_text(get_summary(extension)), width))) + + +def format_extension_description(extension, out, width): + # skip the initial paragraph of multi-paragraph description, as already + # listed above. + description = get_description(extension).split('\n\n', 1)[-1] + out.write('{}\n'.format(format_body(strip_inlined_text(description), width))) + + +def format_extension_parameters(extension, out, width, shift=4): + out.write('parameters:\n\n') + param_texts = [] + for param in extension.parameters: + description = format_paragraph(strip_inlined_text(param.description or ''), width - shift) + param_text = '{}'.format(param.name) + if param.mandatory: + param_text += " (MANDATORY)" + param_text += '\n{}\n'.format(description) + param_text += indent('type: {}\n'.format(get_type_name(param.kind))) + if param.allowed_values: + param_text += indent('allowed values: {}\n'.format(', '.join(map(str, param.allowed_values)))) + elif param.constraint: + param_text += indent('constraint: {}\n'.format(get_type_name(param.constraint))) + if param.default: + param_text += indent('default: {}\n'.format(param.default)) + param_texts.append(indent(param_text, shift)) + + out.write(format_column('\n'.join(param_texts), width)) + diff --git a/wlauto/commands/templates/UiAutomation.java b/wlauto/commands/templates/UiAutomation.java new file mode 100644 index 00000000..bd33d9a7 --- /dev/null +++ b/wlauto/commands/templates/UiAutomation.java @@ -0,0 +1,25 @@ +package ${package_name}; + +import android.app.Activity; +import android.os.Bundle; +import android.util.Log; +import android.view.KeyEvent; + +// Import the uiautomator libraries +import com.android.uiautomator.core.UiObject; +import com.android.uiautomator.core.UiObjectNotFoundException; +import com.android.uiautomator.core.UiScrollable; +import com.android.uiautomator.core.UiSelector; +import com.android.uiautomator.testrunner.UiAutomatorTestCase; + +import com.arm.wlauto.uiauto.BaseUiAutomation; + +public class UiAutomation extends BaseUiAutomation { + + public static String TAG = "${name}"; + + public void runUiAutomation() throws Exception { + // UI Automation code goes here + } + +} diff --git a/wlauto/commands/templates/android_benchmark b/wlauto/commands/templates/android_benchmark new file mode 100644 index 00000000..82796bd5 --- /dev/null +++ 
b/wlauto/commands/templates/android_benchmark
@@ -0,0 +1,27 @@
+from wlauto import AndroidBenchmark, Parameter
+
+
+class ${class_name}(AndroidBenchmark):
+
+    name = '${name}'
+    # NOTE: Please do not leave these comments in the code.
+    #
+    # Replace with the package for the app in the APK file.
+    package = 'com.foo.bar'
+    # Replace with the full path to the activity to run.
+    activity = '.RunBuzz'
+    description = "This is a placeholder description"
+
+    parameters = [
+        # Workload parameters go here e.g.
+        Parameter('Example parameter', kind=int, allowed_values=[1,2,3], default=1, override=True, mandatory=False,
+                  description='This is an example parameter')
+    ]
+
+    def run(self, context):
+        pass
+
+    def update_result(self, context):
+        super(${class_name}, self).update_result(context)
+        # process results and add them using
+        # context.result.add_metric
diff --git a/wlauto/commands/templates/android_uiauto_benchmark b/wlauto/commands/templates/android_uiauto_benchmark
new file mode 100644
index 00000000..5d6893a8
--- /dev/null
+++ b/wlauto/commands/templates/android_uiauto_benchmark
@@ -0,0 +1,24 @@
+from wlauto import AndroidUiAutoBenchmark, Parameter
+
+
+class ${class_name}(AndroidUiAutoBenchmark):
+
+    name = '${name}'
+    # NOTE: Please do not leave these comments in the code.
+    #
+    # Replace with the package for the app in the APK file.
+    package = 'com.foo.bar'
+    # Replace with the full path to the activity to run.
+    activity = '.RunBuzz'
+    description = "This is a placeholder description"
+
+    parameters = [
+        # Workload parameters go here e.g.
+        Parameter('Example parameter', kind=int, allowed_values=[1,2,3], default=1, override=True, mandatory=False,
+                  description='This is an example parameter')
+    ]
+
+    def update_result(self, context):
+        super(${class_name}, self).update_result(context)
+        # process results and add them using
+        # context.result.add_metric
diff --git a/wlauto/commands/templates/basic_workload b/wlauto/commands/templates/basic_workload
new file mode 100644
index 00000000..e75316f1
--- /dev/null
+++ b/wlauto/commands/templates/basic_workload
@@ -0,0 +1,28 @@
+from wlauto import Workload, Parameter
+
+
+class ${class_name}(Workload):
+
+    name = '${name}'
+    description = "This is a placeholder description"
+
+    parameters = [
+        # Workload parameters go here e.g.
+ Parameter('Example parameter', kind=int, allowed_values=[1,2,3], default=1, override=True, mandatory=False, + description='This is an example parameter') + ] + + def setup(self, context): + pass + + def run(self, context): + pass + + def update_result(self, context): + pass + + def teardown(self, context): + pass + + def validate(self): + pass diff --git a/wlauto/commands/templates/setup.template b/wlauto/commands/templates/setup.template new file mode 100644 index 00000000..f9097b59 --- /dev/null +++ b/wlauto/commands/templates/setup.template @@ -0,0 +1,102 @@ +import os +import sys +import warnings +from multiprocessing import Process + +try: + from setuptools.command.install import install as orig_install + from setuptools import setup +except ImportError: + from distutils.command.install import install as orig_install + from distutils.core import setup + +try: + import pwd +except ImportError: + pwd = None + +warnings.filterwarnings('ignore', "Unknown distribution option: 'install_requires'") + +try: + os.remove('MANIFEST') +except OSError: + pass + + +packages = [] +data_files = {} +source_dir = os.path.dirname(__file__) +for root, dirs, files in os.walk('$package_name'): + rel_dir = os.path.relpath(root, source_dir) + data = [] + if '__init__.py' in files: + for f in files: + if os.path.splitext(f)[1] not in ['.py', '.pyc', '.pyo']: + data.append(f) + package_name = rel_dir.replace(os.sep, '.') + package_dir = root + packages.append(package_name) + data_files[package_name] = data + else: + # use previous package name + filepaths = [os.path.join(root, f) for f in files] + data_files[package_name].extend([os.path.relpath(f, package_dir) for f in filepaths]) + +params = dict( + name='$package_name', + version='0.0.1', + packages=packages, + package_data=data_files, + url='N/A', + maintainer='$user', + maintainer_email='$user@example.com', + install_requires=[ + 'wlauto', + ], + # https://pypi.python.org/pypi?%3Aaction=list_classifiers + classifiers=[ + 'Development Status :: 3 - Alpha', + 'Environment :: Console', + 'License :: Other/Proprietary License', + 'Operating System :: Unix', + 'Programming Language :: Python :: 2.7', + ], +) + + +def update_wa_packages(): + sudo_user = os.getenv('SUDO_USER') + if sudo_user: + user_entry = pwd.getpwnam(sudo_user) + os.setgid(user_entry.pw_gid) + os.setuid(user_entry.pw_uid) + env_root = os.getenv('WA_USER_DIRECTORY', os.path.join(os.path.expanduser('~'), '.workload_automation')) + if not os.path.isdir(env_root): + os.makedirs(env_root) + wa_packages_file = os.path.join(env_root, 'packages') + if os.path.isfile(wa_packages_file): + with open(wa_packages_file, 'r') as wfh: + package_list = wfh.read().split() + if params['name'] not in package_list: + package_list.append(params['name']) + else: # no existing package file + package_list = [params['name']] + with open(wa_packages_file, 'w') as wfh: + wfh.write('\n'.join(package_list)) + + +class install(orig_install): + + def run(self): + orig_install.run(self) + # Must be done in a separate process because will drop privileges if + # sudo, and won't be able to reacquire them. 
+        p = Process(target=update_wa_packages)
+        p.start()
+        p.join()
+
+
+params['cmdclass'] = {'install': install}
+
+
+setup(**params)
diff --git a/wlauto/commands/templates/uiauto_workload b/wlauto/commands/templates/uiauto_workload
new file mode 100644
index 00000000..66cc193a
--- /dev/null
+++ b/wlauto/commands/templates/uiauto_workload
@@ -0,0 +1,35 @@
+from wlauto import UiAutomatorWorkload, Parameter
+
+
+class ${class_name}(UiAutomatorWorkload):
+
+    name = '${name}'
+    description = "This is a placeholder description"
+
+    parameters = [
+        # Workload parameters go here e.g.
+        Parameter('Example parameter', kind=int, allowed_values=[1,2,3], default=1, override=True, mandatory=False,
+                  description='This is an example parameter')
+    ]
+
+    def setup(self, context):
+        super(${class_name}, self).setup(context)
+        # Perform any necessary setup before starting the UI automation
+        # e.g. copy files to the device, start apps, reset logs, etc.
+
+
+    def update_result(self, context):
+        pass
+        # Process workload execution artifacts to extract metrics
+        # and add them to the run result using
+        # context.result.add_metric()
+
+    def teardown(self, context):
+        super(${class_name}, self).teardown(context)
+        # Perform any necessary cleanup
+
+    def validate(self):
+        pass
+        # Validate inter-parameter assumptions, etc.
+
+
diff --git a/wlauto/common/__init__.py b/wlauto/common/__init__.py
new file mode 100644
index 00000000..cd5d64d6
--- /dev/null
+++ b/wlauto/common/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
diff --git a/wlauto/common/android/BaseUiAutomation.class b/wlauto/common/android/BaseUiAutomation.class
new file mode 100644
index 00000000..2683f453
Binary files /dev/null and b/wlauto/common/android/BaseUiAutomation.class differ
diff --git a/wlauto/common/android/__init__.py b/wlauto/common/android/__init__.py
new file mode 100644
index 00000000..16224d6f
--- /dev/null
+++ b/wlauto/common/android/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
diff --git a/wlauto/common/android/device.py b/wlauto/common/android/device.py
new file mode 100644
index 00000000..21824eae
--- /dev/null
+++ b/wlauto/common/android/device.py
@@ -0,0 +1,678 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=E1101
+import os
+import sys
+import re
+import time
+import tempfile
+import shutil
+import threading
+from subprocess import CalledProcessError
+
+from wlauto.core.extension import Parameter
+from wlauto.common.linux.device import BaseLinuxDevice
+from wlauto.exceptions import DeviceError, WorkerThreadError, TimeoutError, DeviceNotRespondingError
+from wlauto.utils.misc import convert_new_lines
+from wlauto.utils.types import boolean, regex
+from wlauto.utils.android import (adb_shell, adb_background_shell, adb_list_devices,
+                                  adb_command, AndroidProperties, ANDROID_VERSION_MAP)
+
+
+SCREEN_STATE_REGEX = re.compile('(?:mPowerState|mScreenOn)=([0-9]+|true|false)', re.I)
+
+
+class AndroidDevice(BaseLinuxDevice):  # pylint: disable=W0223
+    """
+    Device running Android OS.
+
+    """
+
+    platform = 'android'
+
+    parameters = [
+        Parameter('adb_name',
+                  description='The unique ID of the device as output by "adb devices".'),
+        Parameter('android_prompt', kind=regex, default=re.compile('^.*(shell|root)@.*:/ [#$] ', re.MULTILINE),
+                  description='The regular expression used to match the shell prompt in Android.'),
+        Parameter('working_directory', default='/sdcard/wa-working',
+                  description='Directory that will be used by WA on the device for output files, etc.'),
+        Parameter('binaries_directory', default='/system/bin',
+                  description='Location of binaries on the device.'),
+        Parameter('package_data_directory', default='/data/data',
+                  description='Location of data for an installed package (APK).'),
+        Parameter('external_storage_directory', default='/sdcard',
+                  description='Mount point for external storage.'),
+        Parameter('connection', default='usb', allowed_values=['usb', 'ethernet'],
+                  description='Specifies the nature of the adb connection.'),
+        Parameter('logcat_poll_period', kind=int,
+                  description="""
+                  If specified and is not ``0``, logcat will be polled every
+                  ``logcat_poll_period`` seconds, and buffered on the host. This
+                  can be used if a lot of output is expected in logcat and the fixed
+                  logcat buffer on the device is not big enough. The trade-off is that
+                  this introduces some minor runtime overhead. Not set by default.
+                  """),
+        Parameter('enable_screen_check', kind=boolean, default=False,
+                  description="""
+                  Specifies whether the device should make sure that the screen is on
+                  during initialization.
+                  """),
+    ]
+
+    default_timeout = 30
+    delay = 2
+    long_delay = 3 * delay
+    ready_timeout = 60
+
+    # Overwritten from Device. For documentation, see corresponding method in
+    # Device.
+ + @property + def is_rooted(self): + if self._is_rooted is None: + try: + result = adb_shell(self.adb_name, 'su', timeout=1) + if 'not found' in result: + self._is_rooted = False + else: + self._is_rooted = True + except TimeoutError: + self._is_rooted = True + except DeviceError: + self._is_rooted = False + return self._is_rooted + + @property + def abi(self): + return self.getprop()['ro.product.cpu.abi'].split('-')[0] + + @property + def supported_eabi(self): + props = self.getprop() + result = [props['ro.product.cpu.abi']] + if 'ro.product.cpu.abi2' in props: + result.append(props['ro.product.cpu.abi2']) + if 'ro.product.cpu.abilist' in props: + for eabi in props['ro.product.cpu.abilist'].split(','): + if eabi not in result: + result.append(eabi) + return result + + def __init__(self, **kwargs): + super(AndroidDevice, self).__init__(**kwargs) + self._logcat_poller = None + + def reset(self): + self._is_ready = False + self._just_rebooted = True + adb_command(self.adb_name, 'reboot', timeout=self.default_timeout) + + def hard_reset(self): + super(AndroidDevice, self).hard_reset() + self._is_ready = False + self._just_rebooted = True + + def boot(self, **kwargs): + self.reset() + + def connect(self): # NOQA pylint: disable=R0912 + iteration_number = 0 + max_iterations = self.ready_timeout / self.delay + available = False + self.logger.debug('Polling for device {}...'.format(self.adb_name)) + while iteration_number < max_iterations: + devices = adb_list_devices() + if self.adb_name: + for device in devices: + if device.name == self.adb_name and device.status != 'offline': + available = True + else: # adb_name not set + if len(devices) == 1: + available = True + elif len(devices) > 1: + raise DeviceError('More than one device is connected and adb_name is not set.') + + if available: + break + else: + time.sleep(self.delay) + iteration_number += 1 + else: + raise DeviceError('Could not boot {} ({}).'.format(self.name, self.adb_name)) + + while iteration_number < max_iterations: + available = (1 == int('0' + adb_shell(self.adb_name, 'getprop sys.boot_completed', timeout=self.default_timeout))) + if available: + break + else: + time.sleep(self.delay) + iteration_number += 1 + else: + raise DeviceError('Could not boot {} ({}).'.format(self.name, self.adb_name)) + + if self._just_rebooted: + self.logger.debug('Waiting for boot to complete...') + # On some devices, adb connection gets reset some time after booting. + # This causes errors during execution. To prevent this, open a shell + # session and wait for it to be killed. Once its killed, give adb + # enough time to restart, and then the device should be ready. + # TODO: This is more of a work-around rather than an actual solution. + # Need to figure out what is going on the "proper" way of handling it. + try: + adb_shell(self.adb_name, '', timeout=20) + time.sleep(5) # give adb time to re-initialize + except TimeoutError: + pass # timed out waiting for the session to be killed -- assume not going to be. 
+ + self.logger.debug('Boot completed.') + self._just_rebooted = False + self._is_ready = True + + def initialize(self, context, *args, **kwargs): + self.execute('mkdir -p {}'.format(self.working_directory)) + if self.is_rooted: + if not self.executable_is_installed('busybox'): + self.busybox = self.deploy_busybox(context) + else: + self.busybox = 'busybox' + self.disable_screen_lock() + self.disable_selinux() + if self.enable_screen_check: + self.ensure_screen_is_on() + self.init(context, *args, **kwargs) + + def disconnect(self): + if self._logcat_poller: + self._logcat_poller.close() + + def ping(self): + try: + # May be triggered inside initialize() + adb_shell(self.adb_name, 'ls /', timeout=10) + except (TimeoutError, CalledProcessError): + raise DeviceNotRespondingError(self.adb_name or self.name) + + def start(self): + if self.logcat_poll_period: + if self._logcat_poller: + self._logcat_poller.close() + self._logcat_poller = _LogcatPoller(self, self.logcat_poll_period, timeout=self.default_timeout) + self._logcat_poller.start() + + def stop(self): + if self._logcat_poller: + self._logcat_poller.stop() + + def get_android_version(self): + return ANDROID_VERSION_MAP.get(self.get_sdk_version(), None) + + def get_android_id(self): + """ + Get the device's ANDROID_ID. Which is + + "A 64-bit number (as a hex string) that is randomly generated when the user + first sets up the device and should remain constant for the lifetime of the + user's device." + + .. note:: This will get reset on userdata erasure. + + """ + return self.execute('settings get secure android_id').strip() + + def get_sdk_version(self): + try: + return int(self.getprop('ro.build.version.sdk')) + except (ValueError, TypeError): + return None + + def get_installed_package_version(self, package): + """ + Returns the version (versionName) of the specified package if it is installed + on the device, or ``None`` otherwise. + + Added in version 2.1.4 + + """ + output = self.execute('dumpsys package {}'.format(package)) + for line in convert_new_lines(output).split('\n'): + if 'versionName' in line: + return line.split('=', 1)[1] + return None + + def list_packages(self): + """ + List packages installed on the device. + + Added in version 2.1.4 + + """ + output = self.execute('pm list packages') + output = output.replace('package:', '') + return output.split() + + def package_is_installed(self, package_name): + """ + Returns ``True`` the if a package with the specified name is installed on + the device, and ``False`` otherwise. + + Added in version 2.1.4 + + """ + return package_name in self.list_packages() + + def executable_is_installed(self, executable_name): + return executable_name in self.listdir(self.binaries_directory) + + def is_installed(self, name): + return self.executable_is_installed(name) or self.package_is_installed(name) + + def listdir(self, path, as_root=False, **kwargs): + contents = self.execute('ls {}'.format(path), as_root=as_root) + return [x.strip() for x in contents.split()] + + def push_file(self, source, dest, as_root=False, timeout=default_timeout): # pylint: disable=W0221 + """ + Modified in version 2.1.4: added ``as_root`` parameter. 
+ + """ + self._check_ready() + if not as_root: + adb_command(self.adb_name, "push '{}' '{}'".format(source, dest), timeout=timeout) + else: + device_tempfile = self.path.join(self.file_transfer_cache, source.lstrip(self.path.sep)) + self.execute('mkdir -p {}'.format(self.path.dirname(device_tempfile))) + adb_command(self.adb_name, "push '{}' '{}'".format(source, device_tempfile), timeout=timeout) + self.execute('cp {} {}'.format(device_tempfile, dest), as_root=True) + + def pull_file(self, source, dest, as_root=False, timeout=default_timeout): # pylint: disable=W0221 + """ + Modified in version 2.1.4: added ``as_root`` parameter. + + """ + self._check_ready() + if not as_root: + adb_command(self.adb_name, "pull '{}' '{}'".format(source, dest), timeout=timeout) + else: + device_tempfile = self.path.join(self.file_transfer_cache, source.lstrip(self.path.sep)) + self.execute('mkdir -p {}'.format(self.path.dirname(device_tempfile))) + self.execute('cp {} {}'.format(source, device_tempfile), as_root=True) + adb_command(self.adb_name, "pull '{}' '{}'".format(device_tempfile, dest), timeout=timeout) + + def delete_file(self, filepath, as_root=False): # pylint: disable=W0221 + self._check_ready() + adb_shell(self.adb_name, "rm '{}'".format(filepath), as_root=as_root, timeout=self.default_timeout) + + def file_exists(self, filepath): + self._check_ready() + output = adb_shell(self.adb_name, 'if [ -e \'{}\' ]; then echo 1; else echo 0; fi'.format(filepath), + timeout=self.default_timeout) + if int(output): + return True + else: + return False + + def install(self, filepath, timeout=default_timeout, with_name=None): # pylint: disable=W0221 + ext = os.path.splitext(filepath)[1].lower() + if ext == '.apk': + return self.install_apk(filepath, timeout) + else: + return self.install_executable(filepath, with_name) + + def install_apk(self, filepath, timeout=default_timeout): # pylint: disable=W0221 + self._check_ready() + ext = os.path.splitext(filepath)[1].lower() + if ext == '.apk': + return adb_command(self.adb_name, "install {}".format(filepath), timeout=timeout) + else: + raise DeviceError('Can\'t install {}: unsupported format.'.format(filepath)) + + def install_executable(self, filepath, with_name=None): + """ + Installs a binary executable on device. Requires root access. Returns + the path to the installed binary, or ``None`` if the installation has failed. + Optionally, ``with_name`` parameter may be used to specify a different name under + which the executable will be installed. + + Added in version 2.1.3. + Updated in version 2.1.5 with ``with_name`` parameter. 
+
+        """
+        executable_name = with_name or os.path.basename(filepath)
+        on_device_file = self.path.join(self.working_directory, executable_name)
+        on_device_executable = self.path.join(self.binaries_directory, executable_name)
+        self.push_file(filepath, on_device_file)
+        matched = []
+        for entry in self.list_file_systems():
+            if self.binaries_directory.rstrip('/').startswith(entry.mount_point):
+                matched.append(entry)
+
+        if matched:
+            entry = sorted(matched, key=lambda x: len(x.mount_point))[-1]
+            if 'rw' not in entry.options:
+                self.execute('mount -o rw,remount {} {}'.format(entry.device, entry.mount_point), as_root=True)
+            self.execute('cp {} {}'.format(on_device_file, on_device_executable), as_root=True)
+            self.execute('chmod 0777 {}'.format(on_device_executable), as_root=True)
+            return on_device_executable
+        else:
+            raise DeviceError('Could not find mount point for binaries directory {}'.format(self.binaries_directory))
+
+    def uninstall(self, package):
+        self._check_ready()
+        adb_command(self.adb_name, "uninstall {}".format(package), timeout=self.default_timeout)
+
+    def uninstall_executable(self, executable_name):
+        """
+        Requires root access.
+
+        Added in version 2.1.3.
+
+        """
+        on_device_executable = self.path.join(self.binaries_directory, executable_name)
+        for entry in self.list_file_systems():
+            if entry.mount_point == '/system':
+                if 'rw' not in entry.options:
+                    self.execute('mount -o rw,remount {} /system'.format(entry.device), as_root=True)
+        self.delete_file(on_device_executable)
+
+    def execute(self, command, timeout=default_timeout, check_exit_code=True, background=False,
+                as_root=False, busybox=False, **kwargs):
+        """
+        Execute the specified command on the device using adb.
+
+        Parameters:
+
+        :param command: The command to be executed. It should appear exactly
+                        as if you were typing it into a shell.
+        :param timeout: Time, in seconds, to wait for adb to return before aborting
+                        and raising an error. Defaults to ``AndroidDevice.default_timeout``.
+        :param check_exit_code: If ``True``, the return code of the command on the device will
+                                be checked, and an exception will be raised if it is not 0.
+                                Defaults to ``True``.
+        :param background: If ``True``, will execute adb in a subprocess, and will return
+                           immediately, not waiting for adb to return. Defaults to ``False``.
+        :param busybox: If ``True``, will use busybox to execute the command. Defaults to ``False``.
+
+                        Added in version 2.1.3
+
+                        .. note:: The device must be rooted to be able to use busybox.
+
+        :param as_root: If ``True``, will attempt to execute command in privileged mode. The device
+                        must be rooted, otherwise an error will be raised. Defaults to ``False``.
+
+                        Added in version 2.1.3
+
+        :returns: If the ``background`` parameter is set to ``True``, the subprocess object will
+                  be returned; otherwise, the contents of STDOUT from the device will be returned.
+
+        :raises: DeviceError if adb timed out, if the command returned a non-zero exit
+                 code on the device, or if attempting to execute a command in privileged mode on an
+                 unrooted device.
+
+        """
+        self._check_ready()
+        if as_root and not self.is_rooted:
+            raise DeviceError('Attempting to execute "{}" as root on unrooted device.'.format(command))
+        if busybox:
+            if not self.is_rooted:
+                raise DeviceError('Attempting to execute "{}" with busybox. '.format(command) +
+                                  'Busybox can only be deployed to rooted devices.')
+            command = ' '.join([self.busybox, command])
+        if background:
+            return adb_background_shell(self.adb_name, command, as_root=as_root)
+        else:
+            return adb_shell(self.adb_name, command, timeout, check_exit_code, as_root)
+
+    def kick_off(self, command):
+        """
+        Like execute, but closes the adb session and returns immediately, leaving the command
+        running on the device (this is different from execute(background=True), which keeps
+        the adb connection open and returns a subprocess object).
+
+        .. note:: This relies on busybox's nohup applet and so won't work on unrooted devices.
+
+        Added in version 2.1.4
+
+        """
+        if not self.is_rooted:
+            raise DeviceError('kick_off uses busybox\'s nohup applet and so can only be run on a rooted device.')
+        try:
+            command = 'cd {} && busybox nohup {}'.format(self.working_directory, command)
+            output = self.execute(command, timeout=1, as_root=True)
+        except TimeoutError:
+            pass
+        else:
+            raise ValueError('Background command exited before timeout; got "{}"'.format(output))
+
+    def get_properties(self, context):
+        """Captures and saves the information from /system/build.prop and /proc/version."""
+        props = {}
+        props['android_id'] = self.get_android_id()
+        buildprop_file = os.path.join(context.host_working_directory, 'build.prop')
+        if not os.path.isfile(buildprop_file):
+            self.pull_file('/system/build.prop', context.host_working_directory)
+        self._update_build_properties(buildprop_file, props)
+        context.add_run_artifact('build_properties', buildprop_file, 'export')
+
+        version_file = os.path.join(context.host_working_directory, 'version')
+        if not os.path.isfile(version_file):
+            self.pull_file('/proc/version', context.host_working_directory)
+        self._update_versions(version_file, props)
+        context.add_run_artifact('device_version', version_file, 'export')
+        return props
+
+    def getprop(self, prop=None):
+        """Returns parsed output of the Android getprop command. If a property is
+        specified, only the value for that property will be returned (with
+        ``None`` returned if the property doesn't exist). Otherwise,
+        ``wlauto.utils.android.AndroidProperties`` will be returned, which is
+        a dict-like object."""
+        props = AndroidProperties(self.execute('getprop'))
+        if prop:
+            return props[prop]
+        return props
+
+    # Android-specific methods. These either rely on specifics of adb or other
+    # Android-only concepts in their interface and/or implementation.
+
+    def forward_port(self, from_port, to_port):
+        """
+        Forward a port on the device to a port on localhost.
+
+        :param from_port: Port on the device to forward.
+        :param to_port: Port on the localhost to which the device port will be forwarded.
+
+        Ports should be specified using adb spec. See the "adb forward" section in "adb help".
+
+        """
+        adb_command(self.adb_name, 'forward {} {}'.format(from_port, to_port), timeout=self.default_timeout)
+
+    def dump_logcat(self, outfile, filter_spec=None):
+        """
+        Dump the contents of logcat for the specified filter spec to the
+        specified output file.
+        See http://developer.android.com/tools/help/logcat.html
+
+        :param outfile: Output file on the host into which the contents of the
+                        log will be written.
+        :param filter_spec: Logcat filter specification.
+                            see http://developer.android.com/tools/debugging/debugging-log.html#filteringOutput
+
+        """
+        if self._logcat_poller:
+            return self._logcat_poller.write_log(outfile)
+        else:
+            if filter_spec:
+                command = 'logcat -d -s {} > {}'.format(filter_spec, outfile)
+            else:
+                command = 'logcat -d > {}'.format(outfile)
+            return adb_command(self.adb_name, command, timeout=self.default_timeout)
+
+    def clear_logcat(self):
+        """Clear (flush) logcat log."""
+        if self._logcat_poller:
+            return self._logcat_poller.clear_buffer()
+        else:
+            return adb_shell(self.adb_name, 'logcat -c', timeout=self.default_timeout)
+
+    def capture_screen(self, filepath):
+        """Captures the current device screen into the specified file in PNG format."""
+        on_device_file = self.path.join(self.working_directory, 'screen_capture.png')
+        self.execute('screencap -p {}'.format(on_device_file))
+        self.pull_file(on_device_file, filepath)
+        self.delete_file(on_device_file)
+
+    def is_screen_on(self):
+        """Returns ``True`` if the device screen is currently on, ``False`` otherwise."""
+        output = self.execute('dumpsys power')
+        match = SCREEN_STATE_REGEX.search(output)
+        if match:
+            return boolean(match.group(1))
+        else:
+            raise DeviceError('Could not establish screen state.')
+
+    def ensure_screen_is_on(self):
+        if not self.is_screen_on():
+            self.execute('input keyevent 26')
+
+    def disable_screen_lock(self):
+        """
+        Attempts to disable the screen lock on the device.
+
+        .. note:: This does not always work...
+
+        Added in version 2.1.4
+
+        """
+        lockdb = '/data/system/locksettings.db'
+        sqlcommand = "update locksettings set value=\\'0\\' where name=\\'screenlock.disabled\\';"
+        self.execute('sqlite3 {} "{}"'.format(lockdb, sqlcommand), as_root=True)
+
+    def disable_selinux(self):
+        # This may be invoked from initialize(), so we can't use execute() or the
+        # standard API for doing this.
+        api_level = int(adb_shell(self.adb_name, 'getprop ro.build.version.sdk',
+                                  timeout=self.default_timeout).strip())
+        # SELinux was added in Android 4.3 (API level 18). Trying to
+        # 'getenforce' in earlier versions will produce an error.
+        if api_level >= 18:
+            se_status = self.execute('getenforce', as_root=True).strip()
+            if se_status == 'Enforcing':
+                self.execute('setenforce 0', as_root=True)
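
The screen-state check implemented by is_screen_on() above can be exercised on its
own. A minimal standalone sketch; the sample ``dumpsys power`` output is invented for
illustration, since real output varies across Android versions, which is why the regex
accepts both ``mPowerState`` and ``mScreenOn``, in either numeric or boolean form:

    import re

    SCREEN_STATE_REGEX = re.compile('(?:mPowerState|mScreenOn)=([0-9]+|true|false)', re.I)

    sample = 'Power Manager State:\n  mScreenOn=true\n  mPartialWakeLockCount=0'
    match = SCREEN_STATE_REGEX.search(sample)
    if match:
        value = match.group(1).lower()
        # stands in for wlauto.utils.types.boolean() used in the class above
        screen_on = value == 'true' or (value.isdigit() and int(value) != 0)
        print(screen_on)  # True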
+
+    # Internal methods: do not use outside of the class.
+
+    def _update_build_properties(self, filepath, props):
+        try:
+            with open(filepath) as fh:
+                for line in fh:
+                    line = re.sub(r'#.*', '', line).strip()
+                    if not line:
+                        continue
+                    key, value = line.split('=', 1)
+                    props[key] = value
+        except ValueError:
+            self.logger.warning('Could not parse build.prop.')
+
+    def _update_versions(self, filepath, props):
+        with open(filepath) as fh:
+            text = fh.read()
+            props['version'] = text
+            text = re.sub(r'#.*', '', text).strip()
+            match = re.search(r'^(Linux version .*?)\s*\((gcc version .*)\)$', text)
+            if match:
+                props['linux_version'] = match.group(1).strip()
+                props['gcc_version'] = match.group(2).strip()
+            else:
+                self.logger.warning('Could not parse version string.')
+
+
+class _LogcatPoller(threading.Thread):
+
+    join_timeout = 5
+
+    def __init__(self, device, period, timeout=None):
+        super(_LogcatPoller, self).__init__()
+        self.adb_device = device.adb_name
+        self.logger = device.logger
+        self.period = period
+        self.timeout = timeout
+        self.stop_signal = threading.Event()
+        self.lock = threading.RLock()
+        self.buffer_file = tempfile.mktemp()
+        self.last_poll = 0
+        self.daemon = True
+        self.exc = None
+
+    def run(self):
+        self.logger.debug('Starting logcat polling.')
+        try:
+            while True:
+                if self.stop_signal.is_set():
+                    break
+                with self.lock:
+                    current_time = time.time()
+                    if (current_time - self.last_poll) >= self.period:
+                        self._poll()
+                time.sleep(0.5)
+        except Exception:  # pylint: disable=W0703
+            self.exc = WorkerThreadError(self.name, sys.exc_info())
+        self.logger.debug('Logcat polling stopped.')
+
+    def stop(self):
+        self.logger.debug('Stopping logcat polling.')
+        self.stop_signal.set()
+        self.join(self.join_timeout)
+        if self.is_alive():
+            self.logger.error('Could not join logcat poller thread.')
+        if self.exc:
+            raise self.exc  # pylint: disable=E0702
+
+    def clear_buffer(self):
+        self.logger.debug('Clearing logcat buffer.')
+        with self.lock:
+            adb_shell(self.adb_device, 'logcat -c', timeout=self.timeout)
+            with open(self.buffer_file, 'w') as _:  # NOQA
+                pass
+
+    def write_log(self, outfile):
+        self.logger.debug('Writing log buffer to {}.'.format(outfile))
+        with self.lock:
+            self._poll()
+            if os.path.isfile(self.buffer_file):
+                shutil.copy(self.buffer_file, outfile)
+            else:  # there was no logcat trace at this time
+                with open(outfile, 'w') as _:  # NOQA
+                    pass
+
+    def close(self):
+        self.logger.debug('Closing logcat poller.')
+        if os.path.isfile(self.buffer_file):
+            os.remove(self.buffer_file)
+
+    def _poll(self):
+        with self.lock:
+            self.last_poll = time.time()
+            adb_command(self.adb_device, 'logcat -d >> {}'.format(self.buffer_file), timeout=self.timeout)
+            adb_command(self.adb_device, 'logcat -c', timeout=self.timeout)
+
+
+class BigLittleDevice(AndroidDevice):  # pylint: disable=W0223
+
+    parameters = [
+        Parameter('scheduler', default='hmp', override=True),
+    ]
+
diff --git a/wlauto/common/android/resources.py b/wlauto/common/android/resources.py
new file mode 100644
index 00000000..27231e16
--- /dev/null
+++ b/wlauto/common/android/resources.py
@@ -0,0 +1,36 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from wlauto.common.resources import FileResource
+
+
+class ReventFile(FileResource):
+
+    name = 'revent'
+
+    def __init__(self, owner, stage):
+        super(ReventFile, self).__init__(owner)
+        self.stage = stage
+
+
+class JarFile(FileResource):
+
+    name = 'jar'
+
+
+class ApkFile(FileResource):
+
+    name = 'apk'
diff --git a/wlauto/common/android/workload.py b/wlauto/common/android/workload.py
new file mode 100644
index 00000000..ee49c061
--- /dev/null
+++ b/wlauto/common/android/workload.py
@@ -0,0 +1,425 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+import sys
+import time
+
+from wlauto.core.extension import Parameter
+from wlauto.core.workload import Workload
+from wlauto.core.resource import NO_ONE
+from wlauto.common.resources import ExtensionAsset, Executable
+from wlauto.exceptions import WorkloadError, ResourceError
+from wlauto.utils.android import ApkInfo
+from wlauto.utils.types import boolean
+import wlauto.common.android.resources
+
+
+DELAY = 5
+
+
+class UiAutomatorWorkload(Workload):
+    """
+    Base class for all workloads that rely on a UI Automator JAR file.
+
+    This class should be subclassed by workloads that rely on Android UiAutomator
+    to work. This class handles transferring the UI Automator JAR file to the device
+    and invoking it to run the workload. By default, it will look for the JAR file in
+    the same directory as the .py file for the workload (this can be changed by overriding
+    the ``uiauto_file`` property in the subclassing workload).
+
+    To initiate UI Automation, the fully-qualified name of the Java class and the
+    corresponding method name are needed. By default, the package part of the class name
+    is derived from the name of the JAR file, and the class and method names are ``UiAutomation``
+    and ``runUiAutomation`` respectively. If you have generated the boilerplate for the
+    UiAutomator code using the ``create_workloads`` utility, then everything should be named
+    correctly. If you're creating the Java project manually, you need to make sure the names
+    match what is expected, or you could override the ``uiauto_package``, ``uiauto_class`` and
+    ``uiauto_method`` class attributes with the values that match your Java code.
+
+    You can also pass parameters to the JAR file. To do this, add the parameters to the
+    ``self.uiauto_params`` dict inside your class's ``__init__`` or ``setup`` methods.
+
+    """
+
+    supported_platforms = ['android']
+
+    uiauto_package = ''
+    uiauto_class = 'UiAutomation'
+    uiauto_method = 'runUiAutomation'
+
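
To make the mechanics concrete, the shell command that ``setup()`` below assembles
before pushing the JAR can be sketched in isolation. Everything here (package name,
JAR path, parameter names and values) is hypothetical, for illustration only:

    # Mirrors the string building in UiAutomatorWorkload.setup(); values are made up.
    params = {'workdir': '/sdcard/wa-working', 'iterations': 5}
    param_string = ''.join(' -e {} {}'.format(k, v) for k, v in params.items())
    command = 'uiautomator runtest {}{} -c {}'.format(
        '/sdcard/wa-working/com.example.uiauto.jar',
        param_string,
        'com.example.uiauto.UiAutomation#runUiAutomation')
    # e.g. (parameter order may vary):
    # uiautomator runtest /sdcard/wa-working/com.example.uiauto.jar \
    #     -e workdir /sdcard/wa-working -e iterations 5 \
    #     -c com.example.uiauto.UiAutomation#runUiAutomation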
+    # Can be overridden by subclasses to adjust to the run time of specific
+    # benchmarks.
+    run_timeout = 4 * 60  # seconds
+
+    def __init__(self, device, _call_super=True, **kwargs):  # pylint: disable=W0613
+        if _call_super:
+            super(UiAutomatorWorkload, self).__init__(device, **kwargs)
+        self.uiauto_file = None
+        self.device_uiauto_file = None
+        self.command = None
+        self.uiauto_params = {}
+
+    def init_resources(self, context):
+        self.uiauto_file = context.resolver.get(wlauto.common.android.resources.JarFile(self))
+        if not self.uiauto_file:
+            raise ResourceError('No UI automation JAR file found for workload {}.'.format(self.name))
+        self.device_uiauto_file = self.device.path.join(self.device.working_directory,
+                                                        os.path.basename(self.uiauto_file))
+        if not self.uiauto_package:
+            self.uiauto_package = os.path.splitext(os.path.basename(self.uiauto_file))[0]
+
+    def setup(self, context):
+        method_string = '{}.{}#{}'.format(self.uiauto_package, self.uiauto_class, self.uiauto_method)
+        params_dict = self.uiauto_params
+        params_dict['workdir'] = self.device.working_directory
+        params = ''
+        for k, v in self.uiauto_params.iteritems():
+            params += ' -e {} {}'.format(k, v)
+        self.command = 'uiautomator runtest {}{} -c {}'.format(self.device_uiauto_file, params, method_string)
+        self.device.push_file(self.uiauto_file, self.device_uiauto_file)
+        self.device.killall('uiautomator')
+
+    def run(self, context):
+        result = self.device.execute(self.command, self.run_timeout)
+        if 'FAILURE' in result:
+            raise WorkloadError(result)
+        else:
+            self.logger.debug(result)
+        time.sleep(DELAY)
+
+    def update_result(self, context):
+        pass
+
+    def teardown(self, context):
+        self.device.delete_file(self.device_uiauto_file)
+
+    def validate(self):
+        if not self.uiauto_file:
+            raise WorkloadError('No UI automation JAR file found for workload {}.'.format(self.name))
+        if not self.uiauto_package:
+            raise WorkloadError('No UI automation package specified for workload {}.'.format(self.name))
+
+
+class ApkWorkload(Workload):
+    """
+    A workload based on an APK file.
+
+    Defines the following attributes:
+
+    :package: The package name of the app. This is usually a Java-style name of the form
+              ``com.companyname.appname``.
+    :activity: This is the initial activity of the app. This will be used to launch the
+               app during the setup.
+    :view: The class of the main view pane of the app. This needs to be defined in order
+           to collect SurfaceFlinger-derived statistics (such as FPS) for the app, but
+           may otherwise be left as ``None``.
+    :install_timeout: Timeout for the installation of the APK. This may vary wildly based on
+                      the size and nature of a specific APK, and so should be defined on
+                      a per-workload basis.
+
+                      .. note:: To a lesser extent, this will also vary based on the
+                                device and the nature of the adb connection (USB vs Ethernet),
+                                so, as with all timeouts, some leeway must be included in
+                                the specified value.
+
+    .. note:: Both package and activity for a workload may be obtained from the APK using
+              the ``aapt`` tool that comes with the ADT (Android Development Tools) bundle.
+
+    """
+    package = None
+    activity = None
+    view = None
+    install_timeout = None
+    default_install_timeout = 300
+
+    parameters = [
+        Parameter('uninstall_apk', kind=boolean, default=False,
+                  description="If ``True``, will uninstall workload's APK as part of teardown."),
+    ]
+
+    def __init__(self, device, _call_super=True, **kwargs):
+        if _call_super:
+            super(ApkWorkload, self).__init__(device, **kwargs)
+        self.apk_file = None
+        self.apk_version = None
+        self.logcat_log = None
+        self.force_reinstall = kwargs.get('force_reinstall', False)
+        if not self.install_timeout:
+            self.install_timeout = self.default_install_timeout
+
+    def init_resources(self, context):
+        self.apk_file = context.resolver.get(wlauto.common.android.resources.ApkFile(self), version=getattr(self, 'version', None))
+
+    def setup(self, context):
+        self.initialize_package(context)
+        self.start_activity()
+        self.device.execute('am kill-all')  # kill all *background* activities
+        self.device.clear_logcat()
+
+    def initialize_package(self, context):
+        installed_version = self.device.get_installed_package_version(self.package)
+        host_version = ApkInfo(self.apk_file).version_name
+        if installed_version != host_version:
+            if installed_version:
+                message = '{} host version: {}, device version: {}; re-installing...'
+                self.logger.debug(message.format(os.path.basename(self.apk_file), host_version, installed_version))
+            else:
+                message = '{} host version: {}, not found on device; installing...'
+                self.logger.debug(message.format(os.path.basename(self.apk_file), host_version))
+            self.force_reinstall = True
+        else:
+            message = '{} version {} found on both device and host.'
+            self.logger.debug(message.format(os.path.basename(self.apk_file), host_version))
+        if self.force_reinstall:
+            if installed_version:
+                self.device.uninstall(self.package)
+            self.install_apk(context)
+        else:
+            self.reset(context)
+        self.apk_version = host_version
+
+    def start_activity(self):
+        output = self.device.execute('am start -W -n {}/{}'.format(self.package, self.activity))
+        if 'Error:' in output:
+            self.device.execute('am force-stop {}'.format(self.package))  # this will dismiss any error dialogs
+            raise WorkloadError(output)
+        self.logger.debug(output)
+
+    def reset(self, context):  # pylint: disable=W0613
+        self.device.execute('am force-stop {}'.format(self.package))
+        self.device.execute('pm clear {}'.format(self.package))
+
+    def install_apk(self, context):
+        output = self.device.install(self.apk_file, self.install_timeout)
+        if 'Failure' in output:
+            if 'ALREADY_EXISTS' in output:
+                self.logger.warn('Using already installed APK (did not uninstall properly?)')
+            else:
+                raise WorkloadError(output)
+        else:
+            self.logger.debug(output)
+        self.do_post_install(context)
+
+    def do_post_install(self, context):
+        """May be overwritten by derived classes."""
+        pass
+
+    def run(self, context):
+        pass
+
+    def update_result(self, context):
+        self.logcat_log = os.path.join(context.output_directory, 'logcat.log')
+        self.device.dump_logcat(self.logcat_log)
+        context.add_iteration_artifact(name='logcat',
+                                       path='logcat.log',
+                                       kind='log',
+                                       description='Logcat dump for the run.')
+
+    def teardown(self, context):
+        self.device.execute('am force-stop {}'.format(self.package))
+        if self.uninstall_apk:
+            self.device.uninstall(self.package)
+
+    def validate(self):
+        if not self.apk_file:
+            raise WorkloadError('No APK file found for workload {}.'.format(self.name))
+
+
+AndroidBenchmark = ApkWorkload  # backward compatibility
+
+
+class ReventWorkload(Workload):
+
+    default_setup_timeout = 5 * 60  # in seconds
+    default_run_timeout = 10 * 60  # in seconds
+
+    def __init__(self, device, _call_super=True, **kwargs):
+        if _call_super:
+            super(ReventWorkload, self).__init__(device, **kwargs)
+        devpath = self.device.path
+        self.on_device_revent_binary = devpath.join(self.device.working_directory, 'revent')
+        self.on_device_setup_revent = devpath.join(self.device.working_directory, '{}.setup.revent'.format(self.device.name))
+        self.on_device_run_revent = devpath.join(self.device.working_directory, '{}.run.revent'.format(self.device.name))
+        self.setup_timeout = kwargs.get('setup_timeout', self.default_setup_timeout)
+        self.run_timeout = kwargs.get('run_timeout', self.default_run_timeout)
+        self.revent_setup_file = None
+        self.revent_run_file = None
+
+    def init_resources(self, context):
+        self.revent_setup_file = context.resolver.get(wlauto.common.android.resources.ReventFile(self, 'setup'))
+        self.revent_run_file = context.resolver.get(wlauto.common.android.resources.ReventFile(self, 'run'))
+
+    def setup(self, context):
+        self._check_revent_files(context)
+        self.device.killall('revent')
+        command = '{} replay {}'.format(self.on_device_revent_binary, self.on_device_setup_revent)
+        self.device.execute(command, timeout=self.setup_timeout)
+
+    def run(self, context):
+        command = '{} replay {}'.format(self.on_device_revent_binary, self.on_device_run_revent)
+        self.logger.debug('Replaying {}'.format(os.path.basename(self.on_device_run_revent)))
+        self.device.execute(command, timeout=self.run_timeout)
+        self.logger.debug('Replay completed.')
+
+    def update_result(self, context):
+        pass
+
+    def teardown(self, context):
+        self.device.delete_file(self.on_device_setup_revent)
+        self.device.delete_file(self.on_device_run_revent)
+
+    def _check_revent_files(self, context):
+        # check the revent binary
+        revent_binary = context.resolver.get(Executable(NO_ONE, self.device.abi, 'revent'))
+        if not os.path.isfile(revent_binary):
+            message = '{} does not exist. '.format(revent_binary)
+            message += 'Please build revent for your system and place it in that location.'
+            raise WorkloadError(message)
+        if not self.revent_setup_file:
+            # pylint: disable=too-few-format-args
+            message = '{0}.setup.revent file does not exist. Please provide one for your device, {0}.'.format(self.device.name)
+            raise WorkloadError(message)
+        if not self.revent_run_file:
+            # pylint: disable=too-few-format-args
+            message = '{0}.run.revent file does not exist. Please provide one for your device, {0}.'.format(self.device.name)
+            raise WorkloadError(message)
+
+        self.on_device_revent_binary = self.device.install_executable(revent_binary)
+        self.device.push_file(self.revent_run_file, self.on_device_run_revent)
+        self.device.push_file(self.revent_setup_file, self.on_device_setup_revent)
+
+
+class AndroidUiAutoBenchmark(UiAutomatorWorkload, AndroidBenchmark):
+
+    def __init__(self, device, **kwargs):
+        UiAutomatorWorkload.__init__(self, device, **kwargs)
+        AndroidBenchmark.__init__(self, device, _call_super=False, **kwargs)
+
+    def init_resources(self, context):
+        UiAutomatorWorkload.init_resources(self, context)
+        AndroidBenchmark.init_resources(self, context)
+
+    def setup(self, context):
+        UiAutomatorWorkload.setup(self, context)
+        AndroidBenchmark.setup(self, context)
+
+    def update_result(self, context):
+        UiAutomatorWorkload.update_result(self, context)
+        AndroidBenchmark.update_result(self, context)
+
+    def teardown(self, context):
+        UiAutomatorWorkload.teardown(self, context)
+        AndroidBenchmark.teardown(self, context)
+
+
+class GameWorkload(ApkWorkload, ReventWorkload):
+    """
+    GameWorkload is the base class for all workloads that use revent files to
+    run.
+
+    For more in-depth details on how to record revent files, please see
+    :ref:`revent_files_creation`. To subclass this class, please refer to
+    :ref:`GameWorkload`.
+
+    Additionally, this class defines the following attributes:
+
+    :asset_file: A tarball containing additional assets for the workload. These are the assets
+                 that are not part of the APK but would need to be downloaded by the workload
+                 (usually, on first run of the app). Since the presence of a network connection
+                 cannot be assumed on some devices, this provides an alternative means of obtaining
+                 the assets.
+    :saved_state_file: A tarball containing the saved state for a workload. This tarball gets
+                       deployed in the same way as the asset file. The only difference is that
+                       it is usually much slower, and re-deploying the tarball alone should be
+                       enough to reset the workload to a known state (without having to reinstall
+                       the app or re-deploy the other assets).
+    :loading_time: Time it takes for the workload to load after the initial activity has been
+                   started.
+ + """ + + # May be optionally overwritten by subclasses + asset_file = None + saved_state_file = None + view = 'SurfaceView' + install_timeout = 500 + loading_time = 10 + + def __init__(self, device, **kwargs): # pylint: disable=W0613 + ApkWorkload.__init__(self, device, **kwargs) + ReventWorkload.__init__(self, device, _call_super=False, **kwargs) + self.logcat_process = None + self.module_dir = os.path.dirname(sys.modules[self.__module__].__file__) + self.revent_dir = os.path.join(self.module_dir, 'revent_files') + + def init_resources(self, context): + ApkWorkload.init_resources(self, context) + ReventWorkload.init_resources(self, context) + + def setup(self, context): + ApkWorkload.setup(self, context) + self.logger.debug('Waiting for the game to load...') + time.sleep(self.loading_time) + ReventWorkload.setup(self, context) + + def do_post_install(self, context): + ApkWorkload.do_post_install(self, context) + self._deploy_assets(context) + + def reset(self, context): + # If saved state exists, restore it; if not, do full + # uninstall/install cycle. + if self.saved_state_file: + self._deploy_resource_tarball(context, self.saved_state_file) + else: + ApkWorkload.reset(self, context) + self._deploy_assets(context) + + def run(self, context): + ReventWorkload.run(self, context) + + def teardown(self, context): + if not self.saved_state_file: + ApkWorkload.teardown(self, context) + else: + self.device.execute('am force-stop {}'.format(self.package)) + ReventWorkload.teardown(self, context) + + def _deploy_assets(self, context, timeout=300): + if self.asset_file: + self._deploy_resource_tarball(context, self.asset_file, timeout) + if self.saved_state_file: # must be deployed *after* asset tarball! + self._deploy_resource_tarball(context, self.saved_state_file, timeout) + + def _deploy_resource_tarball(self, context, resource_file, timeout=300): + kind = 'data' + if ':' in resource_file: + kind, resource_file = resource_file.split(':', 1) + ondevice_cache = self.device.path.join(self.device.resource_cache, self.name, resource_file) + if not self.device.file_exists(ondevice_cache): + asset_tarball = context.resolver.get(ExtensionAsset(self, resource_file)) + if not asset_tarball: + message = 'Could not find resource {} for workload {}.' + raise WorkloadError(message.format(resource_file, self.name)) + # adb push will create intermediate directories if they don't + # exist. 
+ self.device.push_file(asset_tarball, ondevice_cache) + + device_asset_directory = self.device.path.join(self.device.external_storage_directory, 'Android', kind) + deploy_command = 'cd {} && {} tar -xzf {}'.format(device_asset_directory, + self.device.busybox, + ondevice_cache) + self.device.execute(deploy_command, timeout=timeout, as_root=True) diff --git a/wlauto/common/bin/arm64/busybox b/wlauto/common/bin/arm64/busybox new file mode 100755 index 00000000..6d09a079 Binary files /dev/null and b/wlauto/common/bin/arm64/busybox differ diff --git a/wlauto/common/bin/arm64/revent b/wlauto/common/bin/arm64/revent new file mode 100755 index 00000000..4d7ee72f Binary files /dev/null and b/wlauto/common/bin/arm64/revent differ diff --git a/wlauto/common/bin/armeabi/busybox b/wlauto/common/bin/armeabi/busybox new file mode 100755 index 00000000..1714d40a Binary files /dev/null and b/wlauto/common/bin/armeabi/busybox differ diff --git a/wlauto/common/bin/armeabi/revent b/wlauto/common/bin/armeabi/revent new file mode 100755 index 00000000..e0fa4d23 Binary files /dev/null and b/wlauto/common/bin/armeabi/revent differ diff --git a/wlauto/common/linux/__init__.py b/wlauto/common/linux/__init__.py new file mode 100644 index 00000000..16224d6f --- /dev/null +++ b/wlauto/common/linux/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2014-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + diff --git a/wlauto/common/linux/device.py b/wlauto/common/linux/device.py new file mode 100644 index 00000000..ecac286a --- /dev/null +++ b/wlauto/common/linux/device.py @@ -0,0 +1,966 @@ +# Copyright 2014-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# pylint: disable=E1101 +import os +import re +from collections import namedtuple +from subprocess import CalledProcessError + +from wlauto.core.extension import Parameter +from wlauto.core.device import Device, RuntimeParameter, CoreParameter +from wlauto.core.resource import NO_ONE +from wlauto.exceptions import ConfigError, DeviceError, TimeoutError, DeviceNotRespondingError +from wlauto.common.resources import Executable +from wlauto.utils.cpuinfo import Cpuinfo +from wlauto.utils.misc import convert_new_lines, escape_double_quotes +from wlauto.utils.ssh import SshShell +from wlauto.utils.types import boolean, list_of_strings + + +# a dict of governor name and a list of it tunables that can't be read +WRITE_ONLY_TUNABLES = { + 'interactive': ['boostpulse'] +} + +FstabEntry = namedtuple('FstabEntry', ['device', 'mount_point', 'fs_type', 'options', 'dump_freq', 'pass_num']) +PsEntry = namedtuple('PsEntry', 'user pid ppid vsize rss wchan pc state name') + + +class BaseLinuxDevice(Device): # pylint: disable=abstract-method + + path_module = 'posixpath' + has_gpu = True + + parameters = [ + Parameter('scheduler', kind=str, default='unknown', + allowed_values=['unknown', 'smp', 'hmp', 'iks', 'ea', 'other'], + description=""" + Specifies the type of multi-core scheduling model utilized in the device. The value + must be one of the following: + + :unknown: A generic Device interface is used to interact with the underlying device + and the underlying scheduling model is unkown. + :smp: A standard single-core or Symmetric Multi-Processing system. + :hmp: ARM Heterogeneous Multi-Processing system. + :iks: Linaro In-Kernel Switcher. + :ea: ARM Energy-Aware scheduler. + :other: Any other system not covered by the above. + + .. note:: most currently-available systems would fall under ``smp`` rather than + this value. ``other`` is there to future-proof against new schemes + not yet covered by WA. + + """), + Parameter('iks_switch_frequency', kind=int, default=None, + description=""" + This is the switching frequency, in kilohertz, of IKS devices. This parameter *MUST NOT* + be set for non-IKS device (i.e. ``scheduler != 'iks'``). If left unset for IKS devices, + it will default to ``800000``, i.e. 800MHz. + """), + + ] + + runtime_parameters = [ + RuntimeParameter('sysfile_values', 'get_sysfile_values', 'set_sysfile_values', value_name='params'), + CoreParameter('${core}_cores', 'get_number_of_active_cores', 'set_number_of_active_cores', + value_name='number'), + CoreParameter('${core}_min_frequency', 'get_core_min_frequency', 'set_core_min_frequency', + value_name='freq'), + CoreParameter('${core}_max_frequency', 'get_core_max_frequency', 'set_core_max_frequency', + value_name='freq'), + CoreParameter('${core}_governor', 'get_core_governor', 'set_core_governor', + value_name='governor'), + CoreParameter('${core}_governor_tunables', 'get_core_governor_tunables', 'set_core_governor_tunables', + value_name='tunables'), + ] + + @property + def active_cpus(self): + val = self.get_sysfile_value('/sys/devices/system/cpu/online') + cpus = re.findall(r"([\d]\-[\d]|[\d])", val) + active_cpus = [] + for cpu in cpus: + if '-' in cpu: + lo, hi = cpu.split('-') + active_cpus.extend(range(int(lo), int(hi) + 1)) + else: + active_cpus.append(int(cpu)) + return active_cpus + + @property + def number_of_cores(self): + """ + Added in version 2.1.4. 
+ + """ + if self._number_of_cores is None: + corere = re.compile('^\s*cpu\d+\s*$') + output = self.execute('ls /sys/devices/system/cpu') + self._number_of_cores = 0 + for entry in output.split(): + if corere.match(entry): + self._number_of_cores += 1 + return self._number_of_cores + + @property + def resource_cache(self): + return self.path.join(self.working_directory, '.cache') + + @property + def file_transfer_cache(self): + return self.path.join(self.working_directory, '.transfer') + + @property + def cpuinfo(self): + if not self._cpuinfo: + self._cpuinfo = Cpuinfo(self.execute('cat /proc/cpuinfo')) + return self._cpuinfo + + def __init__(self, **kwargs): + super(BaseLinuxDevice, self).__init__(**kwargs) + self.busybox = None + self._is_initialized = False + self._is_ready = False + self._just_rebooted = False + self._is_rooted = None + self._available_frequencies = {} + self._available_governors = {} + self._available_governor_tunables = {} + self._number_of_cores = None + self._written_sysfiles = [] + self._cpuinfo = None + + def validate(self): + if len(self.core_names) != len(self.core_clusters): + raise ConfigError('core_names and core_clusters are of different lengths.') + if self.iks_switch_frequency is not None and self.scheduler != 'iks': # pylint: disable=E0203 + raise ConfigError('iks_switch_frequency must NOT be set for non-IKS devices.') + if self.iks_switch_frequency is None and self.scheduler == 'iks': # pylint: disable=E0203 + self.iks_switch_frequency = 800000 # pylint: disable=W0201 + + def initialize(self, context, *args, **kwargs): + self.execute('mkdir -p {}'.format(self.working_directory)) + if self.is_rooted: + if not self.is_installed('busybox'): + self.busybox = self.deploy_busybox(context) + else: + self.busybox = 'busybox' + self.init(context, *args, **kwargs) + + def get_sysfile_value(self, sysfile, kind=None): + """ + Get the contents of the specified sysfile. + + :param sysfile: The file who's contents will be returned. + + :param kind: The type of value to be expected in the sysfile. This can + be any Python callable that takes a single str argument. + If not specified or is None, the contents will be returned + as a string. + + """ + output = self.execute('cat \'{}\''.format(sysfile), as_root=True).strip() # pylint: disable=E1103 + if kind: + return kind(output) + else: + return output + + def set_sysfile_value(self, sysfile, value, verify=True): + """ + Set the value of the specified sysfile. By default, the value will be checked afterwards. + Can be overridden by setting ``verify`` parameter to ``False``. + + """ + value = str(value) + self.execute('echo {} > \'{}\''.format(value, sysfile), check_exit_code=False, as_root=True) + if verify: + output = self.get_sysfile_value(sysfile) + if not output.strip() == value: # pylint: disable=E1103 + message = 'Could not set the value of {} to {}'.format(sysfile, value) + raise DeviceError(message) + self._written_sysfiles.append(sysfile) + + def get_sysfile_values(self): + """ + Returns a dict mapping paths of sysfiles that were previously set to their + current values. + + """ + values = {} + for sysfile in self._written_sysfiles: + values[sysfile] = self.get_sysfile_value(sysfile) + return values + + def set_sysfile_values(self, params): + """ + The plural version of ``set_sysfile_value``. Takes a single parameter which is a mapping of + file paths to values to be set. By default, every value written will be verified. The can + be disabled for individual paths by appending ``'!'`` to them. 
+ + """ + for sysfile, value in params.iteritems(): + verify = not sysfile.endswith('!') + sysfile = sysfile.rstrip('!') + self.set_sysfile_value(sysfile, value, verify=verify) + + def deploy_busybox(self, context, force=False): + """ + Deploys the busybox Android binary (hence in android module) to the + specified device, and returns the path to the binary on the device. + + :param device: device to deploy the binary to. + :param context: an instance of ExecutionContext + :param force: by default, if the binary is already present on the + device, it will not be deployed again. Setting force + to ``True`` overrides that behavior and ensures that the + binary is always copied. Defaults to ``False``. + + :returns: The on-device path to the busybox binary. + + """ + on_device_executable = self.path.join(self.binaries_directory, 'busybox') + if not force and self.file_exists(on_device_executable): + return on_device_executable + host_file = context.resolver.get(Executable(NO_ONE, self.abi, 'busybox')) + return self.install(host_file) + + def list_file_systems(self): + output = self.execute('mount') + fstab = [] + for line in output.split('\n'): + fstab.append(FstabEntry(*line.split())) + return fstab + + # Process query and control + + def get_pids_of(self, process_name): + """Returns a list of PIDs of all processes with the specified name.""" + result = self.execute('ps {}'.format(process_name[-15:]), check_exit_code=False).strip() + if result and 'not found' not in result: + return [int(x.split()[1]) for x in result.split('\n')[1:]] + else: + return [] + + def ps(self, **kwargs): + """ + Returns the list of running processes on the device. Keyword arguments may + be used to specify simple filters for columns. + + Added in version 2.1.4 + + """ + lines = iter(convert_new_lines(self.execute('ps')).split('\n')) + lines.next() # header + result = [] + for line in lines: + parts = line.split() + if parts: + result.append(PsEntry(*(parts[0:1] + map(int, parts[1:5]) + parts[5:]))) + if not kwargs: + return result + else: + filtered_result = [] + for entry in result: + if all(getattr(entry, k) == v for k, v in kwargs.iteritems()): + filtered_result.append(entry) + return filtered_result + + def kill(self, pid, signal=None, as_root=False): # pylint: disable=W0221 + """ + Kill the specified process. + + :param pid: PID of the process to kill. + :param signal: Specify which singal to send to the process. This must + be a valid value for -s option of kill. Defaults to ``None``. + + Modified in version 2.1.4: added ``signal`` parameter. + + """ + signal_string = '-s {}'.format(signal) if signal else '' + self.execute('kill {} {}'.format(signal_string, pid), as_root=as_root) + + def killall(self, process_name, signal=None, as_root=False): # pylint: disable=W0221 + """ + Kill all processes with the specified name. + + :param process_name: The name of the process(es) to kill. + :param signal: Specify which singal to send to the process. This must + be a valid value for -s option of kill. Defaults to ``None``. + + Modified in version 2.1.5: added ``as_root`` parameter. 
+ + """ + for pid in self.get_pids_of(process_name): + self.kill(pid, signal=signal, as_root=as_root) + + # cpufreq + + def list_available_cpu_governors(self, cpu): + """Returns a list of governors supported by the cpu.""" + if isinstance(cpu, int): + cpu = 'cpu{}'.format(cpu) + if cpu not in self._available_governors: + cmd = 'cat /sys/devices/system/cpu/{}/cpufreq/scaling_available_governors'.format(cpu) + output = self.execute(cmd, check_exit_code=True) + self._available_governors[cpu] = output.strip().split() # pylint: disable=E1103 + return self._available_governors[cpu] + + def get_cpu_governor(self, cpu): + """Returns the governor currently set for the specified CPU.""" + if isinstance(cpu, int): + cpu = 'cpu{}'.format(cpu) + sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_governor'.format(cpu) + return self.get_sysfile_value(sysfile) + + def set_cpu_governor(self, cpu, governor, **kwargs): + """ + Set the governor for the specified CPU. + See https://www.kernel.org/doc/Documentation/cpu-freq/governors.txt + + :param cpu: The CPU for which the governor is to be set. This must be + the full name as it appears in sysfs, e.g. "cpu0". + :param governor: The name of the governor to be used. This must be + supported by the specific device. + + Additional keyword arguments can be used to specify governor tunables for + governors that support them. + + :note: On big.LITTLE all cores in a cluster must be using the same governor. + Setting the governor on any core in a cluster will also set it on all + other cores in that cluster. + + :raises: ConfigError if governor is not supported by the CPU. + :raises: DeviceError if, for some reason, the governor could not be set. + + """ + if isinstance(cpu, int): + cpu = 'cpu{}'.format(cpu) + supported = self.list_available_cpu_governors(cpu) + if governor not in supported: + raise ConfigError('Governor {} not supported for cpu {}'.format(governor, cpu)) + sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_governor'.format(cpu) + self.set_sysfile_value(sysfile, governor) + self.set_cpu_governor_tunables(cpu, governor, **kwargs) + + def list_available_cpu_governor_tunables(self, cpu): + """Returns a list of tunables available for the governor on the specified CPU.""" + if isinstance(cpu, int): + cpu = 'cpu{}'.format(cpu) + governor = self.get_cpu_governor(cpu) + if governor not in self._available_governor_tunables: + try: + tunables_path = '/sys/devices/system/cpu/{}/cpufreq/{}'.format(cpu, governor) + self._available_governor_tunables[governor] = self.listdir(tunables_path) + except DeviceError: # probably an older kernel + try: + tunables_path = '/sys/devices/system/cpu/cpufreq/{}'.format(governor) + self._available_governor_tunables[governor] = self.listdir(tunables_path) + except DeviceError: # governor does not support tunables + self._available_governor_tunables[governor] = [] + return self._available_governor_tunables[governor] + + def get_cpu_governor_tunables(self, cpu): + if isinstance(cpu, int): + cpu = 'cpu{}'.format(cpu) + governor = self.get_cpu_governor(cpu) + tunables = {} + for tunable in self.list_available_cpu_governor_tunables(cpu): + if tunable not in WRITE_ONLY_TUNABLES.get(governor, []): + try: + path = '/sys/devices/system/cpu/{}/cpufreq/{}/{}'.format(cpu, governor, tunable) + tunables[tunable] = self.get_sysfile_value(path) + except DeviceError: # May be an older kernel + path = '/sys/devices/system/cpu/cpufreq/{}/{}'.format(governor, tunable) + tunables[tunable] = self.get_sysfile_value(path) + return tunables + + def 
set_cpu_governor_tunables(self, cpu, governor, **kwargs): + """ + Set tunables for the specified governor. Tunables should be specified as + keyword arguments. Which tunables and values are valid depends on the + governor. + + :param cpu: The cpu for which the governor will be set. This must be the + full cpu name as it appears in sysfs, e.g. ``cpu0``. + :param governor: The name of the governor. Must be all lower case. + + The rest should be keyword parameters mapping tunable name onto the value to + be set for it. + + :raises: ConfigError if governor specified is not a valid governor name, or if + a tunable specified is not valid for the governor. + :raises: DeviceError if could not set tunable. + + + """ + if isinstance(cpu, int): + cpu = 'cpu{}'.format(cpu) + valid_tunables = self.list_available_cpu_governor_tunables(cpu) + for tunable, value in kwargs.iteritems(): + if tunable in valid_tunables: + try: + path = '/sys/devices/system/cpu/{}/cpufreq/{}/{}'.format(cpu, governor, tunable) + self.set_sysfile_value(path, value) + except DeviceError: # May be an older kernel + path = '/sys/devices/system/cpu/cpufreq/{}/{}'.format(governor, tunable) + self.set_sysfile_value(path, value) + else: + message = 'Unexpected tunable {} for governor {} on {}.\n'.format(tunable, governor, cpu) + message += 'Available tunables are: {}'.format(valid_tunables) + raise ConfigError(message) + + def enable_cpu(self, cpu): + """ + Enable the specified core. + + :param cpu: CPU core to enable. This must be the full name as it + appears in sysfs, e.g. "cpu0". + + """ + self.hotplug_cpu(cpu, online=True) + + def disable_cpu(self, cpu): + """ + Disable the specified core. + + :param cpu: CPU core to disable. This must be the full name as it + appears in sysfs, e.g. "cpu0". + """ + self.hotplug_cpu(cpu, online=False) + + def hotplug_cpu(self, cpu, online): + """ + Hotplug the specified CPU either on or off. + See https://www.kernel.org/doc/Documentation/cpu-hotplug.txt + + :param cpu: The CPU for which the governor is to be set. This must be + the full name as it appears in sysfs, e.g. "cpu0". + :param online: CPU will be enabled if this value bool()'s to True, and + will be disabled otherwise. + + """ + if isinstance(cpu, int): + cpu = 'cpu{}'.format(cpu) + status = 1 if online else 0 + sysfile = '/sys/devices/system/cpu/{}/online'.format(cpu) + self.set_sysfile_value(sysfile, status) + + def list_available_cpu_frequencies(self, cpu): + """Returns a list of frequencies supported by the cpu or an empty list + if not could be found.""" + if isinstance(cpu, int): + cpu = 'cpu{}'.format(cpu) + if cpu not in self._available_frequencies: + try: + cmd = 'cat /sys/devices/system/cpu/{}/cpufreq/scaling_available_frequencies'.format(cpu) + output = self.execute(cmd) + self._available_frequencies[cpu] = map(int, output.strip().split()) # pylint: disable=E1103 + except DeviceError: + # we return an empty list because on some devices scaling_available_frequencies + # is not generated. So we are returing an empty list as an indication + # http://adrynalyne-teachtofish.blogspot.co.uk/2011/11/how-to-enable-scalingavailablefrequenci.html + self._available_frequencies[cpu] = [] + return self._available_frequencies[cpu] + + def get_cpu_min_frequency(self, cpu): + """ + Returns the min frequency currently set for the specified CPU. + + Warning, this method does not check if the cpu is online or not. 
It will
+        attempt to read the minimum frequency regardless.
+
+        :raises: DeviceError if, for some reason, the frequency could not be read.
+
+        """
+        if isinstance(cpu, int):
+            cpu = 'cpu{}'.format(cpu)
+        sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_min_freq'.format(cpu)
+        return self.get_sysfile_value(sysfile)
+
+    def set_cpu_min_frequency(self, cpu, frequency):
+        """
+        Sets the minimum value for the CPU frequency. The actual frequency will
+        depend on the governor used and may vary during execution. The value should be
+        either an int or a string representing an integer. The value must also be
+        supported by the device. The available frequencies can be obtained by calling
+        list_available_cpu_frequencies() or examining
+
+        /sys/devices/system/cpu/cpuX/cpufreq/scaling_available_frequencies
+
+        on the device.
+
+        :raises: ConfigError if the frequency is not supported by the CPU.
+        :raises: DeviceError if, for some reason, frequency could not be set.
+
+        """
+        if isinstance(cpu, int):
+            cpu = 'cpu{}'.format(cpu)
+        available_frequencies = self.list_available_cpu_frequencies(cpu)
+        try:
+            value = int(frequency)
+            if available_frequencies and value not in available_frequencies:
+                raise ConfigError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
+                                                                                        value,
+                                                                                        available_frequencies))
+            sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_min_freq'.format(cpu)
+            self.set_sysfile_value(sysfile, value)
+        except ValueError:
+            raise ValueError('value must be an integer; got: "{}"'.format(frequency))
+
+    def get_cpu_max_frequency(self, cpu):
+        """
+        Returns the max frequency currently set for the specified CPU.
+
+        Warning: this method does not check whether the cpu is online. It will
+        attempt to read the maximum frequency regardless.
+
+        :raises: DeviceError if, for some reason, the frequency could not be read.
+
+        """
+        if isinstance(cpu, int):
+            cpu = 'cpu{}'.format(cpu)
+        sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_max_freq'.format(cpu)
+        return self.get_sysfile_value(sysfile)
+
+    def set_cpu_max_frequency(self, cpu, frequency):
+        """
+        Sets the maximum value for the CPU frequency. The actual frequency will
+        depend on the governor used and may vary during execution. The value should be
+        either an int or a string representing an integer. The value must also be
+        supported by the device. The available frequencies can be obtained by calling
+        list_available_cpu_frequencies() or examining
+
+        /sys/devices/system/cpu/cpuX/cpufreq/scaling_available_frequencies
+
+        on the device.
+
+        :raises: ConfigError if the frequency is not supported by the CPU.
+        :raises: DeviceError if, for some reason, frequency could not be set.
+
+        """
+        if isinstance(cpu, int):
+            cpu = 'cpu{}'.format(cpu)
+        available_frequencies = self.list_available_cpu_frequencies(cpu)
+        try:
+            value = int(frequency)
+            if available_frequencies and value not in available_frequencies:
+                raise ConfigError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
+                                                                                        value,
+                                                                                        available_frequencies))
+            sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_max_freq'.format(cpu)
+            self.set_sysfile_value(sysfile, value)
+        except ValueError:
+            raise ValueError('value must be an integer; got: "{}"'.format(frequency))
+
+    def get_cpuidle_states(self, cpu=0):
+        """
+        Return a map of cpuidle states to their descriptive names.
+
+        """
+        if isinstance(cpu, int):
+            cpu = 'cpu{}'.format(cpu)
+        cpuidle_states = {}
+        statere = re.compile(r'^\s*state\d+\s*$')
+        output = self.execute("ls /sys/devices/system/cpu/{}/cpuidle".format(cpu))
+        for entry in output.split():
+            if statere.match(entry):
+                cpuidle_states[entry] = self.get_sysfile_value("/sys/devices/system/cpu/{}/cpuidle/{}/desc".format(cpu, entry))
+        return cpuidle_states
+
+    # Core- and cluster-level mappings for the cpu-level APIs above. These
+    # APIs make the following assumptions, which were true for all devices that
+    # existed at the time of writing:
+    #   1. A cluster can only contain cores of one type.
+    #   2. All cores in a cluster are tied to the same DVFS domain, therefore
+    #      changes to cpufreq for a core will affect all other cores on the
+    #      same cluster.
+
+    def get_core_clusters(self, core, strict=True):
+        """Returns the list of clusters that contain the specified core. If ``strict``
+        is ``True``, raises ValueError if no clusters have been found (returns an
+        empty list if ``strict`` is ``False``)."""
+        core_indexes = [i for i, c in enumerate(self.core_names) if c == core]
+        clusters = sorted(list(set(self.core_clusters[i] for i in core_indexes)))
+        if strict and not clusters:
+            raise ValueError('No active clusters for core {}'.format(core))
+        return clusters
+
+    def get_cluster_cpu(self, cluster):
+        """Returns the first *active* cpu for the cluster. If the entire cluster
+        has been hotplugged, this will raise a ``ValueError``."""
+        cpu_indexes = set([i for i, c in enumerate(self.core_clusters) if c == cluster])
+        active_cpus = sorted(list(cpu_indexes.intersection(self.active_cpus)))
+        if not active_cpus:
+            raise ValueError('All cpus for cluster {} are offline'.format(cluster))
+        return active_cpus[0]
+
+    def list_available_cluster_governors(self, cluster):
+        return self.list_available_cpu_governors(self.get_cluster_cpu(cluster))
+
+    def get_cluster_governor(self, cluster):
+        return self.get_cpu_governor(self.get_cluster_cpu(cluster))
+
+    def set_cluster_governor(self, cluster, governor, **tunables):
+        return self.set_cpu_governor(self.get_cluster_cpu(cluster), governor, **tunables)
+
+    def list_available_cluster_governor_tunables(self, cluster):
+        return self.list_available_cpu_governor_tunables(self.get_cluster_cpu(cluster))
+
+    def get_cluster_governor_tunables(self, cluster):
+        return self.get_cpu_governor_tunables(self.get_cluster_cpu(cluster))
+
+    def set_cluster_governor_tunables(self, cluster, governor, **tunables):
+        return self.set_cpu_governor_tunables(self.get_cluster_cpu(cluster), governor, **tunables)
+
+    def get_cluster_min_frequency(self, cluster):
+        return self.get_cpu_min_frequency(self.get_cluster_cpu(cluster))
+
+    def set_cluster_min_frequency(self, cluster, freq):
+        return self.set_cpu_min_frequency(self.get_cluster_cpu(cluster), freq)
+
+    def get_cluster_max_frequency(self, cluster):
+        return self.get_cpu_max_frequency(self.get_cluster_cpu(cluster))
+
+    def set_cluster_max_frequency(self, cluster, freq):
+        return self.set_cpu_max_frequency(self.get_cluster_cpu(cluster), freq)
+
+    def get_core_cpu(self, core):
+        for cluster in self.get_core_clusters(core):
+            try:
+                return self.get_cluster_cpu(cluster)
+            except ValueError:
+                pass
+        raise ValueError('No active CPUs found for core {}'.format(core))
+
+    def list_available_core_governors(self, core):
+        return self.list_available_cpu_governors(self.get_core_cpu(core))
+
+    def get_core_governor(self, core):
+        return self.get_cpu_governor(self.get_core_cpu(core))
+
+    def set_core_governor(self, core, governor, **tunables):
+        for cluster in self.get_core_clusters(core):
+            self.set_cluster_governor(cluster, governor, **tunables)
+
+    def list_available_core_governor_tunables(self, core):
+        return self.list_available_cpu_governor_tunables(self.get_core_cpu(core))
+
+    def get_core_governor_tunables(self, core):
+        return self.get_cpu_governor_tunables(self.get_core_cpu(core))
+
+    def set_core_governor_tunables(self, core, tunables):
+        for cluster in self.get_core_clusters(core):
+            governor = self.get_cluster_governor(cluster)
+            self.set_cluster_governor_tunables(cluster, governor, **tunables)
+
+    def get_core_min_frequency(self, core):
+        return self.get_cpu_min_frequency(self.get_core_cpu(core))
+
+    def set_core_min_frequency(self, core, freq):
+        for cluster in self.get_core_clusters(core):
+            self.set_cluster_min_frequency(cluster, freq)
+
+    def get_core_max_frequency(self, core):
+        return self.get_cpu_max_frequency(self.get_core_cpu(core))
+
+    def set_core_max_frequency(self, core, freq):
+        for cluster in self.get_core_clusters(core):
+            self.set_cluster_max_frequency(cluster, freq)
+
+    def get_number_of_active_cores(self, core):
+        if core not in self.core_names:
+            raise ValueError('Unexpected core: {}; must be in {}'.format(core, list(set(self.core_names))))
+        active_cpus = self.active_cpus
+        num_active_cores = 0
+        for i, c in enumerate(self.core_names):
+            if c == core and i in active_cpus:
+                num_active_cores += 1
+        return num_active_cores
+
+    def set_number_of_active_cores(self, core, number):
+        if core not in self.core_names:
+            raise ValueError('Unexpected core: {}; must be in {}'.format(core, list(set(self.core_names))))
+        core_ids = [i for i, c in enumerate(self.core_names) if c == core]
+        max_cores = len(core_ids)
+        if number > max_cores:
+            message = 'Attempting to set the number of active {} to {}; maximum is {}'
+            raise ValueError(message.format(core, number, max_cores))
+        for i in xrange(0, number):
+            self.enable_cpu(core_ids[i])
+        for i in xrange(number, max_cores):
+            self.disable_cpu(core_ids[i])
+
+    # internal methods
+
+    def _check_ready(self):
+        if not self._is_ready:
+            raise AttributeError('Device not ready.')
+
+    def _get_core_cluster(self, core):
+        """Returns the first cluster that has cores of the specified type. Raises
+        ValueError if no cluster for the specified type has been found."""
+        core_indexes = [i for i, c in enumerate(self.core_names) if c == core]
+        core_clusters = set(self.core_clusters[i] for i in core_indexes)
+        if not core_clusters:
+            raise ValueError('No cluster found for core {}'.format(core))
+        return sorted(list(core_clusters))[0]
+
+
+class LinuxDevice(BaseLinuxDevice):
+
+    platform = 'linux'
+
+    default_timeout = 30
+    delay = 2
+    long_delay = 3 * delay
+    ready_timeout = 60
+
+    parameters = [
+        Parameter('host', mandatory=True, description='Host name or IP address for the device.'),
+        Parameter('username', mandatory=True, description='User name for the account on the device.'),
+        Parameter('password', description='Password for the account on the device (for password-based auth).'),
+        Parameter('keyfile', description='Keyfile to be used for key-based authentication.'),
+        Parameter('port', kind=int, description='SSH port number on the device.'),
+
+        Parameter('use_telnet', kind=boolean, default=False,
+                  description='Optionally, telnet may be used instead of ssh, though this is discouraged.'),
+
+        Parameter('working_directory', default=None,
+                  description='''
+                  Working directory to be used by WA. This must be in a location where the specified user
+                  has write permissions. This will default to /home/<username>/wa (or to /root/wa, if
+                  username is 'root').
+                  '''),
+        Parameter('binaries_directory', default='/usr/local/bin',
+                  description='Location of executable binaries on this device (must be in PATH).'),
+        Parameter('property_files', kind=list_of_strings,
+                  default=['/proc/version', '/etc/debian_version', '/etc/lsb-release', '/etc/arch-release'],
+                  description='''
+                  A list of paths to files containing static OS properties. These will be pulled into the
+                  __meta directory in output for each run in order to provide information about the platform.
+                  These paths do not have to exist and will be ignored if the path is not present on a
+                  particular device.
+                  '''),
+    ]
+
+    @property
+    def is_rooted(self):
+        if self._is_rooted is None:
+            try:
+                self.execute('ls /', as_root=True)
+                self._is_rooted = True
+            except DeviceError:
+                self._is_rooted = False
+        return self._is_rooted
+
+    def __init__(self, *args, **kwargs):
+        super(LinuxDevice, self).__init__(*args, **kwargs)
+        self.shell = None
+        self.local_binaries_directory = None
+        self._is_rooted = None
+
+    def validate(self):
+        if not self.password and not self.keyfile:
+            raise ConfigError('Either a password or a keyfile must be provided.')
+        if self.working_directory is None:  # pylint: disable=access-member-before-definition
+            if self.username == 'root':
+                self.working_directory = '/root/wa'  # pylint: disable=attribute-defined-outside-init
+            else:
+                self.working_directory = '/home/{}/wa'.format(self.username)  # pylint: disable=attribute-defined-outside-init
+        self.local_binaries_directory = self.path.join(self.working_directory, 'bin')
+
+    def initialize(self, context, *args, **kwargs):
+        self.execute('mkdir -p {}'.format(self.local_binaries_directory))
+        self.execute('export PATH={}:$PATH'.format(self.local_binaries_directory))
+        super(LinuxDevice, self).initialize(context, *args, **kwargs)
+
+    # Power control
+
+    def reset(self):
+        self._is_ready = False
+        self.execute('reboot', as_root=True)
+
+    def hard_reset(self):
+        super(LinuxDevice, self).hard_reset()
+        self._is_ready = False
+
+    def boot(self, **kwargs):
+        self.reset()
+
+    def connect(self):  # NOQA pylint: disable=R0912
+        self.shell = SshShell(timeout=self.default_timeout)
+        self.shell.login(self.host, self.username, self.password, self.keyfile, self.port, telnet=self.use_telnet)
+        self._is_ready = True
+
+    def disconnect(self):  # NOQA pylint: disable=R0912
+        self.shell.logout()
+        self._is_ready = False
+
+    # Execution
+
+    def has_root(self):
+        try:
+            self.execute('ls /', as_root=True)
+            return True
+        except DeviceError as e:
+            if 'not in the sudoers file' not in e.message:
+                raise e
+            return False
+
+    def execute(self, command, timeout=default_timeout, check_exit_code=True, background=False,
+                as_root=False, strip_colors=True, **kwargs):
+        """
+        Execute the specified command on the device using the ssh shell.
+
+        Parameters:
+
+        :param command: The command to be executed. It should appear exactly
+                        as if you were typing it into a shell.
+        :param timeout: Time, in seconds, to wait for the command to return before
+                        aborting and raising an error. Defaults to
+                        ``LinuxDevice.default_timeout``.
+        :param check_exit_code: If ``True``, the return code of the command on the Device will
+                                be checked, and an exception will be raised if it is not 0.
+                                Defaults to ``True``.
+        :param background: If ``True``, will create a new ssh shell rather than using
+                           the default session and will return it immediately. If this is ``True``,
+                           ``timeout``, ``strip_colors`` and (obviously) ``check_exit_code`` will
+                           be ignored; also, with this, ``as_root=True`` is only valid if ``username``
+                           for the device was set to ``root``.
+        :param as_root: If ``True``, will attempt to execute command in privileged mode. The device
+                        must be rooted, otherwise an error will be raised. Defaults to ``False``.
+
+                        Added in version 2.1.3
+
+        :returns: If ``background`` parameter is set to ``True``, the subprocess object will
+                  be returned; otherwise, the contents of STDOUT from the device will be returned.
+
+        """
+        self._check_ready()
+        if background:
+            if as_root and self.username != 'root':
+                raise DeviceError('Cannot execute in background with as_root=True unless user is root.')
+            return self.shell.background(command)
+        else:
+            return self.shell.execute(command, timeout, check_exit_code, as_root, strip_colors)
+
+    def kick_off(self, command):
+        """
+        Like execute, but closes the ssh session and returns immediately, leaving the
+        command running on the device (this is different from execute(background=True),
+        which keeps the connection open and returns a subprocess object).
+
+        """
+        self._check_ready()
+        command = 'sh -c "{}" 1>/dev/null 2>/dev/null &'.format(escape_double_quotes(command))
+        return self.shell.execute(command)
+
+    # File management
+
+    def push_file(self, source, dest, as_root=False, timeout=default_timeout):  # pylint: disable=W0221
+        self._check_ready()
+        if not as_root or self.username == 'root':
+            self.shell.push_file(source, dest, timeout=timeout)
+        else:
+            tempfile = self.path.join(self.working_directory, self.path.basename(dest))
+            self.shell.push_file(source, tempfile, timeout=timeout)
+            self.shell.execute('cp -r {} {}'.format(tempfile, dest), timeout=timeout, as_root=True)
+
+    def pull_file(self, source, dest, as_root=False, timeout=default_timeout):  # pylint: disable=W0221
+        self._check_ready()
+        if not as_root or self.username == 'root':
+            self.shell.pull_file(source, dest, timeout=timeout)
+        else:
+            tempfile = self.path.join(self.working_directory, self.path.basename(source))
+            self.shell.execute('cp -r {} {}'.format(source, tempfile), timeout=timeout, as_root=True)
+            self.shell.execute('chown -R {} {}'.format(self.username, tempfile), timeout=timeout, as_root=True)
+            self.shell.pull_file(tempfile, dest, timeout=timeout)
+
+    def delete_file(self, filepath, as_root=False):  # pylint: disable=W0221
+        self.execute('rm -rf {}'.format(filepath), as_root=as_root)
+
+    def file_exists(self, filepath):
+        output = self.execute('if [ -e \'{}\' ]; then echo 1; else echo 0; fi'.format(filepath))
+        return boolean(output.strip())  # pylint: disable=maybe-no-member
+
+    def listdir(self, path, as_root=False, **kwargs):
+        contents = self.execute('ls -1 {}'.format(path), as_root=as_root)
+        return [x.strip() for x in contents.split('\n')]  # pylint: disable=maybe-no-member
+
+    def install(self, filepath, timeout=default_timeout, with_name=None):  # pylint: disable=W0221
+        if self.is_rooted:
+            destpath = self.path.join(self.binaries_directory,
+                                      with_name or self.path.basename(filepath))
+            self.push_file(filepath, destpath, as_root=True)
+            self.execute('chmod a+x {}'.format(destpath), timeout=timeout, as_root=True)
+        else:
+            destpath = self.path.join(self.local_binaries_directory,
+                                      with_name or self.path.basename(filepath))
+            self.push_file(filepath, destpath)
+            self.execute('chmod a+x {}'.format(destpath), timeout=timeout)
+        return destpath
+
+    install_executable = install  # compatibility
+
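(Editorial illustration, not part of this commit: a minimal usage sketch of the LinuxDevice API defined above, assuming the Extension machinery accepts parameter values as constructor keyword arguments; the import path, host, credentials, and file names are hypothetical.)

    from wlauto.common.linux.device import LinuxDevice   # import path assumed

    device = LinuxDevice(host='192.168.0.10', username='root', password='secret')
    device.validate()                      # fills in working_directory, etc.
    device.connect()                       # opens the ssh shell
    print device.execute('uname -a')       # blocking command over ssh
    busybox = device.install('./busybox')  # push a binary and chmod a+x it
    device.push_file('./data.bin', device.working_directory + '/data.bin')
    device.disconnect()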
+    def uninstall(self, name):
+        path = self.path.join(self.local_binaries_directory, name)
+        self.delete_file(path)
+
+    uninstall_executable = uninstall  # compatibility
+
+    def is_installed(self, name):
+        try:
+            self.execute('which {}'.format(name))
+            return True
+        except DeviceError:
+            return False
+
+    # misc
+
+    def ping(self):
+        try:
+            # May be triggered inside initialize()
+            self.shell.execute('ls /', timeout=5)
+        except (TimeoutError, CalledProcessError):
+            raise DeviceNotRespondingError(self.host)
+
+    def capture_screen(self, filepath):
+        if not self.is_installed('scrot'):
+            self.logger.debug('Could not take screenshot as scrot is not installed.')
+            return
+        try:
+            tempfile = self.path.join(self.working_directory, os.path.basename(filepath))
+            self.execute('DISPLAY=:0.0 scrot {}'.format(tempfile))
+            self.pull_file(tempfile, filepath)
+            self.delete_file(tempfile)
+        except DeviceError as e:
+            if "Can't open X display." not in e.message:
+                raise e
+            message = e.message.split('OUTPUT:', 1)[1].strip()
+            self.logger.debug('Could not take screenshot: {}'.format(message))
+
+    def is_screen_on(self):
+        pass  # TODO
+
+    def ensure_screen_is_on(self):
+        pass  # TODO
+
+    def get_properties(self, context):
+        for propfile in self.property_files:
+            if not self.file_exists(propfile):
+                continue
+            normname = propfile.lstrip(self.path.sep).replace(self.path.sep, '.')
+            outfile = os.path.join(context.host_working_directory, normname)
+            self.pull_file(propfile, outfile)
+        return {}
+
diff --git a/wlauto/common/resources.py b/wlauto/common/resources.py
new file mode 100644
index 00000000..bd841428
--- /dev/null
+++ b/wlauto/common/resources.py
@@ -0,0 +1,64 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import os
+
+from wlauto.core.resource import Resource
+
+
+class FileResource(Resource):
+    """
+    Base class for all resources that are a regular file in the
+    file system.
+
+    """
+
+    def delete(self, instance):
+        os.remove(instance)
+
+
+class File(FileResource):
+
+    name = 'file'
+
+    def __init__(self, owner, path, url=None):
+        super(File, self).__init__(owner)
+        self.path = path
+        self.url = url
+
+    def __str__(self):
+        return '<{}\'s {} {}>'.format(self.owner, self.name, self.path or self.url)
+
+
+class ExtensionAsset(File):
+
+    name = 'extension_asset'
+
+    def __init__(self, owner, path):
+        super(ExtensionAsset, self).__init__(owner, os.path.join(owner.name, path))
+
+
+class Executable(FileResource):
+
+    name = 'executable'
+
+    def __init__(self, owner, platform, filename):
+        super(Executable, self).__init__(owner)
+        self.platform = platform
+        self.filename = filename
+
+    def __str__(self):
+        return '<{}\'s {} {}>'.format(self.owner, self.platform, self.filename)
diff --git a/wlauto/config_example.py b/wlauto/config_example.py
new file mode 100644
index 00000000..66eed1d8
--- /dev/null
+++ b/wlauto/config_example.py
@@ -0,0 +1,284 @@
+"""
+Default config for Workload Automation. DO NOT MODIFY this file. This file
+gets copied to ~/.workload_automation/config.py on initial run of run_workloads.
+Add your configuration to that file instead.
+
+"""
+# *** WARNING: ***
+# Configuration listed in this file is NOT COMPLETE. This file sets the default
+# configuration for WA and gives EXAMPLES of other configuration available. It
+# is not supposed to be an exhaustive list.
+# PLEASE REFER TO WA DOCUMENTATION FOR THE COMPLETE LIST OF AVAILABLE
+# EXTENSIONS AND THEIR CONFIGURATION.
+
+
+# This defines when the device will be rebooted during Workload Automation execution.
+#
+# Valid policies are:
+#   never:          The device will never be rebooted.
+#   as_needed:      The device will only be rebooted if the need arises (e.g. if it
+#                   becomes unresponsive).
+#   initial:        The device will be rebooted when the execution first starts, just before
+#                   executing the first workload spec.
+#   each_spec:      The device will be rebooted before running a new workload spec.
+#   each_iteration: The device will be rebooted before each new iteration.
+#
+reboot_policy = 'as_needed'
+
+# Defines the order in which the agenda spec will be executed. At the moment,
+# the following execution orders are supported:
+#
+#   by_iteration: The first iteration of each workload spec is executed one after the other,
+#                 so all workloads are executed before proceeding on to the second iteration.
+#                 This is the default if no order is explicitly specified.
+#                 If multiple sections were specified, this will also split them up, so that specs
+#                 in the same section are further apart in the execution order.
+#   by_section:   Same as "by_iteration", but runs specs from the same section one after the other.
+#   by_spec:      All iterations of the first spec are executed before moving on to the next
+#                 spec. This may also be specified as ``"classic"``, as this was the way
+#                 workloads were executed in earlier versions of WA.
+#   random:       Randomises the order in which specs run.
+#
+execution_order = 'by_iteration'
+
+####################################################################################################
+######################################### Device Settings ##########################################
+####################################################################################################
+# Specify the device you want to run workload automation on. This must be a
+# string with the ID of the device (e.g. 'generic_android').
+#
+device = 'generic_android'
+
+# Configuration options that will be passed onto the device. These are obviously device-specific,
+# so check the documentation for the particular device to find out which options and values are
+# valid. The settings listed below are common to all devices.
+#
+device_config = dict(
+    # The name used by adb to identify the device. Use "adb devices" in bash to list
+    # the devices currently seen by adb.
+    #adb_name='10.109.173.2:5555',
+
+    # The directory on the device that WA will use to push files to
+    #working_directory='/sdcard/wa-working',
+
+    # This specifies the device's CPU cores. The order must match how they
+    # appear in cpufreq. The example below is for TC2.
+    # core_names = ['a7', 'a7', 'a7', 'a15', 'a15']
+
+    # Specifies cluster mapping for the device's cores.
+    # core_clusters = [0, 0, 0, 1, 1]
+)
+
+
+####################################################################################################
+################################## Instrumentation Configuration ###################################
+####################################################################################################
+# This defines the additional instrumentation that will be enabled during workload execution,
+# which in turn determines what additional data (such as /proc/interrupts content or Streamline
+# traces) will be available in the results directory.
+#
+instrumentation = [
+    # Records the time it took to run the workload
+    'execution_time',
+
+    # Collects /proc/interrupts before and after execution and does a diff.
+    'interrupts',
+
+    # Collects the contents of /sys/devices/system/cpu before and after execution and does a diff.
+    'cpufreq',
+
+    # Gets energy usage for the workload from HWMON devices
+    # NOTE: the hardware needs to have the right sensors in order for this to work
+    #'hwmon',
+
+    # Run perf in the background during workload execution and then collect the results. perf is a
+    # standard Linux performance analysis tool.
+    #'perf',
+
+    # Collect Streamline traces during workload execution. Streamline is part of DS-5
+    #'streamline',
+
+    # Collects traces by interacting with Ftrace Linux kernel internal tracer
+    #'trace-cmd',
+
+    # Obtains the power consumption of the target device's core measured by National Instruments
+    # Data Acquisition (DAQ) device.
+    #'daq',
+
+    # Collects CCI counter data.
+    #'cci_pmu_logger',
+
+    # Collects FPS (Frames Per Second) and related metrics (such as jank) from
+    # the View of the workload (Note: only a single View per workload is
+    # supported at the moment, so this is mainly useful for games).
+    #'fps',
+]
+
+
+####################################################################################################
+################################# Result Processors Configuration ##################################
+####################################################################################################
+# Specifies how results will be processed and presented.
+#
+result_processors = [
+    # Creates a results.txt file for each iteration that lists all collected metrics
+    # in "name = value (units)" format
+    'standard',
+
+    # Creates a results.csv that contains metrics for all iterations of all workloads
+    # in the .csv format.
+    'csv',
+
+    # Creates a summary.csv that contains summary metrics for all iterations of all
+    # workloads in the .csv format. Summary metrics are defined on a per-workload basis
+    # and are typically things like overall scores. The contents of summary.csv are
+    # always a subset of the contents of results.csv (if it is generated).
+    'summary_csv',
+
+    # Creates a results.json that contains metrics for all iterations of all workloads
+    # in the JSON format.
+    #'json',
+
+    # Write results to an sqlite3 database. By default, a new database will be
+    # generated for each run, however it is possible to specify a path to an
+    # existing DB file (see result processor configuration below), in which
+    # case results from multiple runs may be stored in the one file.
+    #'sqlite',
+]
+
+
+####################################################################################################
+################################### Logging output Configuration ###################################
+####################################################################################################
+# Specify the format of logging messages. The format uses the old formatting syntax:
+#
+#   http://docs.python.org/2/library/stdtypes.html#string-formatting-operations
+#
+# The attributes that can be used in formats are listed here:
+#
+#   http://docs.python.org/2/library/logging.html#logrecord-attributes
+#
+logging = {
+    # Log file format
+    'file format': '%(asctime)s %(levelname)-8s %(name)s: %(message)s',
+    # Verbose console output format
+    'verbose format': '%(asctime)s %(levelname)-8s %(name)s: %(message)s',
+    # Regular console output format
+    'regular format': '%(levelname)-8s %(message)s',
+    # Colouring the console output
+    'colour_enabled': True,
+}
+
+
+####################################################################################################
+#################################### Instruments Configuration #####################################
+####################################################################################################
+# Instrument configuration covers settings specific to individual instruments. Some
+# instruments require specific settings in order for them to work. These settings are
+# specified here.
+# Note that these settings only take effect if the corresponding instrument is
+# enabled above.
+
+####################################################################################################
+######################################## perf configuration ########################################
+
+# The hardware events such as instructions executed, cache-misses suffered, or branches
+# mispredicted to be reported by perf. Events can be obtained from the device by typing
+# 'perf list'.
+#perf_events = ['migrations', 'cs']
+
+# The perf options, which can be obtained from the man page for perf-record
+#perf_options = '-a -i'
+
+####################################################################################################
+####################################### hwmon configuration ########################################
+
+# The kinds of sensors the hwmon instrument will look for
+#hwmon_sensors = ['energy', 'temp']
+
+####################################################################################################
+##################################### streamline configuration #####################################
+
+# The port number on which gatord will listen
+#port = 8080
+
+# Enabling/disabling the run of 'streamline -analyze' on the captured data.
+#streamline_analyze = True
+
+# Enabling/disabling the generation of a CSV report
+#streamline_report_csv = True
+
+####################################################################################################
+###################################### trace-cmd configuration #####################################
+
+# trace-cmd events to be traced. The events can be listed by running
+# 'trace-cmd list -e' on a rooted device.
+#trace_events = ['power*']
+
+####################################################################################################
+######################################### DAQ configuration ########################################
+
+# The host address of the machine that runs the DAQ server with which the instrument communicates
+#daq_server_host = '10.1.17.56'
+
+# The port number on which the DAQ server listens
+#daq_server_port = 56788
+
+# The values of resistors 1 and 2 (in Ohms) across which the voltages are measured
+#daq_resistor_values = [0.002, 0.002]
+
+####################################################################################################
+################################### cci_pmu_logger configuration ###################################
+
+# The events to be counted by the PMU
+# NOTE: The number of events must not exceed the number of counters available (which is 4 for CCI-400)
+#cci_pmu_events = ['0x63', '0x83']
+
+# The names of the events which will be used when reporting PMU counts
+#cci_pmu_event_labels = ['event_0x63', 'event_0x83']
+
+# The period (in jiffies) between counter reads
+#cci_pmu_period = 15
+
+####################################################################################################
+################################### fps configuration ##############################################
+
+# Data points below this FPS will be dropped as not constituting "real" gameplay. The assumption
+# being that while actually running, the FPS in the game will not drop below X frames per second,
+# except on loading screens, menus, etc., which should not contribute to the FPS calculation.
+#fps_drop_threshold=5
+
+# If set to True, this will keep the raw dumpsys output in the results directory (this is mainly
+# used for debugging). Note: frames.csv with collected frames data will always be generated
+# regardless of this setting.
+#fps_keep_raw=False
+
+####################################################################################################
+################################# Result Processor Configuration ###################################
+####################################################################################################
+
+# Specifies an alternative database to store results in. If the file does not
+# exist, it will be created (the directory of the file must exist, however). If
+# the file does exist, the results will be added to the existing data set (each
+# run has a UUID, so results won't clash even if identical agendas were used).
+# Note that in order for this to work, the version of the schema used to generate
+# the DB file must match that of the schema used for the current run. Please
+# see the "What's new" section in the WA docs to check if the schema has changed in
+# recent releases of WA.
+#sqlite_database = '/work/results/myresults.sqlite'
+
+# If the file specified by sqlite_database exists, setting this to True will
+# cause that file to be overwritten rather than updated -- existing results in
+# the file will be lost.
+#sqlite_overwrite = False
+
+# distribution: internal
+
+####################################################################################################
+#################################### Resource Getter configuration #################################
+####################################################################################################
+
+# The location on your system where /arm/scratch is mounted. Used by the
+# Scratch resource getter.
+#scratch_mount_point = '/arm/scratch'
+
+# end distribution
diff --git a/wlauto/core/__init__.py b/wlauto/core/__init__.py
new file mode 100644
index 00000000..cd5d64d6
--- /dev/null
+++ b/wlauto/core/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
diff --git a/wlauto/core/agenda.py b/wlauto/core/agenda.py
new file mode 100644
index 00000000..ad820c8f
--- /dev/null
+++ b/wlauto/core/agenda.py
@@ -0,0 +1,244 @@
+# Copyright 2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+from copy import copy
+from collections import OrderedDict, defaultdict
+
+from wlauto.exceptions import ConfigError
+from wlauto.utils.misc import load_struct_from_yaml, LoadSyntaxError
+from wlauto.utils.types import counter, reset_counter
+
+import yaml
+
+
+def get_aliased_param(d, aliases, default=None, pop=True):
+    alias_map = [i for i, a in enumerate(aliases) if a in d]
+    if len(alias_map) > 1:
+        message = 'Only one of {} may be specified in a single entry'
+        raise ConfigError(message.format(aliases))
+    elif alias_map:
+        if pop:
+            return d.pop(aliases[alias_map[0]])
+        else:
+            return d[aliases[alias_map[0]]]
+    else:
+        return default
+
+
+class AgendaEntry(object):
+
+    def to_dict(self):
+        return copy(self.__dict__)
+
+
+class AgendaWorkloadEntry(AgendaEntry):
+    """
+    Specifies execution of a workload, including things like the number of
+    iterations, device runtime_parameters configuration, etc.
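+
+    For example (illustrative), the agenda entry ``{'name': 'dhrystone', 'iterations': 5}``
+    yields an entry with ``workload_name='dhrystone'`` and ``number_of_iterations=5``;
+    the ``id`` is auto-assigned by the Agenda if not given explicitly.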
+
+    """
+
+    def __init__(self, **kwargs):
+        super(AgendaWorkloadEntry, self).__init__()
+        self.id = kwargs.pop('id')
+        self.workload_name = get_aliased_param(kwargs, ['workload_name', 'name'])
+        if not self.workload_name:
+            raise ConfigError('No workload name specified in entry {}'.format(self.id))
+        self.label = kwargs.pop('label', self.workload_name)
+        self.number_of_iterations = kwargs.pop('iterations', None)
+        self.boot_parameters = get_aliased_param(kwargs,
+                                                 ['boot_parameters', 'boot_params'],
+                                                 default=OrderedDict())
+        self.runtime_parameters = get_aliased_param(kwargs,
+                                                    ['runtime_parameters', 'runtime_params'],
+                                                    default=OrderedDict())
+        self.workload_parameters = get_aliased_param(kwargs,
+                                                     ['workload_parameters', 'workload_params', 'params'],
+                                                     default=OrderedDict())
+        self.instrumentation = kwargs.pop('instrumentation', [])
+        self.flash = kwargs.pop('flash', OrderedDict())
+        if kwargs:
+            raise ConfigError('Invalid entry(ies) in workload {}: {}'.format(self.id, ', '.join(kwargs.keys())))
+
+
+class AgendaSectionEntry(AgendaEntry):
+    """
+    Specifies execution of a section of the agenda: a set of workloads sharing
+    common configuration, such as the number of iterations, device
+    runtime_parameters, etc.
+
+    """
+
+    def __init__(self, agenda, **kwargs):
+        super(AgendaSectionEntry, self).__init__()
+        self.id = kwargs.pop('id')
+        self.number_of_iterations = kwargs.pop('iterations', None)
+        self.boot_parameters = get_aliased_param(kwargs,
+                                                 ['boot_parameters', 'boot_params'],
+                                                 default=OrderedDict())
+        self.runtime_parameters = get_aliased_param(kwargs,
+                                                    ['runtime_parameters', 'runtime_params', 'params'],
+                                                    default=OrderedDict())
+        self.workload_parameters = get_aliased_param(kwargs,
+                                                     ['workload_parameters', 'workload_params'],
+                                                     default=OrderedDict())
+        self.instrumentation = kwargs.pop('instrumentation', [])
+        self.flash = kwargs.pop('flash', OrderedDict())
+        self.workloads = []
+        for w in kwargs.pop('workloads', []):
+            self.workloads.append(agenda.get_workload_entry(w))
+        if kwargs:
+            raise ConfigError('Invalid entry(ies) in section {}: {}'.format(self.id, ', '.join(kwargs.keys())))
+
+    def to_dict(self):
+        d = copy(self.__dict__)
+        d['workloads'] = [w.to_dict() for w in self.workloads]
+        return d
+
+
+class AgendaGlobalEntry(AgendaEntry):
+    """
+    Workload configuration global to all workloads.
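+
+    For example (illustrative), a ``global`` agenda section of ``{'iterations': 5}``
+    requests five iterations for every workload entry that does not itself
+    override ``iterations``.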
+
+    """
+
+    def __init__(self, **kwargs):
+        super(AgendaGlobalEntry, self).__init__()
+        self.number_of_iterations = kwargs.pop('iterations', None)
+        self.boot_parameters = get_aliased_param(kwargs,
+                                                 ['boot_parameters', 'boot_params'],
+                                                 default=OrderedDict())
+        self.runtime_parameters = get_aliased_param(kwargs,
+                                                    ['runtime_parameters', 'runtime_params', 'params'],
+                                                    default=OrderedDict())
+        self.workload_parameters = get_aliased_param(kwargs,
+                                                     ['workload_parameters', 'workload_params'],
+                                                     default=OrderedDict())
+        self.instrumentation = kwargs.pop('instrumentation', [])
+        self.flash = kwargs.pop('flash', OrderedDict())
+        if kwargs:
+            raise ConfigError('Invalid entries in global section: {}'.format(kwargs))
+
+
+class Agenda(object):
+
+    def __init__(self, source=None):
+        self.filepath = None
+        self.config = None
+        self.global_ = None
+        self.sections = []
+        self.workloads = []
+        self._seen_ids = defaultdict(set)
+        if source:
+            try:
+                reset_counter('section')
+                reset_counter('workload')
+                self._load(source)
+            except (ConfigError, LoadSyntaxError, SyntaxError), e:
+                raise ConfigError(str(e))
+
+    def add_workload_entry(self, w):
+        entry = self.get_workload_entry(w)
+        self.workloads.append(entry)
+
+    def get_workload_entry(self, w):
+        if isinstance(w, basestring):
+            w = {'name': w}
+        if not isinstance(w, dict):
+            raise ConfigError('Invalid workload entry: "{}" in {}'.format(w, self.filepath))
+        self._assign_id_if_needed(w, 'workload')
+        return AgendaWorkloadEntry(**w)
+
+    def _load(self, source):
+        raw = self._load_raw_from_source(source)
+        if not isinstance(raw, dict):
+            message = '{} does not contain a valid agenda structure; top level must be a dict.'
+            raise ConfigError(message.format(self.filepath))
+        for k, v in raw.iteritems():
+            if k == 'config':
+                self.config = v
+            elif k == 'global':
+                self.global_ = AgendaGlobalEntry(**v)
+            elif k == 'sections':
+                self._collect_existing_ids(v, 'section')
+                for s in v:
+                    if not isinstance(s, dict):
+                        raise ConfigError('Invalid section entry: "{}" in {}'.format(s, self.filepath))
+                    self._collect_existing_ids(s.get('workloads', []), 'workload')
+                for s in v:
+                    self._assign_id_if_needed(s, 'section')
+                    self.sections.append(AgendaSectionEntry(self, **s))
+            elif k == 'workloads':
+                self._collect_existing_ids(v, 'workload')
+                for w in v:
+                    self.workloads.append(self.get_workload_entry(w))
+            else:
+                raise ConfigError('Unexpected agenda entry "{}" in {}'.format(k, self.filepath))
+
+    def _load_raw_from_source(self, source):
+        if hasattr(source, 'read') and hasattr(source, 'name'):  # file-like object
+            self.filepath = source.name
+            raw = load_struct_from_yaml(text=source.read())
+        elif isinstance(source, basestring):
+            if os.path.isfile(source):
+                self.filepath = source
+                raw = load_struct_from_yaml(filepath=self.filepath)
+            else:  # assume YAML text
+                raw = load_struct_from_yaml(text=source)
+        else:
+            raise ConfigError('Unknown agenda source: {}'.format(source))
+        return raw
+
+    def _collect_existing_ids(self, ds, pool):
+        # Collection needs to take place first so that auto IDs can be
+        # correctly assigned, e.g. if someone explicitly specified an ID
+        # of '1' for one of the workloads.
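+        # For example (illustrative): given entries [{'id': '1'}, {'name': 'x'}],
+        # '1' is recorded here first, so _assign_id_if_needed below will skip '1'
+        # and give the second entry the auto ID '2'.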
+        for d in ds:
+            if isinstance(d, dict) and 'id' in d:
+                did = str(d['id'])
+                if did in self._seen_ids[pool]:
+                    raise ConfigError('Duplicate {} ID: {}'.format(pool, did))
+                self._seen_ids[pool].add(did)
+
+    def _assign_id_if_needed(self, d, pool):
+        # Also enforces string IDs
+        if d.get('id') is None:
+            did = str(counter(pool))
+            while did in self._seen_ids[pool]:
+                did = str(counter(pool))
+            d['id'] = did
+            self._seen_ids[pool].add(did)
+        else:
+            d['id'] = str(d['id'])
+
+
+# Modifying the yaml parser to use an OrderedDict, rather than the regular Python
+# dict, for mappings. This preserves the order in which the items are
+# specified. See
+#   http://stackoverflow.com/a/21048064
+
+_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
+
+
+def dict_representer(dumper, data):
+    return dumper.represent_mapping(_mapping_tag, data.iteritems())
+
+
+def dict_constructor(loader, node):
+    return OrderedDict(loader.construct_pairs(node))
+
+
+yaml.add_representer(OrderedDict, dict_representer)
+yaml.add_constructor(_mapping_tag, dict_constructor)
diff --git a/wlauto/core/bootstrap.py b/wlauto/core/bootstrap.py
new file mode 100644
index 00000000..cfca78bf
--- /dev/null
+++ b/wlauto/core/bootstrap.py
@@ -0,0 +1,195 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import os
+import shutil
+import imp
+import sys
+import re
+from collections import namedtuple, OrderedDict
+
+from wlauto.exceptions import ConfigError
+from wlauto.utils.misc import merge_dicts, normalize, unique
+from wlauto.utils.types import identifier
+
+
+_this_dir = os.path.dirname(__file__)
+_user_home = os.path.expanduser('~')
+
+# loading our external packages over those from the environment
+sys.path.insert(0, os.path.join(_this_dir, '..', 'external'))
+
+
+# Defines extension points for the WA framework. This table is used by the
+# ExtensionLoader (among other places) to identify extensions it should look
+# for.
+# Parameters that need to be specified in a tuple for each extension type:
+#   name: The name of the extension type. This will be used to resolve the
+#         get_ and list_ methods in the extension loader.
+#   class: The base class for the extension type. Extension loader will check
+#          whether classes it discovers are subclassed from this.
+#   default package: This is the package that will be searched for extensions
+#                    of that type by default (if no other packages are
+#                    specified when creating the extension loader). This
+#                    package *must* exist.
+#   default path: This is the subdirectory under the environment_root which
+#                 will be searched for extensions of this type by default (if
+#                 no other paths are specified when creating the extension
+#                 loader). This directory will be automatically created if it
+#                 does not exist.
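+# For example (illustrative): the 'workload' row below means extensions of that
+# type must subclass wlauto.core.workload.Workload, and are discovered in the
+# wlauto.workloads package and in the 'workloads' subdirectory of the
+# environment root.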
+
+#pylint: disable=C0326
+_EXTENSION_TYPE_TABLE = [
+    # name,              class,                                     default package,            default path
+    ('command',          'wlauto.core.command.Command',             'wlauto.commands',          'commands'),
+    ('device',           'wlauto.core.device.Device',               'wlauto.devices',           'devices'),
+    ('instrument',       'wlauto.core.instrumentation.Instrument',  'wlauto.instrumentation',   'instruments'),
+    ('module',           'wlauto.core.extension.Module',            'wlauto.modules',           'modules'),
+    ('resource_getter',  'wlauto.core.resource.ResourceGetter',     'wlauto.resource_getters',  'resource_getters'),
+    ('result_processor', 'wlauto.core.result.ResultProcessor',      'wlauto.result_processors', 'result_processors'),
+    ('workload',         'wlauto.core.workload.Workload',           'wlauto.workloads',         'workloads'),
+]
+_Extension = namedtuple('_Extension', 'name, cls, default_package, default_path')
+_extensions = [_Extension._make(ext) for ext in _EXTENSION_TYPE_TABLE]  # pylint: disable=W0212
+
+
+class ConfigLoader(object):
+    """
+    This class is responsible for loading and validating config files.
+
+    """
+
+    def __init__(self):
+        self._loaded = False
+        self._config = {}
+        self.config_count = 0
+        self._loaded_files = []
+        self.environment_root = None
+        self.output_directory = 'wa_output'
+        self.reboot_after_each_iteration = True
+        self.dependencies_directory = None
+        self.agenda = None
+        self.extension_packages = []
+        self.extension_paths = []
+        self.extensions = []
+        self.verbosity = 0
+        self.debug = False
+        self.package_directory = os.path.dirname(_this_dir)
+        self.commands = {}
+
+    @property
+    def meta_directory(self):
+        return os.path.join(self.output_directory, '__meta')
+
+    @property
+    def log_file(self):
+        return os.path.join(self.output_directory, 'run.log')
+
+    def update(self, source):
+        if isinstance(source, dict):
+            self.update_from_dict(source)
+        else:
+            self.config_count += 1
+            self.update_from_file(source)
+
+    def update_from_file(self, source):
+        try:
+            new_config = imp.load_source('config_{}'.format(self.config_count), source)
+        except SyntaxError, e:
+            message = 'Syntax error in config: {}'.format(str(e))
+            raise ConfigError(message)
+        self._config = merge_dicts(self._config, vars(new_config),
+                                   list_duplicates='first', match_types=False, dict_type=OrderedDict)
+        self._loaded_files.append(source)
+        self._loaded = True
+
+    def update_from_dict(self, source):
+        normalized_source = dict((identifier(k), v) for k, v in source.iteritems())
+        self._config = merge_dicts(self._config, normalized_source, list_duplicates='first',
+                                   match_types=False, dict_type=OrderedDict)
+        self._loaded = True
+
+    def get_config_paths(self):
+        return [lf.rstrip('c') for lf in self._loaded_files]
+
+    def _check_loaded(self):
+        if not self._loaded:
+            raise ConfigError('Config file not loaded.')
+
+    def __getattr__(self, name):
+        self._check_loaded()
+        return self._config.get(normalize(name))
+
+
+def init_environment(env_root, dep_dir, extension_paths, overwrite_existing=False):  # pylint: disable=R0914
+    """Initialise a fresh user environment, creating the Workload Automation
+    user directory structure and the default config file."""
+    if os.path.exists(env_root):
+        if not overwrite_existing:
+            raise ConfigError('Environment {} already exists.'.format(env_root))
+        shutil.rmtree(env_root)
+
+    os.makedirs(env_root)
+    with open(os.path.join(_this_dir, '..', 'config_example.py')) as rf:
+        text = re.sub(r'""".*?"""', '', rf.read(), 1, re.DOTALL)
+    with open(os.path.join(env_root, 'config.py'), 'w') as wf:
+        wf.write(text)
+
+    os.makedirs(dep_dir)
+    for path in extension_paths:
+        os.makedirs(path)
+
+    # If running with sudo on POSIX, change the ownership to the real user.
+    real_user = os.getenv('SUDO_USER')
+    if real_user:
+        import pwd  # done here as module won't import on win32
+        user_entry = pwd.getpwnam(real_user)
+        uid, gid = user_entry.pw_uid, user_entry.pw_gid
+        os.chown(env_root, uid, gid)
+        # why, oh why isn't there a recursive=True option for os.chown?
+        for root, dirs, files in os.walk(env_root):
+            for d in dirs:
+                os.chown(os.path.join(root, d), uid, gid)
+            for f in files:  # pylint: disable=W0621
+                os.chown(os.path.join(root, f), uid, gid)
+
+
+_env_root = os.getenv('WA_USER_DIRECTORY', os.path.join(_user_home, '.workload_automation'))
+_dep_dir = os.path.join(_env_root, 'dependencies')
+_extension_paths = [os.path.join(_env_root, ext.default_path) for ext in _extensions]
+_extension_paths.extend(os.getenv('WA_EXTENSION_PATHS', '').split(os.pathsep))
+
+if not os.path.isdir(_env_root):
+    init_environment(_env_root, _dep_dir, _extension_paths)
+elif not os.path.isfile(os.path.join(_env_root, 'config.py')):
+    with open(os.path.join(_this_dir, '..', 'config_example.py')) as f:
+        f_text = re.sub(r'""".*?"""', '', f.read(), 1, re.DOTALL)
+    with open(os.path.join(_env_root, 'config.py'), 'w') as f:
+        f.write(f_text)
+
+settings = ConfigLoader()
+settings.environment_root = _env_root
+settings.dependencies_directory = _dep_dir
+settings.extension_paths = _extension_paths
+settings.extensions = _extensions
+
+_packages_file = os.path.join(_env_root, 'packages')
+if os.path.isfile(_packages_file):
+    with open(_packages_file) as fh:
+        settings.extension_packages = unique(fh.read().split())
+
+_env_config = os.path.join(settings.environment_root, 'config.py')
+settings.update(_env_config)
diff --git a/wlauto/core/command.py b/wlauto/core/command.py
new file mode 100644
index 00000000..5822145a
--- /dev/null
+++ b/wlauto/core/command.py
@@ -0,0 +1,67 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import textwrap
+
+from wlauto.core.extension import Extension
+from wlauto.core.entry_point import init_argument_parser
+from wlauto.utils.doc import format_body
+
+
+class Command(Extension):
+    """
+    Defines a Workload Automation command. This will be executed from the command line as
+    ``wa <command> [args ...]``. This defines the name to be used when invoking wa, the
+    code that will actually be executed on invocation and the argument parser to be used
+    to parse the rest of the command line arguments.
+
+    """
+
+    help = None
+    usage = None
+    description = None
+    epilog = None
+    formatter_class = None
+
+    def __init__(self, subparsers):
+        super(Command, self).__init__()
+        self.group = subparsers
+        parser_params = dict(help=(self.help or self.description), usage=self.usage,
+                             description=format_body(textwrap.dedent(self.description), 80),
+                             epilog=self.epilog)
+        if self.formatter_class:
+            parser_params['formatter_class'] = self.formatter_class
+        self.parser = subparsers.add_parser(self.name, **parser_params)
+        init_argument_parser(self.parser)  # propagate top-level options
+        self.initialize()
+
+    def initialize(self):
+        """
+        Perform command-specific initialisation (e.g. adding command-specific options to the command's
+        parser).
+
+        """
+        pass
+
+    def execute(self, args):
+        """
+        Execute this command.
+
+        :args: An ``argparse.Namespace`` containing command line arguments (as returned by
+               ``argparse.ArgumentParser.parse_args()``). This would usually be the result of
+               invoking ``self.parser``.
+
+        """
+        raise NotImplementedError()
diff --git a/wlauto/core/configuration.py b/wlauto/core/configuration.py
new file mode 100644
index 00000000..432e55ae
--- /dev/null
+++ b/wlauto/core/configuration.py
@@ -0,0 +1,756 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import os
+import json
+from copy import copy
+from collections import OrderedDict
+
+from wlauto.exceptions import ConfigError
+from wlauto.utils.misc import merge_dicts, merge_lists, load_struct_from_file
+from wlauto.utils.types import regex_type, identifier
+
+
+class SharedConfiguration(object):
+
+    def __init__(self):
+        self.number_of_iterations = None
+        self.workload_name = None
+        self.label = None
+        self.boot_parameters = OrderedDict()
+        self.runtime_parameters = OrderedDict()
+        self.workload_parameters = OrderedDict()
+        self.instrumentation = []
+
+
+class ConfigurationJSONEncoder(json.JSONEncoder):
+
+    def default(self, obj):  # pylint: disable=E0202
+        if isinstance(obj, WorkloadRunSpec):
+            return obj.to_dict()
+        elif isinstance(obj, RunConfiguration):
+            return obj.to_dict()
+        elif isinstance(obj, RebootPolicy):
+            return obj.policy
+        elif isinstance(obj, regex_type):
+            return obj.pattern
+        else:
+            return json.JSONEncoder.default(self, obj)
+
+
+class WorkloadRunSpec(object):
+    """
+    Specifies execution of a workload, including things like the number of
+    iterations, device runtime_parameters configuration, etc.
+
+    """
+
+    # These should be handled by the framework if not explicitly specified
+    # so it's a programming error if they're not
+    framework_mandatory_parameters = ['id', 'number_of_iterations']
+
+    # These *must* be specified by the user (through one mechanism or another)
+    # and it is a configuration error if they're not.
+    mandatory_parameters = ['workload_name']
+
+    def __init__(self,
+                 id=None,  # pylint: disable=W0622
+                 number_of_iterations=None,
+                 workload_name=None,
+                 boot_parameters=None,
+                 label=None,
+                 section_id=None,
+                 workload_parameters=None,
+                 runtime_parameters=None,
+                 instrumentation=None,
+                 flash=None,
+                 ):  # pylint: disable=W0622
+        self.id = id
+        self.number_of_iterations = number_of_iterations
+        self.workload_name = workload_name
+        self.label = label or self.workload_name
+        self.section_id = section_id
+        self.boot_parameters = boot_parameters or OrderedDict()
+        self.runtime_parameters = runtime_parameters or OrderedDict()
+        self.workload_parameters = workload_parameters or OrderedDict()
+        self.instrumentation = instrumentation or []
+        self.flash = flash or OrderedDict()
+        self._workload = None
+        self._section = None
+        self.enabled = True
+
+    def set(self, param, value):
+        if param in ['id', 'section_id', 'number_of_iterations', 'workload_name', 'label']:
+            if value is not None:
+                setattr(self, param, value)
+        elif param in ['boot_parameters', 'runtime_parameters', 'workload_parameters', 'flash']:
+            setattr(self, param, merge_dicts(getattr(self, param), value, list_duplicates='last',
+                                             dict_type=OrderedDict, should_normalize=False))
+        elif param in ['instrumentation']:
+            setattr(self, param, merge_lists(getattr(self, param), value, duplicates='last'))
+        else:
+            raise ValueError('Unexpected workload spec parameter: {}'.format(param))
+
+    def validate(self):
+        for param_name in self.framework_mandatory_parameters:
+            param = getattr(self, param_name)
+            if param is None:
+                msg = '{} not set for workload spec.'
+                raise RuntimeError(msg.format(param_name))
+        for param_name in self.mandatory_parameters:
+            param = getattr(self, param_name)
+            if param is None:
+                msg = '{} not set for workload spec for workload {}'
+                raise ConfigError(msg.format(param_name, self.id))
+
+    def match_selectors(self, selectors):
+        """
+        Returns ``True`` if this spec matches the specified selectors, and
+        ``False`` otherwise. ``selectors`` must be a dict-like object with
+        attribute names mapping onto selector values. At the moment, only equality
+        selection is supported; i.e. the value of the attribute of the spec must
+        match exactly the corresponding value specified in the ``selectors`` dict.
+
+        """
+        if not selectors:
+            return True
+        for k, v in selectors.iteritems():
+            if getattr(self, k, None) != v:
+                return False
+        return True
+
+    @property
+    def workload(self):
+        if self._workload is None:
+            raise RuntimeError("Workload for {} has not been loaded".format(self))
+        return self._workload
+
+    @property
+    def section(self):
+        if self.section_id and self._section is None:
+            raise RuntimeError("Section for {} has not been loaded".format(self))
+        return self._section
+
+    def load(self, device, ext_loader):
+        """Loads the workload for the specified device using the specified loader.
+        This must be done before attempting to execute the spec."""
+        self._workload = ext_loader.get_workload(self.workload_name, device, **self.workload_parameters)
+
+    def to_dict(self):
+        d = copy(self.__dict__)
+        del d['_workload']
+        del d['_section']
+        return d
+
+    def __str__(self):
+        return '{} {}'.format(self.id, self.label)
+
+    def __cmp__(self, other):
+        if not isinstance(other, WorkloadRunSpec):
+            return cmp('WorkloadRunSpec', other.__class__.__name__)
+        return cmp(self.id, other.id)
+
+
+class _SpecConfig(object):
+    # TODO: This is a bit of a HACK for alias resolution. This formats Alias
+    #       params as if they came from config.
+
+    def __init__(self, name, params=None):
+        setattr(self, name, params or {})
+
+
+class RebootPolicy(object):
+    """
+    Represents the reboot policy for the execution -- at what points the device
+    should be rebooted. This, in turn, is controlled by the policy value that is
+    passed in on construction and would typically be read from the user's settings.
+    Valid policy values are:
+
+    :never: The device will never be rebooted.
+    :as_needed: Only reboot the device if it becomes unresponsive, or needs to be flashed, etc.
+    :initial: The device will be rebooted when the execution first starts, just before
+              executing the first workload spec.
+    :each_spec: The device will be rebooted before running a new workload spec.
+    :each_iteration: The device will be rebooted before each new iteration.
+
+    """
+
+    valid_policies = ['never', 'as_needed', 'initial', 'each_spec', 'each_iteration']
+
+    def __init__(self, policy):
+        policy = policy.strip().lower().replace(' ', '_')
+        if policy not in self.valid_policies:
+            message = 'Invalid reboot policy {}; must be one of {}'.format(policy, ', '.join(self.valid_policies))
+            raise ConfigError(message)
+        self.policy = policy
+
+    @property
+    def can_reboot(self):
+        return self.policy != 'never'
+
+    @property
+    def perform_initial_boot(self):
+        return self.policy not in ['never', 'as_needed']
+
+    @property
+    def reboot_on_each_spec(self):
+        return self.policy in ['each_spec', 'each_iteration']
+
+    @property
+    def reboot_on_each_iteration(self):
+        return self.policy == 'each_iteration'
+
+    def __str__(self):
+        return self.policy
+
+    __repr__ = __str__
+
+    def __cmp__(self, other):
+        if isinstance(other, RebootPolicy):
+            return cmp(self.policy, other.policy)
+        else:
+            return cmp(self.policy, other)
+
+
+class RunConfigurationItem(object):
+    """
+    This represents a predetermined "configuration point" (an individual setting)
+    and describes how it must be handled when encountered.
+
+    """
+
+    # Also defines the NULL value for each category
+    valid_categories = {
+        'scalar': None,
+        'list': [],
+        'dict': {},
+    }
+
+    # A callable that takes an arbitrary number of positional arguments
+    # is also valid.
+    valid_methods = ['keep', 'replace', 'merge']
+
+    def __init__(self, name, category, method):
+        if category not in self.valid_categories:
+            raise ValueError('Invalid category: {}'.format(category))
+        if not callable(method) and method not in self.valid_methods:
+            raise ValueError('Invalid method: {}'.format(method))
+        if category == 'scalar' and method == 'merge':
+            raise ValueError('Method cannot be "merge" for a scalar')
+        self.name = name
+        self.category = category
+        self.method = method
+
+    def combine(self, *args):
+        """
+        Combine the provided values according to the method for this
+        configuration item. Order matters -- values are assumed to be
+        in the order they were specified by the user. The resulting value
+        is also checked to match the specified type.
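+
+        For example (illustrative): a 'list' item with method 'merge' combines
+        ['execution_time'] and ['cpufreq'] into ['execution_time', 'cpufreq'],
+        while a 'scalar' item with method 'replace' yields just the last value
+        passed.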
+ + """ + args = [a for a in args if a is not None] + if not args: + return self.valid_categories[self.category] + if self.method == 'keep' or len(args) == 1: + value = args[0] + elif self.method == 'replace': + value = args[-1] + elif self.method == 'merge': + if self.category == 'list': + value = merge_lists(*args, duplicates='last', dict_type=OrderedDict) + elif self.category == 'dict': + value = merge_dicts(*args, + should_merge_lists=True, + should_normalize=False, + list_duplicates='last', + dict_type=OrderedDict) + else: + raise ValueError('Unexpected category for merge : "{}"'.format(self.category)) + elif callable(self.method): + value = self.method(*args) + else: + raise ValueError('Unexpected method: "{}"'.format(self.method)) + + return value + + +def _combine_ids(*args): + return '_'.join(args) + + +class RunConfiguration(object): + """ + Loads and maintains the unified configuration for this run. This includes configuration + for WA execution as a whole, and parameters for specific specs. + + WA configuration mechanism aims to be flexible and easy to use, while at the same + time providing storing validation and early failure on error. To meet these requirements, + the implementation gets rather complicated. This is going to be a quick overview of + the underlying mechanics. + + .. note:: You don't need to know this to use WA, or to write extensions for it. From + the point of view of extension writers, configuration from various sources + "magically" appears as attributes of their classes. This explanation peels + back the curtain and is intended for those who, for one reason or another, + need to understand how the magic works. + + **terminology** + + run + + A single execution of a WA agenda. + + run config(uration) (object) + + An instance of this class. There is one per run. + + config(uration) item + + A single configuration entry or "setting", e.g. the device interface to use. These + can be for the run as a whole, or for a specific extension. + + (workload) spec + + A specification of a single workload execution. This combines workload configuration + with things like the number of iterations to run, which instruments to enable, etc. + More concretely, this is an instance of :class:`WorkloadRunSpec`. + + **overview** + + There are three types of WA configuration: + + 1. "Meta" configuration that determines how the rest of the configuration is + processed (e.g. where extensions get loaded from). Since this does not pertain + to *run* configuration, it will not be covered further. + 2. Global run configuration, e.g. which workloads, result processors and instruments + will be enabled for a run. + 3. Per-workload specification configuration, that determines how a particular workload + instance will get executed (e.g. what workload parameters will be used, how many + iterations. + + **run configuration** + + Run configuration may appear in a config file (usually ``~/.workload_automation/config.py``), + or in the ``config`` section of an agenda. Configuration is specified as a nested structure + of dictionaries (associative arrays, or maps) and lists in the syntax following the format + implied by the file extension (currently, YAML and Python are supported). If the same + configuration item appears in more than one source, they are merged with conflicting entries + taking the value from the last source that specified them. 
+ + +class RunConfiguration(object): + """ + Loads and maintains the unified configuration for this run. This includes configuration + for WA execution as a whole, and parameters for specific specs. + + WA configuration mechanism aims to be flexible and easy to use, while at the same + time providing strong validation and early failure on error. To meet these requirements, + the implementation gets rather complicated. This is going to be a quick overview of + the underlying mechanics. + + .. note:: You don't need to know this to use WA, or to write extensions for it. From + the point of view of extension writers, configuration from various sources + "magically" appears as attributes of their classes. This explanation peels + back the curtain and is intended for those who, for one reason or another, + need to understand how the magic works. + + **terminology** + + run + + A single execution of a WA agenda. + + run config(uration) (object) + + An instance of this class. There is one per run. + + config(uration) item + + A single configuration entry or "setting", e.g. the device interface to use. These + can be for the run as a whole, or for a specific extension. + + (workload) spec + + A specification of a single workload execution. This combines workload configuration + with things like the number of iterations to run, which instruments to enable, etc. + More concretely, this is an instance of :class:`WorkloadRunSpec`. + + **overview** + + There are three types of WA configuration: + + 1. "Meta" configuration that determines how the rest of the configuration is + processed (e.g. where extensions get loaded from). Since this does not pertain + to *run* configuration, it will not be covered further. + 2. Global run configuration, e.g. which workloads, result processors and instruments + will be enabled for a run. + 3. Per-workload specification configuration, that determines how a particular workload + instance will get executed (e.g. what workload parameters will be used, how many + iterations). + + **run configuration** + + Run configuration may appear in a config file (usually ``~/.workload_automation/config.py``), + or in the ``config`` section of an agenda. Configuration is specified as a nested structure + of dictionaries (associative arrays, or maps) and lists in the syntax following the format + implied by the file extension (currently, YAML and Python are supported). If the same + configuration item appears in more than one source, they are merged with conflicting entries + taking the value from the last source that specified them. + + In addition to a fixed set of global configuration items, configuration for any WA + Extension (instrument, result processor, etc) may also be specified, namespaced under + the extension's name (i.e. the extension's name is a key in the global config with the value + being a dict of parameters and their values). Some Extension parameters also specify a + "global alias" that may appear at the top-level of the config rather than under the + Extension's name. It is *not* an error to specify configuration for an Extension that has + not been enabled for a particular run; such configuration will be ignored. + + + **per-workload configuration** + + Per-workload configuration can be specified in three places in the agenda: the + workload entry in the ``workloads`` list, the ``global`` entry (configuration there + will be applied to every workload entry), and in a section entry in the ``sections`` list + (configuration in every section will be applied to every workload entry separately, + creating a "cross-product" of section and workload configurations; additionally, + sections may specify their own workload lists). + + If the same configuration item appears in more than one of the above places, they will + be merged in the following order: ``global``, ``section``, ``workload``, with conflicting + scalar values in the later locations overriding those from the previous ones. + + + **Global parameter aliases** + + As mentioned above, an Extension's parameter may define a global alias, which will be + specified and picked up from the top-level config, rather than config for that specific + extension. It is an error to specify the value for a parameter both through a global + alias and through the extension config dict in the same configuration file. It is, however, + possible to use a global alias in one file, and specify extension configuration for the + same parameter in another file, in which case the usual merging rules would apply. + + **Loading and validation of configuration** + + Validation of user-specified configuration happens at several stages of run initialisation, + to ensure that appropriate context for that particular type of validation is available and + that meaningful errors can be reported, as early as is feasible. + + - Syntactic validation is performed when configuration is first loaded. + This is done by the loading mechanism (e.g. YAML parser), rather than WA itself. WA + propagates any errors encountered as ``ConfigError``\ s. + - Once a config file is loaded into a Python structure, it is scanned to + extract settings. Static configuration is validated and added to the config. Extension + configuration is collected into a collection of "raw" config, and merged as appropriate, but + is not processed further at this stage. + - Once all configuration sources have been processed, the configuration as a whole + is validated (to make sure there are no missing settings, etc). + - Extensions are loaded through the run config object, which instantiates + them with appropriate parameters based on the "raw" config collected earlier. When an + Extension is instantiated in such a way, its config is "officially" added to the run configuration + tracked by the run config object. Raw config is discarded at the end of the run, so + that any config that wasn't loaded in this way is not recorded (as it was not actually used). + - Extension parameters are validated individually (for type, value ranges, etc) as they are + loaded in the Extension's __init__.
+ - An extension's ``validate()`` method is invoked before it is used (exactly when this + happens depends on the extension's type) to perform any final validation *that does not + rely on the target being present* (i.e. this would happen before WA connects to the target). + This can be used to perform inter-parameter validation for an extension (e.g. when the valid + range for one parameter depends on another), and to check more general WA state assumptions (e.g. a result + processor can check that an instrument it depends on has been installed). + - Finally, it is the responsibility of individual extensions to validate any assumptions + they make about the target device (usually as part of their ``setup()``). + + **Handling of Extension aliases.** + + WA extensions can have zero or more aliases (not to be confused with global aliases for extension + *parameters*). An alias associates an alternative name for the extension with a set + of parameter values. In other words, aliases associate common configurations for an extension with + a name, providing a shorthand for it. For example, "t-rex_offscreen" is an alias for the "glbenchmark" + workload that specifies that "use_case" should be "t-rex" and "variant" should be "offscreen". + + **special loading rules** + + Note that as a consequence of being able to specify configuration for *any* Extension namespaced + under the Extension's name in the top-level config, two distinct mechanisms exist for configuring + devices and workloads. This is valid; however, due to their nature, they are handled in a special way. + This may be counter-intuitive, so configuring devices and workloads by creating entries for their + names in the config is discouraged in favour of using the "normal" mechanisms of configuring them + (``device_config`` for devices and workload specs in the agenda for workloads). + + In both cases (devices and workloads), "normal" config will always override named extension config + *irrespective of which file it was specified in*. So an ``adb_name`` specified in ``device_config`` + inside ``~/.workload_automation/config.py`` will override ``adb_name`` specified for ``juno`` in the + agenda (even when device is set to "juno"). + + Again, this ignores normal loading rules, so the use of named extension configuration for devices + and workloads is discouraged. There may be some situations where this behaviour is useful, however + (e.g. maintaining configuration for different devices in one config file). + + """ + + default_reboot_policy = 'as_needed' + default_execution_order = 'by_iteration' + + # This is generic top-level configuration. + general_config = [ + RunConfigurationItem('run_name', 'scalar', 'replace'), + RunConfigurationItem('project', 'scalar', 'replace'), + RunConfigurationItem('project_stage', 'dict', 'replace'), + RunConfigurationItem('execution_order', 'scalar', 'replace'), + RunConfigurationItem('reboot_policy', 'scalar', 'replace'), + RunConfigurationItem('device', 'scalar', 'replace'), + RunConfigurationItem('flashing_config', 'dict', 'replace'), + ]
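+
+    # Editor's note -- an illustrative config-file fragment of the kind the
+    # docstring above describes (the extension and parameter names here are
+    # assumptions):
+    #
+    #   device = 'juno'                       # general item: last source wins
+    #   device_config = {'adb_name': '...'}   # "normal" device configuration
+    #   instrumentation = ['trace-cmd']       # merged across sources
+    #   trace_cmd = {'buffer_size': 42}       # namespaced extension config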
+ + # Configuration specified for each workload spec. "workload_parameters" + # aren't listed because they are handled separately. + workload_config = [ + RunConfigurationItem('id', 'scalar', _combine_ids), + RunConfigurationItem('number_of_iterations', 'scalar', 'replace'), + RunConfigurationItem('workload_name', 'scalar', 'replace'), + RunConfigurationItem('label', 'scalar', 'replace'), + RunConfigurationItem('section_id', 'scalar', 'replace'), + RunConfigurationItem('boot_parameters', 'dict', 'merge'), + RunConfigurationItem('runtime_parameters', 'dict', 'merge'), + RunConfigurationItem('instrumentation', 'list', 'merge'), + RunConfigurationItem('flash', 'dict', 'merge'), + ] + + # List of names that may be present in configuration (and it is valid for + # them to be there) but are not handled by RunConfiguration. + ignore_names = ['logging']
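+
+    # Editor's note: the 'id' item above uses the callable _combine_ids as its
+    # method, so when both a section and a workload supply an id they are
+    # joined, e.g. section 's1' and workload 'w1' yield the spec id 's1_w1'.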
+ + def get_reboot_policy(self): + if not self._reboot_policy: + self._reboot_policy = RebootPolicy(self.default_reboot_policy) + return self._reboot_policy + + def set_reboot_policy(self, value): + if isinstance(value, RebootPolicy): + self._reboot_policy = value + else: + self._reboot_policy = RebootPolicy(value) + + reboot_policy = property(get_reboot_policy, set_reboot_policy) + + @property + def all_instrumentation(self): + result = set() + for spec in self.workload_specs: + result = result.union(set(spec.instrumentation)) + return result + + def __init__(self, ext_loader): + self.ext_loader = ext_loader + self.device = None + self.device_config = None + self.execution_order = None + self.project = None + self.project_stage = None + self.run_name = None + self.instrumentation = {} + self.result_processors = {} + self.workload_specs = [] + self.flashing_config = {} + self.other_config = {} # keeps track of used config for extensions other than the four main kinds. + self._used_config_items = [] + self._global_instrumentation = [] + self._reboot_policy = None + self._agenda = None + self._finalized = False + self._general_config_map = {i.name: i for i in self.general_config} + self._workload_config_map = {i.name: i for i in self.workload_config} + # Config files may contain static configuration for extensions that + # would not be part of this run (e.g. DB connection settings + # for a result processor that has not been enabled). Such settings + # should not be part of configuration for this run (as they will not + # be affecting it), but we still need to keep track of it in case a later + # config (e.g. from the agenda) enables the extension. + # For this reason, all extension config is first loaded into the + # following dict and when an extension is identified as needed for the + # run, its config is picked up from this "raw" dict and it becomes part + # of the run configuration. + self._raw_config = {'instrumentation': [], 'result_processors': []} + + def get_extension(self, ext_name, *args): + self._check_finalized() + self._load_default_config_if_necessary(ext_name) + ext_config = self._raw_config[ext_name] + ext_cls = self.ext_loader.get_extension_class(ext_name) + if ext_cls.kind not in ['workload', 'device', 'instrument', 'result_processor']: + self.other_config[ext_name] = ext_config + return self.ext_loader.get_extension(ext_name, *args, **ext_config) + + def to_dict(self): + d = copy(self.__dict__) + to_remove = ['ext_loader', 'workload_specs'] + [k for k in d.keys() if k.startswith('_')] + for attr in to_remove: + del d[attr] + d['workload_specs'] = [s.to_dict() for s in self.workload_specs] + d['reboot_policy'] = self.reboot_policy # this is a property so not in __dict__ + return d + + def load_config(self, source): + """Load configuration from the specified source. The source must be + either a path to a valid config file or a dict-like object. Currently, + config files can be either python modules (.py extension) or YAML documents + (.yaml extension).""" + if self._finalized: + raise ValueError('Attempting to load a config file after run configuration has been finalized.') + try: + config_struct = _load_raw_struct(source) + self._merge_config(config_struct) + except ConfigError as e: + message = 'Error in {}:\n\t{}' + raise ConfigError(message.format(getattr(source, 'name', None), e.message)) + + def set_agenda(self, agenda, selectors=None): + """Set the agenda for this run; unlike with config files, there can only be one agenda.""" + if self._agenda: + # note: this also guards against loading an agenda after finalized() has been called, + # as that would have required an agenda to be set. + message = 'Attempting to set a second agenda {};\n\talready have agenda {} set' + raise ValueError(message.format(agenda.filepath, self._agenda.filepath)) + try: + self._merge_config(agenda.config or {}) + self._load_specs_from_agenda(agenda, selectors) + self._agenda = agenda + except ConfigError as e: + message = 'Error in {}:\n\t{}' + raise ConfigError(message.format(agenda.filepath, e.message)) + + def finalize(self): + """This must be invoked once all configuration sources have been loaded. This will + do the final processing, setting instrumentation and result processor configuration + for the run, and making sure that all the mandatory config has been specified.""" + if self._finalized: + return + if not self._agenda: + raise ValueError('Attempting to finalize run configuration before an agenda is loaded.') + self._finalize_config_list('instrumentation') + self._finalize_config_list('result_processors') + if not self.device: + raise ConfigError('Device not specified in the config.') + self._finalize_device_config() + if not self.reboot_policy.reboot_on_each_spec: + for spec in self.workload_specs: + if spec.boot_parameters: + message = 'spec {} specifies boot_parameters; reboot policy must be at least "each_spec"' + raise ConfigError(message.format(spec.id)) + for spec in self.workload_specs: + for globinst in self._global_instrumentation: + if globinst not in spec.instrumentation: + spec.instrumentation.append(globinst) + spec.validate() + self._finalized = True + + def serialize(self, wfh): + json.dump(self, wfh, cls=ConfigurationJSONEncoder, indent=4)
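+
+    # Editor's note -- the intended call sequence, as exercised by the Executor
+    # in wlauto/core/execution.py (same commit):
+    #
+    #   config = RunConfiguration(ext_loader)
+    #   for path in config_file_paths:   # zero or more config sources
+    #       config.load_config(path)
+    #   config.set_agenda(agenda)        # exactly one agenda per run
+    #   config.finalize()                # validates; required before use
+    #   inst = config.get_extension('some_instrument')  # hypothetical name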
+ + """ + if not isinstance(config, dict): + raise ValueError('config must be a dict; found {}'.format(config.__class__.__name__)) + + for k, v in config.iteritems(): + k = identifier(k) + if k in self.ext_loader.global_param_aliases: + self._resolve_global_alias(k, v) + elif k in self._general_config_map: + self._set_run_config_item(k, v) + elif self.ext_loader.has_extension(k): + self._set_extension_config(k, v) + elif k == 'device_config': + self._set_raw_dict(k, v) + elif k in ['instrumentation', 'result_processors']: + # Instrumentation can be enabled and disabled by individual + # workloads, so we need to track it in two places: a list of + # all instruments for the run (as they will all need to be + # initialized and installed, and a list of only the "global" + # instruments which can then be merged into instrumentation + # lists of individual workload specs. + self._set_raw_list('_global_{}'.format(k), v) + self._set_raw_list(k, v) + elif k in self.ignore_names: + pass + else: + raise ConfigError('Unknown configuration option: {}'.format(k)) + + def _resolve_global_alias(self, name, value): + ga = self.ext_loader.global_param_aliases[name] + for param, ext in ga.iteritems(): + for name in [ext.name] + [a.name for a in ext.aliases]: + self._load_default_config_if_necessary(name) + self._raw_config[name][param.name] = value + + def _set_run_config_item(self, name, value): + item = self._general_config_map[name] + combined_value = item.combine(getattr(self, name, None), value) + setattr(self, name, combined_value) + + def _set_extension_config(self, name, value): + default_config = self.ext_loader.get_default_config(name) + self._set_raw_dict(name, value, default_config) + + def _set_raw_dict(self, name, value, default_config=None): + existing_config = self._raw_config.get(name, default_config or {}) + new_config = _merge_config_dicts(existing_config, value) + self._raw_config[name] = new_config + + def _set_raw_list(self, name, value): + old_value = self._raw_config.get(name, []) + new_value = merge_lists(old_value, value, duplicates='last') + self._raw_config[name] = new_value + + def _finalize_config_list(self, attr_name): + """Note: the name is somewhat misleading. This finalizes a list + form the specified configuration (e.g. 
"instrumentation"); internal + representation is actually a dict, not a list...""" + ext_config = {} + raw_list = self._raw_config.get(attr_name, []) + for extname in raw_list: + default_config = self.ext_loader.get_default_config(extname) + ext_config[extname] = self._raw_config.get(extname, default_config) + list_name = '_global_{}'.format(attr_name) + setattr(self, list_name, raw_list) + setattr(self, attr_name, ext_config) + + def _finalize_device_config(self): + self._load_default_config_if_necessary(self.device) + config = _merge_config_dicts(self._raw_config.get(self.device), + self._raw_config.get('device_config', {})) + self.device_config = config + + def _load_default_config_if_necessary(self, name): + if name not in self._raw_config: + self._raw_config[name] = self.ext_loader.get_default_config(name) + + def _load_specs_from_agenda(self, agenda, selectors): + global_dict = agenda.global_.to_dict() if agenda.global_ else {} + if agenda.sections: + for section_entry in agenda.sections: + section_dict = section_entry.to_dict() + for workload_entry in agenda.workloads + section_entry.workloads: + workload_dict = workload_entry.to_dict() + self._load_workload_spec(global_dict, section_dict, workload_dict, selectors) + else: # no sections were specified + for workload_entry in agenda.workloads: + workload_dict = workload_entry.to_dict() + self._load_workload_spec(global_dict, {}, workload_dict, selectors) + + def _load_workload_spec(self, global_dict, section_dict, workload_dict, selectors): + spec = WorkloadRunSpec() + for name, config in self._workload_config_map.iteritems(): + value = config.combine(global_dict.get(name), section_dict.get(name), workload_dict.get(name)) + spec.set(name, value) + if section_dict: + spec.set('section_id', section_dict.get('id')) + + realname, alias_config = self.ext_loader.resolve_alias(spec.workload_name) + if not spec.label: + spec.label = spec.workload_name + spec.workload_name = realname + dicts = [self.ext_loader.get_default_config(realname), + alias_config, + self._raw_config.get(spec.workload_name), + global_dict.get('workload_parameters'), + section_dict.get('workload_parameters'), + workload_dict.get('workload_parameters')] + dicts = [d for d in dicts if d is not None] + value = _merge_config_dicts(*dicts) + spec.set('workload_parameters', value) + + if not spec.number_of_iterations: + spec.number_of_iterations = 1 + + if spec.match_selectors(selectors): + instrumentation_config = self._raw_config['instrumentation'] + for instname in spec.instrumentation: + if instname not in instrumentation_config: + instrumentation_config.append(instname) + self.workload_specs.append(spec) + + def _check_finalized(self): + if not self._finalized: + raise ValueError('Attempting to access configuration before it has been finalized.') + + +def _load_raw_struct(source): + """Load a raw dict config structure from the specified source.""" + if isinstance(source, basestring): + if os.path.isfile(source): + raw = load_struct_from_file(filepath=source) + else: + raise ConfigError('File "{}" does not exit'.format(source)) + elif isinstance(source, dict): + raw = source + else: + raise ConfigError('Unknown config source: {}'.format(source)) + return raw + + +def _merge_config_dicts(*args, **kwargs): + """Provides a different set of default settings for ```merge_dicts`` """ + return merge_dicts(*args, + should_merge_lists=kwargs.get('should_merge_lists', False), + should_normalize=kwargs.get('should_normalize', False), + list_duplicates=kwargs.get('list_duplicates', 
diff --git a/wlauto/core/device.py b/wlauto/core/device.py new file mode 100644 index 00000000..bef51fce --- /dev/null +++ b/wlauto/core/device.py @@ -0,0 +1,418 @@ +# Copyright 2013-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +""" +Base classes for device interfaces. + + :Device: The base class for all devices. This defines the interface that must be + implemented by all devices and that any workload and instrumentation + can therefore always rely on. + :AndroidDevice: Implements most of the :class:`Device` interface, and extends it + with a number of Android-specific methods. + :BigLittleDevice: Subclasses :class:`AndroidDevice` to implement big.LITTLE-specific + runtime parameters. + :SimpleMulticoreDevice: Subclasses :class:`AndroidDevice` to implement homogeneous cores + device runtime parameters. + +""" + +import os +import imp +import string +from collections import OrderedDict +from contextlib import contextmanager + +from wlauto.core.extension import Extension, ExtensionMeta, AttributeCollection, Parameter +from wlauto.exceptions import DeviceError, ConfigError +from wlauto.utils.types import list_of_strings, list_of_integers + + +__all__ = ['RuntimeParameter', 'CoreParameter', 'Device', 'DeviceMeta'] + + +class RuntimeParameter(object): + """ + A runtime parameter which has its getter and setter methods associated + with it. + + """ + + def __init__(self, name, getter, setter, + getter_args=None, setter_args=None, + value_name='value', override=False): + """ + :param name: the name of the parameter. + :param getter: the getter method which returns the value of this parameter. + :param setter: the setter method which sets the value of this parameter. The setter + always expects to be passed one argument when it is called. + :param getter_args: keyword arguments to be used when invoking the getter. + :param setter_args: keyword arguments to be used when invoking the setter. + :param value_name: the name of the keyword argument used to pass the value to + the setter (defaults to ``'value'``). + :param override: A ``bool`` that specifies whether a parameter of the same name further up the + hierarchy should be overridden. If this is ``False`` (the default), an exception + will be raised by the ``AttributeCollection`` instead.
+ + """ + self.name = name + self.getter = getter + self.setter = setter + self.getter_args = getter_args or {} + self.setter_args = setter_args or {} + self.value_name = value_name + self.override = override + + def __str__(self): + return self.name + + __repr__ = __str__ + + +class CoreParameter(RuntimeParameter): + """A runtime parameter that will get expanded into a RuntimeParameter for each core type.""" + + def get_runtime_parameters(self, core_names): + params = [] + for core in set(core_names): + name = string.Template(self.name).substitute(core=core) + getter = string.Template(self.getter).substitute(core=core) + setter = string.Template(self.setter).substitute(core=core) + getargs = dict(self.getter_args.items() + [('core', core)]) + setargs = dict(self.setter_args.items() + [('core', core)]) + params.append(RuntimeParameter(name, getter, setter, getargs, setargs, self.value_name, self.override)) + return params + + +class DeviceMeta(ExtensionMeta): + + to_propagate = ExtensionMeta.to_propagate + [ + ('runtime_parameters', RuntimeParameter, AttributeCollection), + ] + + +class Device(Extension): + """ + Base class for all devices supported by Workload Automation. Defines + the interface the rest of WA uses to interact with devices. + + :name: Unique name used to identify the device. + :platform: The name of the device's platform (e.g. ``Android``) this may + be used by workloads and instrumentation to assess whether they + can run on the device. + :working_directory: a string of the directory which is + going to be used by the workloads on the device. + :binaries_directory: a string of the binary directory for + the device. + :has_gpu: Should be ``True`` if the device as a separate GPU, and + ``False`` if graphics processing is done on a CPU. + + .. note:: Pretty much all devices currently on the market + have GPUs, however this may not be the case for some + development boards. + + :path_module: The name of one of the modules implementing the os.path + interface, e.g. ``posixpath`` or ``ntpath``. You can provide + your own implementation rather than relying on one of the + standard library modules, in which case you need to specify + the *full* path to you module. e.g. '/home/joebloggs/mypathimp.py' + :parameters: A list of RuntimeParameter objects. The order of the objects + is very important as the setters and getters will be called + in the order the RuntimeParameter objects inserted. + :active_cores: This should be a list of all the currently active cpus in + the device in ``'/sys/devices/system/cpu/online'``. The + returned list should be read from the device at the time + of read request. + + """ + __metaclass__ = DeviceMeta + + parameters = [ + Parameter('core_names', kind=list_of_strings, mandatory=True, default=None, + description=""" + This is a list of all cpu cores on the device with each + element being the core type, e.g. ``['a7', 'a7', 'a15']``. The + order of the cores must match the order they are listed in + ``'/sys/devices/system/cpu'``. So in this case, ``'cpu0'`` must + be an A7 core, and ``'cpu2'`` an A15.' + """), + Parameter('core_clusters', kind=list_of_integers, mandatory=True, default=None, + description=""" + This is a list indicating the cluster affinity of the CPU cores, + each element correponding to the cluster ID of the core coresponding + to it's index. E.g. ``[0, 0, 1]`` indicates that cpu0 and cpu1 are on + cluster 0, while cpu2 is on cluster 1. + """), + ] + + runtime_parameters = [] + + # These must be overwritten by subclasses. 
+ + # These must be overwritten by subclasses. + name = None + platform = None + default_working_directory = None + has_gpu = None + path_module = None + active_cores = None + + def __init__(self, **kwargs): # pylint: disable=W0613 + super(Device, self).__init__(**kwargs) + if not self.path_module: + raise NotImplementedError('path_module must be specified by the deriving classes.') + libpath = os.path.dirname(os.__file__) + modpath = os.path.join(libpath, self.path_module) + if not modpath.lower().endswith('.py'): + modpath += '.py' + try: + self.path = imp.load_source('device_path', modpath) + except IOError: + raise DeviceError('Unsupported path module: {}'.format(self.path_module)) + + def reset(self): + """ + Initiate rebooting of the device. + + Added in version 2.1.3. + + """ + raise NotImplementedError() + + def boot(self, *args, **kwargs): + """ + Perform the steps necessary to boot the device to the point where it is ready + to accept other commands. + + Changed in version 2.1.3: no longer expected to wait until boot completes. + + """ + raise NotImplementedError() + + def connect(self, *args, **kwargs): + """ + Establish a connection to the device that will be used for subsequent commands. + + Added in version 2.1.3. + """ + raise NotImplementedError() + + def disconnect(self): + """ Close the established connection to the device. """ + raise NotImplementedError() + + def initialize(self, context, *args, **kwargs): + """ + Default implementation just calls through to init(). May be overridden by specialised + abstract sub-classes to implement platform-specific initialization without requiring + concrete implementations to explicitly invoke parent's init(). + + Added in version 2.1.3. + + """ + self.init(context, *args, **kwargs) + + def init(self, context, *args, **kwargs): + """ + Initialize the device. This method *must* be called after a device reboot before + any other commands can be issued; however, it may also be called without rebooting. + + It is up to device-specific implementations to identify what initialisation needs + to be performed on a particular invocation. Bear in mind that no assumptions can be + made about the state of the device prior to the initiation of workload execution, + so full initialisation must be performed at least once, even if no reboot has occurred. + After that, the device-specific implementation may choose to skip initialization if + the device has not been rebooted; it is up to the implementation to keep track of + that, however. + + All arguments are device-specific (see the documentation for your device). + + """ + pass + + def ping(self): + """ + This must return successfully if the device is able to receive commands, or must + raise :class:`wlauto.exceptions.DeviceUnresponsiveError` if the device cannot respond. + + """ + raise NotImplementedError() + + def get_runtime_parameter_names(self): + return [p.name for p in self._expand_runtime_parameters()] + + def get_runtime_parameters(self): + """ returns the runtime parameters that have been set. """ + # pylint: disable=cell-var-from-loop + runtime_parameters = OrderedDict() + for rtp in self._expand_runtime_parameters(): + if not rtp.getter: + continue + getter = getattr(self, rtp.getter) + rtp_value = getter(**rtp.getter_args) + runtime_parameters[rtp.name] = rtp_value + return runtime_parameters + + def set_runtime_parameters(self, params): + """ + The parameters are passed as a dict and are specific to + a particular device. See the device documentation.
+ + """ + runtime_parameters = self._expand_runtime_parameters() + rtp_map = {rtp.name.lower(): rtp for rtp in runtime_parameters} + + params = OrderedDict((k.lower(), v) for k, v in params.iteritems()) + + expected_keys = rtp_map.keys() + if not set(params.keys()) <= set(expected_keys): + unknown_params = list(set(params.keys()).difference(set(expected_keys))) + raise ConfigError('Unknown runtime parameter(s): {}'.format(unknown_params)) + + for param in params: + rtp = rtp_map[param] + setter = getattr(self, rtp.setter) + args = dict(rtp.setter_args.items() + [(rtp.value_name, params[rtp.name.lower()])]) + setter(**args) + + def capture_screen(self, filepath): + """Captures the current device screen into the specified file in a PNG format.""" + raise NotImplementedError() + + def get_properties(self, output_path): + """Captures and saves the device configuration properties version and + any other relevant information. Return them in a dict""" + raise NotImplementedError() + + def listdir(self, path, **kwargs): + """ List the contents of the specified directory. """ + raise NotImplementedError() + + def push_file(self, source, dest): + """ Push a file from the host file system onto the device. """ + raise NotImplementedError() + + def pull_file(self, source, dest): + """ Pull a file from device system onto the host file system. """ + raise NotImplementedError() + + def delete_file(self, filepath): + """ Delete the specified file on the device. """ + raise NotImplementedError() + + def file_exists(self, filepath): + """ Check if the specified file or directory exist on the device. """ + raise NotImplementedError() + + def get_pids_of(self, process_name): + """ Returns a list of PIDs of the specified process name. """ + raise NotImplementedError() + + def kill(self, pid, as_root=False): + """ Kill the process with the specified PID. """ + raise NotImplementedError() + + def killall(self, process_name, as_root=False): + """ Kill all running processes with the specified name. """ + raise NotImplementedError() + + def install(self, filepath, **kwargs): + """ Install the specified file on the device. What "install" means is device-specific + and may possibly also depend on the type of file.""" + raise NotImplementedError() + + def uninstall(self, filepath): + """ Uninstall the specified file on the device. What "uninstall" means is device-specific + and may possibly also depend on the type of file.""" + raise NotImplementedError() + + def execute(self, command, timeout=None, **kwargs): + """ + Execute the specified command command on the device and return the output. + + :param command: Command to be executed on the device. + :param timeout: If the command does not return after the specified time, + execute() will abort with an error. If there is no timeout for + the command, this should be set to 0 or None. + + Other device-specific keyword arguments may also be specified. + + :returns: The stdout output from the command. + + """ + raise NotImplementedError() + + def set_sysfile_value(self, filepath, value, verify=True): + """ + Write the specified value to the specified file on the device + and verify that the value has actually been written. + + :param file: The file to be modified. + :param value: The value to be written to the file. Must be + an int or a string convertable to an int. + :param verify: Specifies whether the value should be verified, once written. + + Should raise DeviceError if could write value. 
+ + """ + raise NotImplementedError() + + def get_sysfile_value(self, sysfile, kind=None): + """ + Get the contents of the specified sysfile. + + :param sysfile: The file who's contents will be returned. + + :param kind: The type of value to be expected in the sysfile. This can + be any Python callable that takes a single str argument. + If not specified or is None, the contents will be returned + as a string. + + """ + raise NotImplementedError() + + def start(self): + """ + This gets invoked before an iteration is started and is endented to help the + device manange any internal supporting functions. + + """ + pass + + def stop(self): + """ + This gets invoked after iteration execution has completed and is endented to help the + device manange any internal supporting functions. + + """ + pass + + def __str__(self): + return 'Device<{}>'.format(self.name) + + __repr__ = __str__ + + def _expand_runtime_parameters(self): + expanded_params = [] + for param in self.runtime_parameters: + if isinstance(param, CoreParameter): + expanded_params.extend(param.get_runtime_parameters(self.core_names)) # pylint: disable=no-member + else: + expanded_params.append(param) + return expanded_params + + @contextmanager + def _check_alive(self): + try: + yield + except Exception as e: + self.ping() + raise e + diff --git a/wlauto/core/entry_point.py b/wlauto/core/entry_point.py new file mode 100644 index 00000000..a0af5f58 --- /dev/null +++ b/wlauto/core/entry_point.py @@ -0,0 +1,75 @@ +# Copyright 2013-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + +import sys +import argparse +import logging + +from wlauto.core.bootstrap import settings +from wlauto.core.extension_loader import ExtensionLoader +from wlauto.exceptions import WAError +from wlauto.utils.misc import get_traceback +from wlauto.utils.log import init_logging +from wlauto.utils.cli import init_argument_parser +from wlauto.utils.doc import format_body + + +import warnings +warnings.filterwarnings(action='ignore', category=UserWarning, module='zope') + + +logger = logging.getLogger('command_line') + + +def load_commands(subparsers): + ext_loader = ExtensionLoader(paths=settings.extension_paths) + for command in ext_loader.list_commands(): + settings.commands[command.name] = ext_loader.get_command(command.name, subparsers=subparsers) + + +def main(): + try: + description = ("Execute automated workloads on a remote device and process " + "the resulting output.\n\nUse \"wa -h\" to see " + "help for individual subcommands.") + parser = argparse.ArgumentParser(description=format_body(description, 80), + prog='wa', + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + init_argument_parser(parser) + load_commands(parser.add_subparsers(dest='command')) # each command will add its own subparser + args = parser.parse_args() + settings.verbosity = args.verbose + settings.debug = args.debug + if args.config: + settings.update(args.config) + init_logging(settings.verbosity) + + command = settings.commands[args.command] + sys.exit(command.execute(args)) + + except KeyboardInterrupt: + logging.info('Got CTRL-C. Aborting.') + sys.exit(3) + except WAError, e: + logging.critical(e) + sys.exit(1) + except Exception, e: # pylint: disable=broad-except + tb = get_traceback() + logging.critical(tb) + logging.critical('{}({})'.format(e.__class__.__name__, e)) + sys.exit(2) +
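+
+# Editor's note -- typical invocation once installed (assuming a 'run'
+# command extension is available; commands are discovered dynamically):
+#
+#   wa run agenda.yaml -v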
+ +""" +import os +import uuid +import logging +import subprocess +import random +from copy import copy +from datetime import datetime +from contextlib import contextmanager +from collections import Counter, defaultdict, OrderedDict +from itertools import izip_longest + +import wlauto.core.signal as signal +from wlauto.core import instrumentation +from wlauto.core.bootstrap import settings +from wlauto.core.extension import Artifact +from wlauto.core.configuration import RunConfiguration +from wlauto.core.extension_loader import ExtensionLoader +from wlauto.core.resolver import ResourceResolver +from wlauto.core.result import ResultManager, IterationResult, RunResult +from wlauto.exceptions import (WAError, ConfigError, TimeoutError, InstrumentError, + DeviceError, DeviceNotRespondingError) +from wlauto.utils.misc import ensure_directory_exists as _d, get_traceback, merge_dicts, format_duration + + +# The maximum number of reboot attempts for an iteration. +MAX_REBOOT_ATTEMPTS = 3 + +# If something went wrong during device initialization, wait this +# long (in seconds) before retrying. This is necessary, as retrying +# immediately may not give the device enough time to recover to be able +# to reboot. +REBOOT_DELAY = 3 + + +class RunInfo(object): + """ + Information about the current run, such as it's unique ID, run + time, etc. + + """ + + def __init__(self, config): + self.config = config + self.uuid = uuid.uuid4() + self.start_time = None + self.end_time = None + self.duration = None + self.project = config.project + self.project_stage = config.project_stage + self.run_name = config.run_name + self.notes = None + self.device_properties = {} + + def to_dict(self): + d = copy(self.__dict__) + d['uuid'] = str(self.uuid) + del d['config'] + d = merge_dicts(d, self.config.to_dict()) + return d + + +class ExecutionContext(object): + """ + Provides a context for instrumentation. Keeps track of things like + current workload and iteration. + + This class also provides two status members that can be used by workloads + and instrumentation to keep track of arbitrary state. ``result`` + is reset on each new iteration of a workload; run_status is maintained + throughout a Workload Automation run. + + """ + + # These are the artifacts generated by the core framework. 
+ default_run_artifacts = [ + Artifact('runlog', 'run.log', 'log', mandatory=True, + description='The log for the entire run.'), + ] + + @property + def current_iteration(self): + if self.current_job: + spec_id = self.current_job.spec.id + return self.job_iteration_counts[spec_id] + else: + return None + + @property + def workload(self): + return getattr(self.spec, 'workload', None) + + @property + def spec(self): + return getattr(self.current_job, 'spec', None) + + @property + def result(self): + return getattr(self.current_job, 'result', None) + + def __init__(self, device, config): + self.device = device + self.config = config + self.reboot_policy = config.reboot_policy + self.output_directory = None + self.current_job = None + self.resolver = None + self.last_error = None + self.run_info = None + self.run_result = None + self.run_output_directory = settings.output_directory + self.host_working_directory = settings.meta_directory + self.iteration_artifacts = None + self.run_artifacts = copy(self.default_run_artifacts) + self.job_iteration_counts = defaultdict(int) + self.aborted = False + if settings.agenda: + self.run_artifacts.append(Artifact('agenda', + os.path.join(self.host_working_directory, + os.path.basename(settings.agenda)), + 'meta', + mandatory=True, + description='Agenda for this run.')) + for i in xrange(1, settings.config_count + 1): + self.run_artifacts.append(Artifact('config_{}'.format(i), + os.path.join(self.host_working_directory, + 'config_{}.py'.format(i)), + kind='meta', + mandatory=True, + description='Config file used for the run.')) + + def initialize(self): + if not os.path.isdir(self.run_output_directory): + os.makedirs(self.run_output_directory) + self.output_directory = self.run_output_directory + self.resolver = ResourceResolver(self.config) + self.run_info = RunInfo(self.config) + self.run_result = RunResult(self.run_info) + + def next_job(self, job): + """Invoked by the runner when starting a new iteration of workload execution.""" + self.current_job = job + self.job_iteration_counts[self.spec.id] += 1 + self.current_job.result.iteration = self.current_iteration + if not self.aborted: + outdir_name = '_'.join(map(str, [self.spec.label, self.spec.id, self.current_iteration])) + self.output_directory = _d(os.path.join(self.run_output_directory, outdir_name)) + self.iteration_artifacts = [wa for wa in self.workload.artifacts] + + def end_job(self): + if self.current_job.result.status == IterationResult.ABORTED: + self.aborted = True + self.current_job = None + self.output_directory = self.run_output_directory + + def add_artifact(self, name, path, kind, *args, **kwargs): + if self.current_job is None: + self.add_run_artifact(name, path, kind, *args, **kwargs) + else: + self.add_iteration_artifact(name, path, kind, *args, **kwargs) + + def add_run_artifact(self, name, path, kind, *args, **kwargs): + path = _check_artifact_path(path, self.run_output_directory) + self.run_artifacts.append(Artifact(name, path, kind, Artifact.ITERATION, *args, **kwargs)) + + def add_iteration_artifact(self, name, path, kind, *args, **kwargs): + path = _check_artifact_path(path, self.output_directory) + self.iteration_artifacts.append(Artifact(name, path, kind, Artifact.RUN, *args, **kwargs)) + + def get_artifact(self, name): + if self.iteration_artifacts: + for art in self.iteration_artifacts: + if art.name == name: + return art + for art in self.run_artifacts: + if art.name == name: + return art + return None + + +def _check_artifact_path(path, rootpath): + if 
path.startswith(rootpath): + return os.path.abspath(path) + rootpath = os.path.abspath(rootpath) + full_path = os.path.join(rootpath, path) + if not os.path.isfile(full_path): + raise ValueError('Cannot add artifact because {} does not exist.'.format(full_path)) + return full_path + + +class Executor(object): + """ + The ``Executor``'s job is to set up the execution context and pass it to a ``Runner`` + along with a loaded run specification. Once the ``Runner`` has done its thing, + the ``Executor`` performs some final reporting before returning. + + The initial context setup involves combining configuration from various sources, + loading of required workloads, loading and installation of instruments and result + processors, etc. Static validation of the combined configuration is also performed. + + """ + # pylint: disable=R0915 + + def __init__(self): + self.logger = logging.getLogger('Executor') + self.error_logged = False + self.warning_logged = False + self.config = None + self.ext_loader = None + self.device = None + self.context = None + + def execute(self, agenda, selectors=None): # NOQA + """ + Execute the run specified by an agenda. Optionally, selectors may be used to only + execute a subset of the specified agenda. + + Params:: + + :agenda: an ``Agenda`` instance to be executed. + :selectors: A dict mapping selector name to the corresponding values. + + **Selectors** + + Currently, the following selectors are supported: + + ids + The value must be a sequence of workload specification IDs to be executed. Note + that if sections are specified in the agenda, the workload specification ID will + be a combination of the section and workload IDs. + + """ + signal.connect(self._error_signalled_callback, signal.ERROR_LOGGED) + signal.connect(self._warning_signalled_callback, signal.WARNING_LOGGED) + + self.logger.info('Initializing') + self.ext_loader = ExtensionLoader(packages=settings.extension_packages, + paths=settings.extension_paths) + + self.logger.debug('Loading run configuration.') + self.config = RunConfiguration(self.ext_loader) + for filepath in settings.get_config_paths(): + self.config.load_config(filepath) + self.config.set_agenda(agenda, selectors) + self.config.finalize() + config_outfile = os.path.join(settings.meta_directory, 'run_config.json') + with open(config_outfile, 'w') as wfh: + self.config.serialize(wfh) + + self.logger.debug('Initialising device configuration.') + if not self.config.device: + raise ConfigError('Make sure a device is specified in the config.') + self.device = self.ext_loader.get_device(self.config.device, **self.config.device_config) + self.device.validate() + + self.context = ExecutionContext(self.device, self.config) + + self.logger.debug('Loading resource discoverers.') + self.context.initialize() + self.context.resolver.load() + self.context.add_artifact('run_config', config_outfile, 'meta') + + self.logger.debug('Installing instrumentation') + for name, params in self.config.instrumentation.iteritems(): + instrument = self.ext_loader.get_instrument(name, self.device, **params) + instrumentation.install(instrument) + instrumentation.validate() + + self.logger.debug('Installing result processors') + result_manager = ResultManager() + for name, params in self.config.result_processors.iteritems(): + processor = self.ext_loader.get_result_processor(name, **params) + result_manager.install(processor) + result_manager.validate() + + self.logger.debug('Loading workload specs') + for workload_spec in self.config.workload_specs:
workload_spec.load(self.device, self.ext_loader) + workload_spec.workload.init_resources(self.context) + workload_spec.workload.validate() + + if self.config.flashing_config: + if not self.device.flasher: + msg = 'flashing_config specified for {} device that does not support flashing.' + raise ConfigError(msg.format(self.device.name)) + self.logger.debug('Flashing the device') + self.device.flasher.flash(self.device) + + self.logger.info('Running workloads') + runner = self._get_runner(result_manager) + runner.init_queue(self.config.workload_specs) + runner.run() + self.execute_postamble() + + def execute_postamble(self): + """ + This happens after the run has completed. The overall results of the run are + summarised to the user. + + """ + result = self.context.run_result + counter = Counter() + for ir in result.iteration_results: + counter[ir.status] += 1 + self.logger.info('Done.') + self.logger.info('Run duration: {}'.format(format_duration(self.context.run_info.duration))) + status_summary = 'Ran a total of {} iterations: '.format(sum(self.context.job_iteration_counts.values())) + parts = [] + for status in IterationResult.values: + if status in counter: + parts.append('{} {}'.format(counter[status], status)) + self.logger.info(status_summary + ', '.join(parts)) + self.logger.info('Results can be found in {}'.format(settings.output_directory)) + + if self.error_logged: + self.logger.warn('There were errors during execution.') + self.logger.warn('Please see {}'.format(settings.log_file)) + elif self.warning_logged: + self.logger.warn('There were warnings during execution.') + self.logger.warn('Please see {}'.format(settings.log_file)) + + def _get_runner(self, result_manager): + if not self.config.execution_order or self.config.execution_order == 'by_iteration': + if self.config.reboot_policy == 'each_spec': + self.logger.info('each_spec reboot policy with the default by_iteration execution order is ' + 'equivalent to each_iteration policy.') + runnercls = ByIterationRunner + elif self.config.execution_order in ['classic', 'by_spec']: + runnercls = BySpecRunner + elif self.config.execution_order == 'by_section': + runnercls = BySectionRunner + elif self.config.execution_order == 'random': + runnercls = RandomRunner + else: + raise ConfigError('Unexpected execution order: {}'.format(self.config.execution_order)) + return runnercls(self.device, self.context, result_manager) + + def _error_signalled_callback(self): + self.error_logged = True + signal.disconnect(self._error_signalled_callback, signal.ERROR_LOGGED) + + def _warning_signalled_callback(self): + self.warning_logged = True + signal.disconnect(self._warning_signalled_callback, signal.WARNING_LOGGED) + + +class RunnerJob(object): + """ + Represents a single execution of a ``RunnerJobDescription``. There will be one created for each iteration + specified by ``RunnerJobDescription.number_of_iterations``. + + """ + + def __init__(self, spec): + self.spec = spec + self.iteration = None + self.result = IterationResult(self.spec) + + +class Runner(object): + """ + This class is responsible for actually performing a workload automation + run. The main responsibility of this class is to emit appropriate signals + at the various stages of the run to allow things like traces and other
+ + """ + class _RunnerError(Exception): + """Internal runner error.""" + pass + + @property + def current_job(self): + if self.job_queue: + return self.job_queue[0] + return None + + @property + def previous_job(self): + if self.completed_jobs: + return self.completed_jobs[-1] + return None + + @property + def next_job(self): + if self.job_queue: + if len(self.job_queue) > 1: + return self.job_queue[1] + return None + + @property + def spec_changed(self): + if self.previous_job is None and self.current_job is not None: # Start of run + return True + if self.previous_job is not None and self.current_job is None: # End of run + return True + return self.current_job.spec.id != self.previous_job.spec.id + + @property + def spec_will_change(self): + if self.current_job is None and self.next_job is not None: # Start of run + return True + if self.current_job is not None and self.next_job is None: # End of run + return True + return self.current_job.spec.id != self.next_job.spec.id + + def __init__(self, device, context, result_manager): + self.device = device + self.context = context + self.result_manager = result_manager + self.logger = logging.getLogger('Runner') + self.job_queue = [] + self.completed_jobs = [] + self._initial_reset = True + + def init_queue(self, specs): + raise NotImplementedError() + + def run(self): # pylint: disable=too-many-branches + self._send(signal.RUN_START) + self._initialize_run() + + try: + while self.job_queue: + try: + self._init_job() + self._run_job() + except KeyboardInterrupt: + self.current_job.result.status = IterationResult.ABORTED + raise + except Exception, e: # pylint: disable=broad-except + self.current_job.result.status = IterationResult.FAILED + self.current_job.result.add_event(e.message) + if isinstance(e, DeviceNotRespondingError): + self.logger.info('Device appears to be unresponsive.') + if self.context.reboot_policy.can_reboot and self.device.can('reset_power'): + self.logger.info('Attempting to hard-reset the device...') + try: + self.device.hard_reset() + self.device.connect() + except DeviceError: # hard_boot not implemented for the device. + raise e + else: + raise e + else: # not a DeviceNotRespondingError + self.logger.error(e) + finally: + self._finalize_job() + except KeyboardInterrupt: + self.logger.info('Got CTRL-C. Finalizing run... (CTRL-C again to abort).') + # Skip through the remaining jobs. + while self.job_queue: + self.context.next_job(self.current_job) + self.current_job.result.status = IterationResult.ABORTED + self._finalize_job() + except DeviceNotRespondingError: + self.logger.info('Device unresponsive and recovery not possible. 
Skipping the rest of the run.') + self.context.aborted = True + while self.job_queue: + self.context.next_job(self.current_job) + self.current_job.result.status = IterationResult.SKIPPED + self._finalize_job() + + instrumentation.enable_all() + self._finalize_run() + self._process_results() + + self.result_manager.finalize(self.context) + self._send(signal.RUN_END) + + def _initialize_run(self): + self.context.run_info.start_time = datetime.utcnow() + if self.context.reboot_policy.perform_initial_boot: + self.logger.info('\tBooting device') + with self._signal_wrap('INITIAL_BOOT'): + self._reboot_device() + else: + self.logger.info('Connecting to device') + self.device.connect() + self.logger.info('Initializing device') + self.device.initialize(self.context) + + props = self.device.get_properties(self.context) + self.context.run_info.device_properties = props + self.result_manager.initialize(self.context) + self._send(signal.RUN_INIT) + + if instrumentation.check_failures(): + raise InstrumentError('Detected failure(s) during instrumentation initialization.') + + def _init_job(self): + self.current_job.result.status = IterationResult.RUNNING + self.context.next_job(self.current_job) + + def _run_job(self): # pylint: disable=too-many-branches + spec = self.current_job.spec + if not spec.enabled: + self.logger.info('Skipping workload %s (iteration %s)', spec, self.context.current_iteration) + self.current_job.result.status = IterationResult.SKIPPED + return + + self.logger.info('Running workload %s (iteration %s)', spec, self.context.current_iteration) + if spec.flash: + if not self.context.reboot_policy.can_reboot: + raise ConfigError('Cannot flash as reboot_policy does not permit rebooting.') + if not self.device.can('flash'): + raise DeviceError('Device does not support flashing.') + self._flash_device(spec.flash) + elif not self.completed_jobs: + # Never reboot on the very first job of a run, as we would have done + # the initial reboot if a reboot was needed.
+ pass + elif self.context.reboot_policy.reboot_on_each_spec and self.spec_changed: + self.logger.debug('Rebooting on spec change.') + self._reboot_device() + elif self.context.reboot_policy.reboot_on_each_iteration: + self.logger.debug('Rebooting on iteration.') + self._reboot_device() + + instrumentation.disable_all() + instrumentation.enable(spec.instrumentation) + self.device.start() + + if self.spec_changed: + self._send(signal.WORKLOAD_SPEC_START) + self._send(signal.ITERATION_START) + + try: + setup_ok = False + with self._handle_errors('Setting up device parameters'): + self.device.set_runtime_parameters(spec.runtime_parameters) + setup_ok = True + + if setup_ok: + with self._handle_errors('running {}'.format(spec.workload.name)): + self.current_job.result.status = IterationResult.RUNNING + self._run_workload_iteration(spec.workload) + else: + self.logger.info('\tSkipping the rest of the iterations for this spec.') + spec.enabled = False + except KeyboardInterrupt: + self._send(signal.ITERATION_END) + self._send(signal.WORKLOAD_SPEC_END) + raise + else: + self._send(signal.ITERATION_END) + if self.spec_will_change or not spec.enabled: + self._send(signal.WORKLOAD_SPEC_END) + finally: + self.device.stop() + + def _finalize_job(self): + self.context.run_result.iteration_results.append(self.current_job.result) + self.job_queue[0].iteration = self.context.current_iteration + self.completed_jobs.append(self.job_queue.pop(0)) + self.context.end_job() + + def _finalize_run(self): + self.logger.info('Finalizing.') + self._send(signal.RUN_FIN) + + with self._handle_errors('Disconnecting from the device'): + self.device.disconnect() + + info = self.context.run_info + info.end_time = datetime.utcnow() + info.duration = info.end_time - info.start_time + + def _process_results(self): + self.logger.info('Processing overall results') + with self._signal_wrap('OVERALL_RESULTS_PROCESSING'): + if instrumentation.check_failures(): + self.context.run_result.non_iteration_errors = True + self.result_manager.process_run_result(self.context.run_result, self.context) + + def _run_workload_iteration(self, workload): + self.logger.info('\tSetting up') + with self._signal_wrap('WORKLOAD_SETUP'): + try: + workload.setup(self.context) + except: + self.logger.info('\tSkipping the rest of the iterations for this spec.') + self.current_job.spec.enabled = False + raise + try: + + self.logger.info('\tExecuting') + with self._handle_errors('Running workload'): + with self._signal_wrap('WORKLOAD_EXECUTION'): + workload.run(self.context) + + self.logger.info('\tProcessing result') + self._send(signal.BEFORE_WORKLOAD_RESULT_UPDATE) + try: + if self.current_job.result.status != IterationResult.FAILED: + with self._handle_errors('Processing workload result', + on_error_status=IterationResult.PARTIAL): + workload.update_result(self.context) + self._send(signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE) + + if self.current_job.result.status == IterationResult.RUNNING: + self.current_job.result.status = IterationResult.OK + finally: + self._send(signal.AFTER_WORKLOAD_RESULT_UPDATE) + + finally: + self.logger.info('\tTearing down') + with self._handle_errors('Tearing down workload', + on_error_status=IterationResult.NONCRITICAL): + with self._signal_wrap('WORKLOAD_TEARDOWN'): + workload.teardown(self.context) + self.result_manager.add_result(self.current_job.result, self.context) + + def _flash_device(self, flashing_params): + with self._signal_wrap('FLASHING'): + self.device.flash(**flashing_params) + self.device.connect() + + def 
_reboot_device(self):
+        with self._signal_wrap('BOOT'):
+            for reboot_attempts in xrange(MAX_REBOOT_ATTEMPTS):
+                if reboot_attempts:
+                    self.logger.info('\tRetrying...')
+                with self._handle_errors('Rebooting device'):
+                    self.device.boot(**self.current_job.spec.boot_parameters)
+                    break
+            else:
+                raise DeviceError('Could not reboot device; max reboot attempts exceeded.')
+            self.device.connect()
+
+    def _send(self, s):
+        signal.send(s, self, self.context)
+
+    def _take_screenshot(self, filename):
+        if self.context.output_directory:
+            filepath = os.path.join(self.context.output_directory, filename)
+        else:
+            filepath = os.path.join(settings.output_directory, filename)
+        self.device.capture_screen(filepath)
+
+    @contextmanager
+    def _handle_errors(self, action, on_error_status=IterationResult.FAILED):
+        try:
+            if action is not None:
+                self.logger.debug(action)
+            yield
+        except (KeyboardInterrupt, DeviceNotRespondingError):
+            raise
+        except (WAError, TimeoutError), we:
+            self.device.ping()
+            if self.current_job:
+                self.current_job.result.status = on_error_status
+                self.current_job.result.add_event(str(we))
+            try:
+                self._take_screenshot('error.png')
+            except Exception, e:  # pylint: disable=W0703
+                # We're already in an error state, so the fact that taking a
+                # screenshot failed is not surprising...
+                pass
+            if action:
+                action = action[0].lower() + action[1:]
+                self.logger.error('Error while {}:\n\t{}'.format(action, we))
+        except Exception, e:  # pylint: disable=W0703
+            error_text = '{}("{}")'.format(e.__class__.__name__, e)
+            if self.current_job:
+                self.current_job.result.status = on_error_status
+                self.current_job.result.add_event(error_text)
+            self.logger.error('Error while {}'.format(action))
+            self.logger.error(error_text)
+            if isinstance(e, subprocess.CalledProcessError):
+                self.logger.error('Got:')
+                self.logger.error(e.output)
+            tb = get_traceback()
+            self.logger.error(tb)
+
+    @contextmanager
+    def _signal_wrap(self, signal_name):
+        """Wraps the enclosed block in before/success/after signals, ensuring
+        that the after signal is always sent."""
+        before_signal = getattr(signal, 'BEFORE_' + signal_name)
+        success_signal = getattr(signal, 'SUCCESSFUL_' + signal_name)
+        after_signal = getattr(signal, 'AFTER_' + signal_name)
+        try:
+            self._send(before_signal)
+            yield
+            self._send(success_signal)
+        finally:
+            self._send(after_signal)
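+
+# For illustration (expansion sketch, not actual framework code): wrapping a
+# block in _signal_wrap('WORKLOAD_SETUP') is equivalent to
+#
+#     try:
+#         self._send(signal.BEFORE_WORKLOAD_SETUP)
+#         ...  # the wrapped block
+#         self._send(signal.SUCCESSFUL_WORKLOAD_SETUP)
+#     finally:
+#         self._send(signal.AFTER_WORKLOAD_SETUP)
+#
+# so the AFTER_ signal is emitted even if the wrapped block (or a BEFORE_
+# handler) raises.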
+
+
+class BySpecRunner(Runner):
+    """
+    This is the "classic" implementation that executes all iterations of a workload
+    spec before proceeding on to the next spec.
+
+    """
+
+    def init_queue(self, specs):
+        jobs = [[RunnerJob(s) for _ in xrange(s.number_of_iterations)] for s in specs]  # pylint: disable=unused-variable
+        self.job_queue = [j for spec_jobs in jobs for j in spec_jobs]
+
+
+class BySectionRunner(Runner):
+    """
+    Runs the first iteration for all benchmarks first, before proceeding to the next iteration,
+    i.e. A1, B1, C1, A2, B2, C2... instead of A1, A2, B1, B2, C1, C2...
+
+    If multiple sections were specified in the agenda, this will run all specs for the first
+    section followed by all specs for the second section, etc.
+
+    e.g. given sections X and Y, and global specs A and B, with 2 iterations, this will run
+
+    X.A1, X.B1, Y.A1, Y.B1, X.A2, X.B2, Y.A2, Y.B2
+
+    """
+
+    def init_queue(self, specs):
+        jobs = [[RunnerJob(s) for _ in xrange(s.number_of_iterations)] for s in specs]
+        self.job_queue = [j for spec_jobs in izip_longest(*jobs) for j in spec_jobs if j]
+
+
+class ByIterationRunner(Runner):
+    """
+    Runs the first iteration for all benchmarks first, before proceeding to the next iteration,
+    i.e. A1, B1, C1, A2, B2, C2... instead of A1, A2, B1, B2, C1, C2...
+
+    If multiple sections were specified in the agenda, this will run all sections for the first
+    global spec first, followed by all sections for the second spec, etc.
+
+    e.g. given sections X and Y, and global specs A and B, with 2 iterations, this will run
+
+    X.A1, Y.A1, X.B1, Y.B1, X.A2, Y.A2, X.B2, Y.B2
+
+    """
+
+    def init_queue(self, specs):
+        sections = OrderedDict()
+        for s in specs:
+            if s.section_id not in sections:
+                sections[s.section_id] = []
+            sections[s.section_id].append(s)
+        specs = [s for section_specs in izip_longest(*sections.values()) for s in section_specs if s]
+        jobs = [[RunnerJob(s) for _ in xrange(s.number_of_iterations)] for s in specs]
+        self.job_queue = [j for spec_jobs in izip_longest(*jobs) for j in spec_jobs if j]
+
+
+class RandomRunner(Runner):
+    """
+    This will run specs in a random order.
+
+    """
+
+    def init_queue(self, specs):
+        jobs = [[RunnerJob(s) for _ in xrange(s.number_of_iterations)] for s in specs]  # pylint: disable=unused-variable
+        all_jobs = [j for spec_jobs in jobs for j in spec_jobs]
+        random.shuffle(all_jobs)
+        self.job_queue = all_jobs
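+
+# The four runners differ only in how init_queue() flattens the per-spec job
+# lists. A standalone sketch (illustration only; 'A'/'B' stand in for real
+# workload specs, each with two iterations):
+#
+#     from itertools import izip_longest
+#     jobs = [['A1', 'A2'], ['B1', 'B2']]
+#
+#     by_spec = [j for spec_jobs in jobs for j in spec_jobs]
+#     # -> ['A1', 'A2', 'B1', 'B2']
+#
+#     interleaved = [j for spec_jobs in izip_longest(*jobs) for j in spec_jobs if j]
+#     # -> ['A1', 'B1', 'A2', 'B2']: first iterations of every spec, then second, etc.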
diff --git a/wlauto/core/extension.py b/wlauto/core/extension.py
new file mode 100644
index 00000000..f09f7d8e
--- /dev/null
+++ b/wlauto/core/extension.py
@@ -0,0 +1,652 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=E1101
+import os
+import logging
+import inspect
+from copy import copy
+from collections import OrderedDict
+
+from wlauto.core.bootstrap import settings
+from wlauto.exceptions import ValidationError, ConfigError
+from wlauto.utils.misc import isiterable, ensure_directory_exists as _d, get_article
+from wlauto.utils.types import identifier
+
+
+class AttributeCollection(object):
+    """
+    Accumulator for extension attribute objects (such as Parameters or Artifacts). This will
+    replace any class member list accumulating such attributes through the magic of
+    metaprogramming\ [*]_.
+
+    .. [*] which is totally safe and not going to backfire in any way...
+
+    """
+
+    @property
+    def values(self):
+        return self._attrs.values()
+
+    def __init__(self, attrcls):
+        self._attrcls = attrcls
+        self._attrs = OrderedDict()
+
+    def add(self, p):
+        p = self._to_attrcls(p)
+        if p.name in self._attrs:
+            if p.override:
+                newp = copy(self._attrs[p.name])
+                for a, v in p.__dict__.iteritems():
+                    if v is not None:
+                        setattr(newp, a, v)
+                self._attrs[p.name] = newp
+            else:
+                # The duplicate attribute condition is checked elsewhere.
+                pass
+        else:
+            self._attrs[p.name] = p
+
+    append = add
+
+    def __str__(self):
+        return 'AC({})'.format(map(str, self._attrs.values()))
+
+    __repr__ = __str__
+
+    def _to_attrcls(self, p):
+        if isinstance(p, basestring):
+            p = self._attrcls(p)
+        elif isinstance(p, tuple) or isinstance(p, list):
+            p = self._attrcls(*p)
+        elif isinstance(p, dict):
+            p = self._attrcls(**p)
+        elif not isinstance(p, self._attrcls):
+            raise ValueError('Invalid parameter value: {}'.format(p))
+        if (p.name in self._attrs and not p.override and
+                p.name != 'modules'):  # TODO: HACK due to "diamond dependency" in workloads...
+            raise ValueError('Attribute {} has already been defined.'.format(p.name))
+        return p
+
+    def __iadd__(self, other):
+        for p in other:
+            self.add(p)
+        return self
+
+    def __iter__(self):
+        return iter(self.values)
+
+    def __contains__(self, p):
+        return p in self._attrs
+
+    def __getitem__(self, i):
+        return self._attrs[i]
+
+    def __len__(self):
+        return len(self._attrs)
+
+
+class AliasCollection(AttributeCollection):
+
+    def __init__(self):
+        super(AliasCollection, self).__init__(Alias)
+
+    def _to_attrcls(self, p):
+        if isinstance(p, tuple) or isinstance(p, list):
+            # must be in the form (name, {param: value, ...})
+            p = self._attrcls(p[0], **p[1])
+        elif not isinstance(p, self._attrcls):
+            raise ValueError('Invalid parameter value: {}'.format(p))
+        if p.name in self._attrs:
+            raise ValueError('Attribute {} has already been defined.'.format(p.name))
+        return p
+
+
+class ListCollection(list):
+
+    def __init__(self, attrcls):  # pylint: disable=unused-argument
+        super(ListCollection, self).__init__()
+
+
+class Param(object):
+    """
+    This is a generic parameter for an extension. Extensions instantiate this to declare
+    which parameters are supported.
+
+    """
+
+    def __init__(self, name, kind=None, mandatory=None, default=None, override=False,
+                 allowed_values=None, description=None, constraint=None, global_alias=None):
+        """
+        Create a new Parameter object.
+
+        :param name: The name of the parameter. This will become an instance member of the
+                     extension object to which the parameter is applied, so it must be a valid
+                     python identifier. This is the only mandatory parameter.
+        :param kind: The type of parameter this is. This must be a callable that takes an arbitrary
+                     object and converts it to the expected type, or raises ``ValueError`` if such
+                     conversion is not possible. Most Python standard types -- ``str``, ``int``, ``bool``, etc. --
+                     can be used here (though for ``bool``, ``wlauto.utils.misc.as_bool`` is preferred
+                     as it intuitively handles strings like ``'false'``). This defaults to ``str`` if
+                     not specified.
+        :param mandatory: If set to ``True``, then a non-``None`` value for this parameter *must* be
+                          provided on extension object construction, otherwise ``ConfigError`` will be
+                          raised.
+        :param default: The default value for this parameter. If no value is specified on extension
+                        construction, this value will be used instead. (Note: if this is specified and
+                        is not ``None``, then the ``mandatory`` parameter will be ignored).
+        :param override: A ``bool`` that specifies whether a parameter of the same name further up the
+                         hierarchy should be overridden. If this is ``False`` (the default), an exception
+                         will be raised by the ``AttributeCollection`` instead.
+        :param allowed_values: This should be the complete list of allowed values for this parameter.
+                               Note: a ``None`` value will always be allowed, even if it is not in this list.
+                               If you want to disallow ``None``, set ``mandatory`` to ``True``.
+        :param constraint: If specified, this must be a callable that takes the parameter value
+                           as an argument and returns a boolean indicating whether the constraint
+                           has been satisfied. Alternatively, it can be a two-tuple with said callable as
+                           the first element and a string describing the constraint as the second.
+        :param global_alias: This is an alternative alias for this parameter; unlike the name, this
+                             alias will not be namespaced under the owning extension's name (hence the
+                             global part). This is introduced primarily for backward compatibility -- so
+                             that old extension settings names still work. This should not be used for
+                             new parameters.
+
+        """
+        self.name = identifier(name)
+        if kind is not None and not callable(kind):
+            raise ValueError('Kind must be callable.')
+        self.kind = kind
+        self.mandatory = mandatory
+        self.default = default
+        self.override = override
+        self.allowed_values = allowed_values
+        self.description = description
+        if self.kind is None and not self.override:
+            self.kind = str
+        if constraint is not None and not callable(constraint) and not isinstance(constraint, tuple):
+            raise ValueError('Constraint must be callable or a (callable, str) tuple.')
+        self.constraint = constraint
+        self.global_alias = global_alias
+
+    def set_value(self, obj, value=None):
+        if value is None:
+            if self.default is not None:
+                value = self.default
+            elif self.mandatory:
+                msg = 'No value specified for mandatory parameter {} in {}'
+                raise ConfigError(msg.format(self.name, obj.name))
+        else:
+            try:
+                value = self.kind(value)
+            except (ValueError, TypeError):
+                typename = self.get_type_name()
+                msg = 'Bad value "{}" for {}; must be {} {}'
+                article = get_article(typename)
+                raise ConfigError(msg.format(value, self.name, article, typename))
+        current_value = getattr(obj, self.name, None)
+        if current_value is None:
+            setattr(obj, self.name, value)
+        elif not isiterable(current_value):
+            setattr(obj, self.name, value)
+        else:
+            new_value = current_value + [value]
+            setattr(obj, self.name, new_value)
+
+    def validate(self, obj):
+        value = getattr(obj, self.name, None)
+        if value is not None:
+            if self.allowed_values:
+                self._validate_allowed_values(obj, value)
+            if self.constraint:
+                self._validate_constraint(obj, value)
+        else:
+            if self.mandatory:
+                msg = 'No value specified for mandatory parameter {} in {}.'
+                raise ConfigError(msg.format(self.name, obj.name))
+
+    def get_type_name(self):
+        typename = str(self.kind)
+        if '\'' in typename:
+            typename = typename.split('\'')[1]
+        elif typename.startswith('<function'):
+            typename = typename.split()[1]
+        return typename
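+
+    # Illustration (hypothetical, not used anywhere in the framework): how a
+    # Param converts and validates a value set on an extension instance 'obj'.
+    # Validation of allowed_values and constraints is delegated to the
+    # _validate_* helpers referenced in validate() above.
+    #
+    #     p = Param('threads', kind=int, default=4, allowed_values=[1, 2, 4, 8],
+    #               constraint=(lambda v: v > 0, 'must be positive'))
+    #     p.set_value(obj, '8')   # kind=int converts the string '8' to 8
+    #     p.validate(obj)         # checks allowed_values and the constraint
+    #     obj.threads             # -> 8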
+
+
+class ResourceGetter(Extension):
+    """
+    Base class for implementing resolvers. Defines the resolver interface. Resolvers are
+    responsible for discovering resources (such as particular kinds of files) they know
+    about, based on the parameters that are passed to them. Each resolver also has a dict of
+    attributes that describe its operation, and that may be used to determine which getter
+    gets invoked. There is no pre-defined set of attributes and resolvers may define their own.
+
+    Class attributes:
+
+    :name: Name that uniquely identifies this getter. Must be set by any concrete subclass.
+    :resource_type: Identifies resource type(s) that this getter can handle. This must
+                    be either a string (for a single type) or a list of strings for
+                    multiple resource types. This must be set by any concrete subclass.
+    :priority: Priority with which this getter will be invoked. This should be one of
+               the standard priorities specified in the ``GetterPriority`` enumeration. If not
+               set, this will default to ``GetterPriority.environment``.
+
+    """
+
+    name = None
+    resource_type = None
+    priority = GetterPriority.environment
+
+    def __init__(self, resolver, **kwargs):
+        super(ResourceGetter, self).__init__(**kwargs)
+        self.resolver = resolver
+
+    def register(self):
+        """
+        Registers with a resource resolver. Concrete implementations must override this
+        to invoke the ``self.resolver.register()`` method to register ``self`` for specific
+        resource types.
+
+        """
+        if self.resource_type is None:
+            raise ValueError('No resource type specified for {}'.format(self.name))
+        elif isinstance(self.resource_type, list):
+            for rt in self.resource_type:
+                self.resolver.register(self, rt, self.priority)
+        else:
+            self.resolver.register(self, self.resource_type, self.priority)
+
+    def unregister(self):
+        """Unregister from a resource resolver."""
+        if self.resource_type is None:
+            raise ValueError('No resource type specified for {}'.format(self.name))
+        elif isinstance(self.resource_type, list):
+            for rt in self.resource_type:
+                self.resolver.unregister(self, rt)
+        else:
+            self.resolver.unregister(self, self.resource_type)
+
+    def get(self, resource, **kwargs):
+        """
+        This will get invoked by the resolver when attempting to resolve a resource, passing
+        in the resource to be resolved as the first parameter. Any additional parameters would
+        be specific to a particular resource type.
+
+        This method will only be invoked for resource types that the getter has registered for.
+
+        :param resource: an instance of :class:`wlauto.core.resource.Resource`.
+
+        :returns: Implementations of this method must return either the discovered resource or
+                  ``None`` if the resource could not be discovered.
+
+        """
+        raise NotImplementedError()
+
+    def delete(self, resource, *args, **kwargs):
+        """
+        Delete the resource if it is discovered. All arguments are passed to a call
+        to ``self.get()``. If that call returns a resource, it is deleted.
+
+        :returns: ``True`` if the specified resource has been discovered and deleted,
+                  and ``False`` otherwise.
+
+        """
+        discovered = self.get(resource, *args, **kwargs)
+        if discovered:
+            resource.delete(discovered)
+            return True
+        else:
+            return False
+
+    def __str__(self):
+        return '<ResourceGetter {}>'.format(self.name)
+
+
+class __NullOwner(object):
+    """Represents an owner for a resource not owned by anyone."""
+
+    name = 'noone'
+
+    def __getattr__(self, name):
+        return None
+
+    def __str__(self):
+        return 'no-one'
+
+    __repr__ = __str__
+
+
+NO_ONE = __NullOwner()
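+
+# A minimal concrete getter (hypothetical, for illustration; assumes a 'file'
+# resource type whose instances carry a 'path' attribute):
+#
+#     class LocalFileGetter(ResourceGetter):
+#
+#         name = 'local_file_getter'
+#         resource_type = 'file'
+#
+#         def get(self, resource, **kwargs):
+#             candidate = os.path.join('/opt/wa-resources', resource.path)
+#             return candidate if os.path.isfile(candidate) else None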
diff --git a/wlauto/core/result.py b/wlauto/core/result.py
new file mode 100644
index 00000000..900cbeb9
--- /dev/null
+++ b/wlauto/core/result.py
@@ -0,0 +1,321 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=no-member
+
+"""
+This module defines the classes used to handle result
+processing inside Workload Automation. There will be a
+:class:`wlauto.core.workload.WorkloadResult` object generated for
+every workload iteration executed. This object will have a list of
+:class:`wlauto.core.workload.WorkloadMetric` objects. This list will be
+populated by the workload itself and may also be updated by instrumentation
+(e.g. to add power measurements). Once the result object has been fully
+populated, it will be passed into the ``process_iteration_result`` method of
+:class:`ResultProcessor`. Once the entire run has completed, a list containing
+result objects from all iterations will be passed into the ``process_results``
+method of :class:`ResultProcessor`.
+
+Which result processors will be active is defined by the ``result_processors``
+list in ``~/.workload_automation/config.py``. Only the result processors
+whose names appear in this list will be used.
+
+A :class:`ResultsManager` keeps track of active result processors.
+
+"""
+import logging
+import traceback
+from copy import copy
+from contextlib import contextmanager
+from datetime import datetime
+
+from wlauto.core.extension import Extension
+from wlauto.exceptions import WAError
+from wlauto.utils.types import numeric
+from wlauto.utils.misc import enum_metaclass
+
+
+class ResultManager(object):
+    """
+    Keeps track of result processors and passes the results on to the individual
+    processors.
+
+    """
+
+    def __init__(self):
+        self.logger = logging.getLogger('ResultsManager')
+        self.processors = []
+        self._bad = []
+
+    def install(self, processor):
+        self.logger.debug('Installing results processor %s', processor.name)
+        self.processors.append(processor)
+
+    def uninstall(self, processor):
+        if processor in self.processors:
+            self.logger.debug('Uninstalling results processor %s', processor.name)
+            self.processors.remove(processor)
+        else:
+            self.logger.warning('Attempting to uninstall results processor %s, which is not installed.',
+                                processor.name)
+
+    def initialize(self, context):
+        # Errors aren't handled at this stage, because this gets executed
+        # before workload execution starts and we just want to propagate them
+        # and terminate (so that the error can be corrected and WA restarted).
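+        # (Contrast with add_result()/process_run_result() below, where a
+        # failing processor is caught by _handle_errors() and quarantined via
+        # _manage_processors() so the rest of the run can continue.)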
+ for processor in self.processors: + processor.initialize(context) + + def add_result(self, result, context): + with self._manage_processors(context): + for processor in self.processors: + with self._handle_errors(processor): + processor.process_iteration_result(result, context) + for processor in self.processors: + with self._handle_errors(processor): + processor.export_iteration_result(result, context) + + def process_run_result(self, result, context): + with self._manage_processors(context): + for processor in self.processors: + with self._handle_errors(processor): + processor.process_run_result(result, context) + for processor in self.processors: + with self._handle_errors(processor): + processor.export_run_result(result, context) + + def finalize(self, context): + with self._manage_processors(context): + for processor in self.processors: + with self._handle_errors(processor): + processor.finalize(context) + + def validate(self): + for processor in self.processors: + processor.validate() + + @contextmanager + def _manage_processors(self, context, finalize_bad=True): + yield + for processor in self._bad: + if finalize_bad: + processor.finalize(context) + self.uninstall(processor) + self._bad = [] + + @contextmanager + def _handle_errors(self, processor): + try: + yield + except KeyboardInterrupt, e: + raise e + except WAError, we: + self.logger.error('"{}" result processor has encountered an error'.format(processor.name)) + self.logger.error('{}("{}")'.format(we.__class__.__name__, we.message)) + self._bad.append(processor) + except Exception, e: # pylint: disable=W0703 + self.logger.error('"{}" result processor has encountered an error'.format(processor.name)) + self.logger.error('{}("{}")'.format(e.__class__.__name__, e)) + self.logger.error(traceback.format_exc()) + self._bad.append(processor) + + +class ResultProcessor(Extension): + """ + Base class for result processors. Defines an interface that should be implemented + by the subclasses. A result processor can be used to do any kind of post-processing + of the results, from writing them out to a file, to uploading them to a database, + performing calculations, generating plots, etc. + + """ + + def initialize(self, context): + pass + + def process_iteration_result(self, result, context): + pass + + def export_iteration_result(self, result, context): + pass + + def process_run_result(self, result, context): + pass + + def export_run_result(self, result, context): + pass + + def finalize(self, context): + pass + + +class RunResult(object): + """ + Contains overall results for a run. 
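+
+    The overall ``status`` property is derived from the statuses of the
+    individual iteration results; see the ``status`` property below.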
+
+    """
+
+    __metaclass__ = enum_metaclass('values', return_name=True)
+
+    values = [
+        'OK',
+        'OKISH',
+        'PARTIAL',
+        'FAILED',
+        'UNKNOWN',
+    ]
+
+    @property
+    def status(self):
+        if not self.iteration_results or all([s.status == IterationResult.FAILED for s in self.iteration_results]):
+            return self.FAILED
+        elif any([s.status == IterationResult.FAILED for s in self.iteration_results]):
+            return self.PARTIAL
+        elif any([s.status == IterationResult.ABORTED for s in self.iteration_results]):
+            return self.PARTIAL
+        elif (any([s.status == IterationResult.PARTIAL for s in self.iteration_results]) or
+                self.non_iteration_errors):
+            return self.OKISH
+        elif all([s.status == IterationResult.OK for s in self.iteration_results]):
+            return self.OK
+        else:
+            return self.UNKNOWN  # should never happen
+
+    def __init__(self, run_info):
+        self.info = run_info
+        self.iteration_results = []
+        self.artifacts = []
+        self.events = []
+        self.non_iteration_errors = False
+
+
+class RunEvent(object):
+    """
+    An event that occurred during a run.
+
+    """
+    def __init__(self, message):
+        self.timestamp = datetime.utcnow()
+        self.message = message
+
+    def to_dict(self):
+        return copy(self.__dict__)
+
+    def __str__(self):
+        return '{} {}'.format(self.timestamp, self.message)
+
+    __repr__ = __str__
+
+
+class IterationResult(object):
+    """
+    Contains the result of running a single iteration of a workload. It is the
+    responsibility of a workload to instantiate an IterationResult, populate it,
+    and return it from its get_result() method.
+
+    Status explanations:
+
+       :NOT_STARTED: This iteration has not yet started.
+       :RUNNING: This iteration is currently running and no errors have been detected.
+       :OK: This iteration has completed and no errors have been detected.
+       :PARTIAL: One or more instruments have failed (the iteration may still be running).
+       :FAILED: The workload itself has failed.
+       :ABORTED: The user interrupted the workload.
+       :SKIPPED: The iteration was skipped due to a previous failure.
+
+    """
+
+    __metaclass__ = enum_metaclass('values', return_name=True)
+
+    values = [
+        'NOT_STARTED',
+        'RUNNING',
+
+        'OK',
+        'NONCRITICAL',
+        'PARTIAL',
+        'FAILED',
+        'ABORTED',
+        'SKIPPED',
+    ]
+
+    def __init__(self, spec):
+        self.spec = spec
+        self.id = spec.id
+        self.workload = spec.workload
+        self.iteration = None
+        self.status = self.NOT_STARTED
+        self.events = []
+        self.metrics = []
+        self.artifacts = []
+
+    def add_metric(self, name, value, units=None, lower_is_better=False):
+        self.metrics.append(Metric(name, value, units, lower_is_better))
+
+    def has_metric(self, name):
+        for metric in self.metrics:
+            if metric.name == name:
+                return True
+        return False
+
+    def add_event(self, message):
+        self.events.append(RunEvent(message))
+
+    def to_dict(self):
+        d = copy(self.__dict__)
+        d['events'] = [e.to_dict() for e in self.events]
+        return d
+
+    def __iter__(self):
+        return iter(self.metrics)
+
+    def __getitem__(self, name):
+        for metric in self.metrics:
+            if metric.name == name:
+                return metric
+        raise KeyError('Metric {} not found.'.format(name))
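+
+# Typical usage from a workload's update_result() (sketch; 'context.result' is
+# assumed to be the IterationResult for the current iteration):
+#
+#     context.result.add_metric('execution_time', 12.7, 'seconds', lower_is_better=True)
+#     context.result.add_metric('score', 4520)
+#     context.result['score']  # -> <score: 4520 (+)>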
+
+
+class Metric(object):
+    """
+    This is a single metric collected from executing a workload.
+
+    :param name: the name of the metric. Uniquely identifies the metric
+                 within the results.
+    :param value: The numerical value of the metric for this execution of
+                  a workload. This can be either an int or a float.
+    :param units: Units for the collected value. Can be None if the value
+                  has no units (e.g. it's a count or a standardised score).
+    :param lower_is_better: Boolean flag indicating whether lower values are
+                            better than higher ones. Defaults to False.
+
+    """
+
+    def __init__(self, name, value, units=None, lower_is_better=False):
+        self.name = name
+        self.value = numeric(value)
+        self.units = units
+        self.lower_is_better = lower_is_better
+
+    def to_dict(self):
+        return self.__dict__
+
+    def __str__(self):
+        result = '{}: {}'.format(self.name, self.value)
+        if self.units:
+            result += ' ' + self.units
+        result += ' ({})'.format('-' if self.lower_is_better else '+')
+        return '<{}>'.format(result)
+
+    __repr__ = __str__
+
diff --git a/wlauto/core/signal.py b/wlauto/core/signal.py
new file mode 100644
index 00000000..012bf0fd
--- /dev/null
+++ b/wlauto/core/signal.py
@@ -0,0 +1,189 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+"""
+This module wraps the louie signalling mechanism. It relies on a modified
+version of louie that has prioritization added to handler invocation.
+
+"""
+from louie import dispatcher  # pylint: disable=F0401
+
+
+class Signal(object):
+    """
+    This class implements the signals to be used for notifying callbacks
+    registered to respond to different states and stages of the execution of
+    workload automation.
+
+    """
+
+    def __init__(self, name, invert_priority=False):
+        """
+        Instantiates a Signal.
+
+        :param name: name is the identifier of the Signal object. Signal instances with
+                     the same name refer to the same execution stage/state.
+        :param invert_priority: boolean parameter that determines whether multiple
+                                callbacks for the same signal should be ordered with
+                                ascending or descending priorities. Typically this flag
+                                should be set to True if the Signal is triggered after a
+                                state/stage has been reached. That way callbacks with high
+                                priorities will be called right after the event has occurred.
+        """
+        self.name = name
+        self.invert_priority = invert_priority
+
+    def __str__(self):
+        return self.name
+
+    __repr__ = __str__
+
+    def __hash__(self):
+        return id(self.name)
+
+
+# These are paired events -- if the before_ signal is sent, the after_ signal is
+# guaranteed to also be sent. In particular, the after_ signals will be sent
+# even if there is an error, so you cannot assume in the handler that the
+# device has booted successfully. In most cases, you should instead use the
+# non-paired signals below.
+BEFORE_FLASHING = Signal('before-flashing-signal', invert_priority=True)
+SUCCESSFUL_FLASHING = Signal('successful-flashing-signal')
+AFTER_FLASHING = Signal('after-flashing-signal')
+
+BEFORE_BOOT = Signal('before-boot-signal', invert_priority=True)
+SUCCESSFUL_BOOT = Signal('successful-boot-signal')
+AFTER_BOOT = Signal('after-boot-signal')
+
+BEFORE_INITIAL_BOOT = Signal('before-initial-boot-signal', invert_priority=True)
+SUCCESSFUL_INITIAL_BOOT = Signal('successful-initial-boot-signal')
+AFTER_INITIAL_BOOT = Signal('after-initial-boot-signal')
+
+BEFORE_FIRST_ITERATION_BOOT = Signal('before-first-iteration-boot-signal', invert_priority=True)
+SUCCESSFUL_FIRST_ITERATION_BOOT = Signal('successful-first-iteration-boot-signal')
+AFTER_FIRST_ITERATION_BOOT = Signal('after-first-iteration-boot-signal')
+
+BEFORE_WORKLOAD_SETUP = Signal('before-workload-setup-signal', invert_priority=True)
+SUCCESSFUL_WORKLOAD_SETUP = Signal('successful-workload-setup-signal')
+AFTER_WORKLOAD_SETUP = Signal('after-workload-setup-signal')
+
+BEFORE_WORKLOAD_EXECUTION = Signal('before-workload-execution-signal', invert_priority=True)
+SUCCESSFUL_WORKLOAD_EXECUTION = Signal('successful-workload-execution-signal')
+AFTER_WORKLOAD_EXECUTION = Signal('after-workload-execution-signal')
+
+BEFORE_WORKLOAD_RESULT_UPDATE = Signal('before-iteration-result-update-signal', invert_priority=True)
+SUCCESSFUL_WORKLOAD_RESULT_UPDATE = Signal('successful-iteration-result-update-signal')
+AFTER_WORKLOAD_RESULT_UPDATE = Signal('after-iteration-result-update-signal')
+
+BEFORE_WORKLOAD_TEARDOWN = Signal('before-workload-teardown-signal', invert_priority=True)
+SUCCESSFUL_WORKLOAD_TEARDOWN = Signal('successful-workload-teardown-signal')
+AFTER_WORKLOAD_TEARDOWN = Signal('after-workload-teardown-signal')
+
+BEFORE_OVERALL_RESULTS_PROCESSING = Signal('before-overall-results-process-signal', invert_priority=True)
+SUCCESSFUL_OVERALL_RESULTS_PROCESSING = Signal('successful-overall-results-process-signal')
+AFTER_OVERALL_RESULTS_PROCESSING = Signal('after-overall-results-process-signal')
+
+# These are the not-paired signals; they are emitted independently. E.g. the
+# fact that RUN_START was emitted does not mean RUN_END will be.
+RUN_START = Signal('start-signal', invert_priority=True)
+RUN_END = Signal('end-signal')
+WORKLOAD_SPEC_START = Signal('workload-spec-start-signal', invert_priority=True)
+WORKLOAD_SPEC_END = Signal('workload-spec-end-signal')
+ITERATION_START = Signal('iteration-start-signal', invert_priority=True)
+ITERATION_END = Signal('iteration-end-signal')
+
+RUN_INIT = Signal('run-init-signal')
+SPEC_INIT = Signal('spec-init-signal')
+ITERATION_INIT = Signal('iteration-init-signal')
+
+RUN_FIN = Signal('run-fin-signal')
+
+# These signals are used by the LoggerFilter to tell about logging events
+ERROR_LOGGED = Signal('error_logged')
+WARNING_LOGGED = Signal('warning_logged')
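+
+# For example (hypothetical handlers): an instrument that wants to bracket
+# workload execution as tightly as possible could subscribe with a high
+# priority to both paired signals; because BEFORE_ signals invert priorities,
+# the same priority value places it last before and first after the event:
+#
+#     def start_capture(context):
+#         pass  # e.g. start a power measurement
+#
+#     def stop_capture(context):
+#         pass  # e.g. stop the measurement
+#
+#     connect(start_capture, BEFORE_WORKLOAD_EXECUTION, priority=10)
+#     connect(stop_capture, AFTER_WORKLOAD_EXECUTION, priority=10)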
+
+
+def connect(handler, signal, sender=dispatcher.Any, priority=0):
+    """
+    Connects a callback to a signal, so that the callback will be automatically invoked
+    when that signal is sent.
+
+    Parameters:
+
+        :handler: This can be any callable that takes the right arguments for
+                  the signal. For most signals this means a single argument that
+                  will be an ``ExecutionContext`` instance. But please see the
+                  documentation for individual signals in the :ref:`signals reference `.
+        :signal: The signal to which the handler will be subscribed. Please see
+                 :ref:`signals reference ` for the list of standard WA
+                 signals.
+
+                 .. note:: There is nothing that prevents instrumentation from sending their
+                           own signals that are not part of the standard set. However, the signal
+                           must always be a :class:`wlauto.core.signal.Signal` instance.
+
+        :sender: The handler will be invoked only for the signals emitted by this sender. By
+                 default, this is set to :class:`louie.dispatcher.Any`, so the handler will
+                 be invoked for signals from any sender.
+        :priority: An integer (positive or negative) that specifies the priority of the handler.
+                   Handlers with higher priority will be called before handlers with lower
+                   priority. The call order of handlers with the same priority is not specified.
+                   Defaults to 0.
+
+                   .. note:: Priorities for some signals are inverted (so highest priority
+                             handlers get executed last). Please see :ref:`signals reference `
+                             for details.
+
+    """
+    if signal.invert_priority:
+        dispatcher.connect(handler, signal, sender, priority=-priority)  # pylint: disable=E1123
+    else:
+        dispatcher.connect(handler, signal, sender, priority=priority)  # pylint: disable=E1123
+
+
+def disconnect(handler, signal, sender=dispatcher.Any):
+    """
+    Disconnect a previously connected handler from the specified signal, optionally only
+    for the specified sender.
+
+    Parameters:
+
+        :handler: The callback to be disconnected.
+        :signal: The signal the handler is to be disconnected from. It will
+                 be a :class:`wlauto.core.signal.Signal` instance.
+        :sender: If specified, the handler will only be disconnected from the signal
+                 sent by this sender.
+
+    """
+    dispatcher.disconnect(handler, signal, sender)
+
+
+def send(signal, sender, *args, **kwargs):
+    """
+    Sends a signal, causing connected handlers to be invoked.
+
+    Parameters:
+
+        :signal: Signal to be sent. This must be an instance of :class:`wlauto.core.signal.Signal`
+                 or its subclasses.
+        :sender: The sender of the signal (typically, this would be ``self``). Some handlers may only
+                 be subscribed to signals from a particular sender.
+
+    The rest of the parameters will be passed on as arguments to the handler.
+
+    """
+    dispatcher.send(signal, sender, *args, **kwargs)
+
diff --git a/wlauto/core/version.py b/wlauto/core/version.py
new file mode 100644
index 00000000..1ae12231
--- /dev/null
+++ b/wlauto/core/version.py
@@ -0,0 +1,26 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from collections import namedtuple
+
+VersionTuple = namedtuple('Version', ['major', 'minor', 'revision'])
+
+version = VersionTuple(2, 3, 0)
+
+
+def get_wa_version():
+    version_string = '{}.{}.{}'.format(version.major, version.minor, version.revision)
+    return version_string
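+
+# A minimal concrete workload (hypothetical, for illustration only) against
+# the Workload interface defined in wlauto/core/workload.py below:
+#
+#     import time
+#     from wlauto import Workload, Parameter
+#
+#     class IdleWorkload(Workload):
+#
+#         name = 'idle_example'
+#         description = 'Does nothing for a configurable duration.'
+#
+#         parameters = [
+#             Parameter('duration', kind=int, default=20,
+#                       description='Time to remain idle, in seconds.'),
+#         ]
+#
+#         def run(self, context):
+#             time.sleep(self.duration)
+#
+#         def update_result(self, context):
+#             context.result.add_metric('idle_duration', self.duration, 'seconds')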
diff --git a/wlauto/core/workload.py b/wlauto/core/workload.py
new file mode 100644
index 00000000..dad52aaa
--- /dev/null
+++ b/wlauto/core/workload.py
@@ -0,0 +1,94 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+"""
+A workload is the unit of execution. It represents a set of activities that are performed
+and measured together, as well as the necessary setup and teardown procedures. A single
+execution of a workload produces one :class:`wlauto.core.result.WorkloadResult` that is populated with zero or more
+:class:`wlauto.core.result.WorkloadMetric`\ s and/or
+:class:`wlauto.core.result.Artifact`\ s by the workload and active instrumentation.
+
+"""
+from wlauto.core.extension import Extension
+from wlauto.exceptions import WorkloadError
+
+
+class Workload(Extension):
+    """
+    This is the base class for the workloads executed by the framework.
+    Each of the methods throwing NotImplementedError *must* be implemented
+    by the derived classes.
+
+    """
+
+    supported_devices = []
+    supported_platforms = []
+    summary_metrics = []
+
+    def __init__(self, device, **kwargs):
+        """
+        Creates a new Workload.
+
+        :param device: the Device on which the workload will be executed.
+
+        """
+        super(Workload, self).__init__(**kwargs)
+        if self.supported_devices and device.name not in self.supported_devices:
+            raise WorkloadError('Workload {} does not support device {}'.format(self.name, device.name))
+        if self.supported_platforms and device.platform not in self.supported_platforms:
+            raise WorkloadError('Workload {} does not support platform {}'.format(self.name, device.platform))
+        self.device = device
+
+    def init_resources(self, context):
+        """
+        May be optionally overridden by concrete instances in order to discover and initialise
+        necessary resources. This method will be invoked at most once during the execution:
+        before running any workloads, and before invocation of ``validate()``, but after it is
+        clear that this workload will run (i.e. this method will not be invoked for workloads
+        that have been discovered but have not been scheduled to run in the agenda).
+
+        """
+        pass
+
+    def setup(self, context):
+        """
+        Perform the setup necessary to run the workload, such as copying the necessary files
+        to the device, configuring the environment, etc.
+
+        This is also the place to perform any on-device checks prior to attempting to execute
+        the workload.
+
+        """
+        pass
+
+    def run(self, context):
+        """Execute the workload. This is the method that performs the actual "work" of the workload."""
+        pass
+
+    def update_result(self, context):
+        """
+        Update the result within the specified execution context with the metrics
+        from this workload iteration.
+
+        """
+        pass
+
+    def teardown(self, context):
+        """ Perform any final clean up for the Workload. """
+        pass
+
+    def __str__(self):
+        return '<Workload {}>'.format(self.name)
+
diff --git a/wlauto/devices/__init__.py b/wlauto/devices/__init__.py
new file mode 100644
index 00000000..16224d6f
--- /dev/null
+++ b/wlauto/devices/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + diff --git a/wlauto/devices/android/__init__.py b/wlauto/devices/android/__init__.py new file mode 100644 index 00000000..cd5d64d6 --- /dev/null +++ b/wlauto/devices/android/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2013-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + diff --git a/wlauto/devices/android/generic/__init__.py b/wlauto/devices/android/generic/__init__.py new file mode 100644 index 00000000..51a43948 --- /dev/null +++ b/wlauto/devices/android/generic/__init__.py @@ -0,0 +1,37 @@ +# Copyright 2013-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from wlauto import AndroidDevice, Parameter + + +class GenericDevice(AndroidDevice): + name = 'generic_android' + description = """ + Generic Android device. Use this if you do not have a device file for + your device. + + This implements the minimum functionality that should be supported by + all android devices. + + """ + + default_working_directory = '/storage/sdcard0/working' + has_gpu = True + + parameters = [ + Parameter('core_names', default=[], override=True), + Parameter('core_clusters', default=[], override=True), + ] diff --git a/wlauto/devices/android/juno/__init__.py b/wlauto/devices/android/juno/__init__.py new file mode 100644 index 00000000..712c4e1d --- /dev/null +++ b/wlauto/devices/android/juno/__init__.py @@ -0,0 +1,173 @@ +# Copyright 2014-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + +# pylint: disable=E1101 +import os +import re +import time + +import pexpect + +from wlauto import BigLittleDevice, Parameter +from wlauto.exceptions import DeviceError +from wlauto.utils.serial_port import open_serial_connection, pulse_dtr +from wlauto.utils.android import adb_connect, adb_disconnect, adb_list_devices +from wlauto.utils.uefi import UefiMenu + + +AUTOSTART_MESSAGE = 'Press Enter to stop auto boot...' + + +class Juno(BigLittleDevice): + + name = 'juno' + description = """ + ARM Juno next generation big.LITTLE development platform. + """ + + capabilities = ['reset_power'] + + has_gpu = True + + modules = [ + 'vexpress', + ] + + parameters = [ + Parameter('retries', kind=int, default=2, + description="""Specifies the number of times the device will attempt to recover + (normally, with a hard reset) if it detects that something went wrong."""), + + # VExpress flasher expects a device to have these: + Parameter('uefi_entry', default='WA', + description='The name of the entry to use (will be created if does not exist).'), + Parameter('microsd_mount_point', default='/media/JUNO', + description='Location at which the device\'s MicroSD card will be mounted.'), + Parameter('port', default='/dev/ttyS0', description='Serial port on which the device is connected.'), + Parameter('baudrate', kind=int, default=115200, description='Serial connection baud.'), + Parameter('timeout', kind=int, default=300, description='Serial connection timeout.'), + Parameter('core_names', default=['a53', 'a53', 'a53', 'a53', 'a57', 'a57'], override=True), + Parameter('core_clusters', default=[0, 0, 0, 0, 1, 1], override=True), + ] + + short_delay = 1 + firmware_prompt = 'Cmd>' + # this is only used if there is no UEFI entry and one has to be created. + kernel_arguments = 'console=ttyAMA0,115200 earlyprintk=pl011,0x7ff80000 verbose debug init=/init root=/dev/sda1 rw ip=dhcp rootwait' + + def boot(self, **kwargs): + self.logger.debug('Resetting the device.') + self.reset() + with open_serial_connection(port=self.port, + baudrate=self.baudrate, + timeout=self.timeout, + init_dtr=0) as target: + menu = UefiMenu(target) + self.logger.debug('Waiting for UEFI menu...') + menu.open(timeout=120) + try: + menu.select(self.uefi_entry) + except LookupError: + self.logger.debug('{} UEFI entry not found.'.format(self.uefi_entry)) + self.logger.debug('Attempting to create one using default flasher configuration.') + self.flasher.image_args = self.kernel_arguments + self.flasher.create_uefi_enty(self, menu) + menu.select(self.uefi_entry) + self.logger.debug('Waiting for the Android prompt.') + target.expect(self.android_prompt, timeout=self.timeout) + + def connect(self): + if not self._is_ready: + if not self.adb_name: # pylint: disable=E0203 + with open_serial_connection(timeout=self.timeout, + port=self.port, + baudrate=self.baudrate, + init_dtr=0) as target: + target.sendline('') + self.logger.debug('Waiting for android prompt.') + target.expect(self.android_prompt) + + self.logger.debug('Waiting for IP address...') + wait_start_time = time.time() + while True: + target.sendline('ip addr list eth0') + time.sleep(1) + try: + target.expect('inet ([1-9]\d*.\d+.\d+.\d+)', timeout=10) + self.adb_name = target.match.group(1) + ':5555' # pylint: disable=W0201 + break + except pexpect.TIMEOUT: + pass # We have our own timeout -- see below. 
+ if (time.time() - wait_start_time) > self.ready_timeout: + raise DeviceError('Could not acquire IP address.') + + if self.adb_name in adb_list_devices(): + adb_disconnect(self.adb_name) + adb_connect(self.adb_name, timeout=self.timeout) + super(Juno, self).connect() # wait for boot to complete etc. + self._is_ready = True + + def disconnect(self): + if self._is_ready: + super(Juno, self).disconnect() + adb_disconnect(self.adb_name) + self._is_ready = False + + def reset(self): + # Currently, reboot is not working in Android on Juno, so + # perfrom a ahard reset instead + self.hard_reset() + + def get_cpuidle_states(self, cpu=0): + return {} + + def hard_reset(self): + self.disconnect() + self.adb_name = None # Force re-acquire IP address on reboot. pylint: disable=attribute-defined-outside-init + with open_serial_connection(port=self.port, + baudrate=self.baudrate, + timeout=self.timeout, + init_dtr=0, + get_conn=True) as (target, conn): + pulse_dtr(conn, state=True, duration=0.1) # TRM specifies a pulse of >=100ms + + i = target.expect([AUTOSTART_MESSAGE, self.firmware_prompt]) + if i: + self.logger.debug('Saw firmware prompt.') + time.sleep(self.short_delay) + target.sendline('reboot') + else: + self.logger.debug('Saw auto boot message.') + + def wait_for_microsd_mount_point(self, target, timeout=100): + attempts = 1 + self.retries + if os.path.exists(os.path.join(self.microsd_mount_point, 'config.txt')): + return + + self.logger.debug('Waiting for VExpress MicroSD to mount...') + for i in xrange(attempts): + if i: # Do not reboot on the first attempt. + target.sendline('reboot') + for _ in xrange(timeout): + time.sleep(self.short_delay) + if os.path.exists(os.path.join(self.microsd_mount_point, 'config.txt')): + return + raise DeviceError('Did not detect MicroSD mount on {}'.format(self.microsd_mount_point)) + + def get_android_id(self): + # Android ID currenlty not set properly in Juno Android builds. + return 'abad1deadeadbeef' + diff --git a/wlauto/devices/android/nexus10/__init__.py b/wlauto/devices/android/nexus10/__init__.py new file mode 100644 index 00000000..ad6f2555 --- /dev/null +++ b/wlauto/devices/android/nexus10/__init__.py @@ -0,0 +1,48 @@ +# Copyright 2013-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +import time + +from wlauto import AndroidDevice, Parameter + + +class Nexus10Device(AndroidDevice): + + name = 'Nexus10' + description = """ + Nexus10 is a 10 inch tablet device, which has dual-core A15. + + To be able to use Nexus10 in WA, the following must be true: + + - USB Debugging Mode is enabled. 
+ - Generate USB debugging authorisation for the host machine + + """ + + default_working_directory = '/sdcard/working' + has_gpu = True + max_cores = 2 + + parameters = [ + Parameter('core_names', default=['A15', 'A15'], override=True), + Parameter('core_clusters', default=[0, 0], override=True), + ] + + def init(self, context, *args, **kwargs): + time.sleep(self.long_delay) + self.execute('svc power stayon true', check_exit_code=False) + time.sleep(self.long_delay) + self.execute('input keyevent 82') diff --git a/wlauto/devices/android/nexus5/__init__.py b/wlauto/devices/android/nexus5/__init__.py new file mode 100644 index 00000000..cd2f09db --- /dev/null +++ b/wlauto/devices/android/nexus5/__init__.py @@ -0,0 +1,40 @@ +# Copyright 2013-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from wlauto import AndroidDevice, Parameter + + +class Nexus5Device(AndroidDevice): + + name = 'Nexus5' + description = """ + Adapter for Nexus 5. + + To be able to use Nexus5 in WA, the following must be true: + + - USB Debugging Mode is enabled. + - Generate USB debugging authorisation for the host machine + + """ + + default_working_directory = '/storage/sdcard0/working' + has_gpu = True + max_cores = 4 + + parameters = [ + Parameter('core_names', default=['krait400', 'krait400', 'krait400', 'krait400'], override=True), + Parameter('core_clusters', default=[0, 0, 0, 0], override=True), + ] diff --git a/wlauto/devices/android/note3/__init__.py b/wlauto/devices/android/note3/__init__.py new file mode 100644 index 00000000..9c8f42ae --- /dev/null +++ b/wlauto/devices/android/note3/__init__.py @@ -0,0 +1,76 @@ +# Copyright 2013-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +import time + +from wlauto import AndroidDevice, Parameter +from wlauto.exceptions import TimeoutError +from wlauto.utils.android import adb_shell + + +class Note3Device(AndroidDevice): + + name = 'Note3' + description = """ + Adapter for Galaxy Note 3. + + To be able to use Note3 in WA, the following must be true: + + - USB Debugging Mode is enabled. 
+ - Generate USB debugging authorisation for the host machine + + """ + + parameters = [ + Parameter('core_names', default=['A15', 'A15', 'A15', 'A15'], override=True), + Parameter('core_clusters', default=[0, 0, 0, 0], override=True), + Parameter('working_directory', default='/storage/sdcard0/wa-working', override=True), + ] + + def __init__(self, **kwargs): + super(Note3Device, self).__init__(**kwargs) + self._just_rebooted = False + + def init(self, context): + self.execute('svc power stayon true', check_exit_code=False) + + def reset(self): + super(Note3Device, self).reset() + self._just_rebooted = True + + def hard_reset(self): + super(Note3Device, self).hard_reset() + self._just_rebooted = True + + def connect(self): # NOQA pylint: disable=R0912 + super(Note3Device, self).connect() + if self._just_rebooted: + self.logger.debug('Waiting for boot to complete...') + # On the Note 3, adb connection gets reset some time after booting. + # This causes errors during execution. To prevent this, open a shell + # session and wait for it to be killed. Once its killed, give adb + # enough time to restart, and then the device should be ready. + try: + adb_shell(self.adb_name, '', timeout=20) # pylint: disable=no-member + time.sleep(5) # give adb time to re-initialize + except TimeoutError: + pass # timed out waiting for the session to be killed -- assume not going to be. + + self.logger.debug('Boot completed.') + self._just_rebooted = False + # Swipe upwards to unlock the screen. + time.sleep(self.long_delay) + self.execute('input touchscreen swipe 540 1600 560 800 ') diff --git a/wlauto/devices/android/odroidxu3/__init__.py b/wlauto/devices/android/odroidxu3/__init__.py new file mode 100644 index 00000000..60f780b7 --- /dev/null +++ b/wlauto/devices/android/odroidxu3/__init__.py @@ -0,0 +1,38 @@ +# Copyright 2013-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from wlauto import AndroidDevice, Parameter + + +class OdroidXU3(AndroidDevice): + + name = "odroidxu3" + description = 'HardKernel Odroid XU3 development board.' 
+ + core_modules = [ + 'odroidxu3-fan', + ] + + parameters = [ + Parameter('adb_name', default='BABABEEFBABABEEF', override=True), + Parameter('working_directory', default='/data/local/wa-working', override=True), + Parameter('core_names', default=['a7', 'a7', 'a7', 'a7', 'a15', 'a15', 'a15', 'a15'], override=True), + Parameter('core_clusters', default=[0, 0, 0, 0, 1, 1, 1, 1], override=True), + Parameter('port', default='/dev/ttyUSB0', kind=str, + description='Serial port on which the device is connected'), + Parameter('baudrate', default=115200, kind=int, description='Serial connection baud rate'), + ] + diff --git a/wlauto/devices/android/tc2/__init__.py b/wlauto/devices/android/tc2/__init__.py new file mode 100644 index 00000000..9d3f92b9 --- /dev/null +++ b/wlauto/devices/android/tc2/__init__.py @@ -0,0 +1,847 @@ +# Copyright 2013-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +import os +import sys +import re +import string +import shutil +import time +from collections import Counter + +import pexpect + +from wlauto import BigLittleDevice, RuntimeParameter, Parameter, settings +from wlauto.exceptions import ConfigError, DeviceError +from wlauto.utils.android import adb_connect, adb_disconnect, adb_list_devices +from wlauto.utils.serial_port import open_serial_connection +from wlauto.utils.misc import merge_dicts +from wlauto.utils.types import boolean + + +BOOT_FIRMWARE = { + 'uefi': { + 'SCC_0x010': '0x000003E0', + 'reboot_attempts': 0, + }, + 'bootmon': { + 'SCC_0x010': '0x000003D0', + 'reboot_attempts': 2, + }, +} + +MODES = { + 'mp_a7_only': { + 'images_file': 'images_mp.txt', + 'dtb': 'mp_a7', + 'initrd': 'init_mp', + 'kernel': 'kern_mp', + 'SCC_0x700': '0x1032F003', + 'cpus': ['a7', 'a7', 'a7'], + }, + 'mp_a7_bootcluster': { + 'images_file': 'images_mp.txt', + 'dtb': 'mp_a7bc', + 'initrd': 'init_mp', + 'kernel': 'kern_mp', + 'SCC_0x700': '0x1032F003', + 'cpus': ['a7', 'a7', 'a7', 'a15', 'a15'], + }, + 'mp_a15_only': { + 'images_file': 'images_mp.txt', + 'dtb': 'mp_a15', + 'initrd': 'init_mp', + 'kernel': 'kern_mp', + 'SCC_0x700': '0x0032F003', + 'cpus': ['a15', 'a15'], + }, + 'mp_a15_bootcluster': { + 'images_file': 'images_mp.txt', + 'dtb': 'mp_a15bc', + 'initrd': 'init_mp', + 'kernel': 'kern_mp', + 'SCC_0x700': '0x0032F003', + 'cpus': ['a15', 'a15', 'a7', 'a7', 'a7'], + }, + 'iks_cpu': { + 'images_file': 'images_iks.txt', + 'dtb': 'iks', + 'initrd': 'init_iks', + 'kernel': 'kern_iks', + 'SCC_0x700': '0x1032F003', + 'cpus': ['a7', 'a7'], + }, + 'iks_a15': { + 'images_file': 'images_iks.txt', + 'dtb': 'iks', + 'initrd': 'init_iks', + 'kernel': 'kern_iks', + 'SCC_0x700': '0x0032F003', + 'cpus': ['a15', 'a15'], + }, + 'iks_a7': { + 'images_file': 'images_iks.txt', + 'dtb': 'iks', + 'initrd': 'init_iks', + 'kernel': 'kern_iks', + 'SCC_0x700': '0x0032F003', + 'cpus': ['a7', 'a7'], + }, + 'iks_ns_a15': { + 'images_file': 'images_iks.txt', + 'dtb': 'iks', + 'initrd': 'init_iks', + 'kernel': 'kern_iks', + 'SCC_0x700': '0x0032F003', + 'cpus': ['a7', 'a7', 'a7', 
'a15', 'a15'], + }, + 'iks_ns_a7': { + 'images_file': 'images_iks.txt', + 'dtb': 'iks', + 'initrd': 'init_iks', + 'kernel': 'kern_iks', + 'SCC_0x700': '0x0032F003', + 'cpus': ['a7', 'a7', 'a7', 'a15', 'a15'], + }, +} + +A7_ONLY_MODES = ['mp_a7_only', 'iks_a7', 'iks_cpu'] +A15_ONLY_MODES = ['mp_a15_only', 'iks_a15'] + +DEFAULT_A7_GOVERNOR_TUNABLES = { + 'interactive': { + 'above_hispeed_delay': 80000, + 'go_hispeed_load': 85, + 'hispeed_freq': 800000, + 'min_sample_time': 80000, + 'timer_rate': 20000, + }, + 'ondemand': { + 'sampling_rate': 50000, + }, +} + +DEFAULT_A15_GOVERNOR_TUNABLES = { + 'interactive': { + 'above_hispeed_delay': 80000, + 'go_hispeed_load': 85, + 'hispeed_freq': 1000000, + 'min_sample_time': 80000, + 'timer_rate': 20000, + }, + 'ondemand': { + 'sampling_rate': 50000, + }, +} + +ADB_SHELL_TIMEOUT = 30 + + +class _TC2DeviceConfig(object): + + name = 'TC2 Configuration' + device_name = 'TC2' + + def __init__(self, # pylint: disable=R0914,W0613 + root_mount='/media/VEMSD', + + disable_boot_configuration=False, + boot_firmware=None, + mode=None, + + fs_medium='usb', + + device_working_directory='/data/local/usecase', + + bm_image='bm_v519r.axf', + + serial_device='/dev/ttyS0', + serial_baud=38400, + serial_max_timeout=600, + serial_log=sys.stdout, + + init_timeout=120, + + always_delete_uefi_entry=True, + psci_enable=True, + + host_working_directory=None, + + a7_governor_tunables=None, + a15_governor_tunables=None, + + adb_name=None, + # Compatibility with other android devices. + enable_screen_check=None, # pylint: disable=W0613 + **kwargs + ): + self.root_mount = root_mount + self.disable_boot_configuration = disable_boot_configuration + if not disable_boot_configuration: + self.boot_firmware = boot_firmware or 'uefi' + self.default_mode = mode or 'mp_a7_bootcluster' + elif boot_firmware or mode: + raise ConfigError('boot_firmware and/or mode cannot be specified when disable_boot_configuration is enabled.') + + self.mode = self.default_mode + self.working_directory = device_working_directory + self.serial_device = serial_device + self.serial_baud = serial_baud + self.serial_max_timeout = serial_max_timeout + self.serial_log = serial_log + self.bootmon_prompt = re.compile('^([KLM]:\\\)?>', re.MULTILINE) + + self.fs_medium = fs_medium.lower() + + self.bm_image = bm_image + + self.init_timeout = init_timeout + + self.always_delete_uefi_entry = always_delete_uefi_entry + self.psci_enable = psci_enable + + self.resource_dir = os.path.join(os.path.dirname(__file__), 'resources') + self.board_dir = os.path.join(self.root_mount, 'SITE1', 'HBI0249A') + self.board_file = 'board.txt' + self.board_file_bak = 'board.bak' + self.images_file = 'images.txt' + + self.host_working_directory = host_working_directory or settings.meta_directory + + if not a7_governor_tunables: + self.a7_governor_tunables = DEFAULT_A7_GOVERNOR_TUNABLES + else: + self.a7_governor_tunables = merge_dicts(DEFAULT_A7_GOVERNOR_TUNABLES, a7_governor_tunables) + + if not a15_governor_tunables: + self.a15_governor_tunables = DEFAULT_A15_GOVERNOR_TUNABLES + else: + self.a15_governor_tunables = merge_dicts(DEFAULT_A15_GOVERNOR_TUNABLES, a15_governor_tunables) + + self.adb_name = adb_name + + @property + def src_images_template_file(self): + return os.path.join(self.resource_dir, MODES[self.mode]['images_file']) + + @property + def src_images_file(self): + return os.path.join(self.host_working_directory, 'images.txt') + + @property + def src_board_template_file(self): + return os.path.join(self.resource_dir, 
'board_template.txt')
+
+    @property
+    def src_board_file(self):
+        return os.path.join(self.host_working_directory, 'board.txt')
+
+    @property
+    def kernel_arguments(self):
+        kernel_args = ' console=ttyAMA0,38400 androidboot.console=ttyAMA0 selinux=0'
+        if self.fs_medium == 'usb':
+            kernel_args += ' androidboot.hardware=arm-versatileexpress-usb'
+        if 'iks' in self.mode:
+            kernel_args += ' no_bL_switcher=0'
+        return kernel_args
+
+    @property
+    def kernel(self):
+        return MODES[self.mode]['kernel']
+
+    @property
+    def initrd(self):
+        return MODES[self.mode]['initrd']
+
+    @property
+    def dtb(self):
+        return MODES[self.mode]['dtb']
+
+    @property
+    def SCC_0x700(self):
+        return MODES[self.mode]['SCC_0x700']
+
+    @property
+    def SCC_0x010(self):
+        return BOOT_FIRMWARE[self.boot_firmware]['SCC_0x010']
+
+    @property
+    def reboot_attempts(self):
+        return BOOT_FIRMWARE[self.boot_firmware]['reboot_attempts']
+
+    def validate(self):
+        valid_modes = MODES.keys()
+        if self.mode not in valid_modes:
+            message = 'Invalid mode: {}; must be in {}'.format(
+                self.mode, valid_modes)
+            raise ConfigError(message)
+
+        valid_boot_firmware = BOOT_FIRMWARE.keys()
+        if self.boot_firmware not in valid_boot_firmware:
+            message = 'Invalid boot_firmware: {}; must be in {}'.format(
+                self.boot_firmware,
+                valid_boot_firmware)
+            raise ConfigError(message)
+
+        if self.fs_medium not in ['usb', 'sdcard']:
+            message = 'Invalid filesystem medium: {}; allowed values: usb, sdcard'.format(self.fs_medium)
+            raise ConfigError(message)
+
+
+class TC2Device(BigLittleDevice):
+
+    name = 'TC2'
+    description = """
+    TC2 is a development board, which has three A7 cores and two A15 cores.
+
+    TC2 has a number of boot parameters, which are:
+
+    :root_mount: Defaults to '/media/VEMSD'.
+    :boot_firmware: There are only two boot firmware options: uefi and
+                    bootmon. Defaults to 'uefi'.
+    :fs_medium: Defaults to 'usb'.
+    :device_working_directory: The directory that WA will be using to copy
+                               files to. Defaults to '/data/local/usecase'.
+    :serial_device: The serial device to which TC2 is connected. Defaults to
+                    '/dev/ttyS0'.
+    :serial_baud: Defaults to 38400.
+    :serial_max_timeout: Serial timeout value in seconds. Defaults to 600.
+    :serial_log: Defaults to standard output.
+    :init_timeout: The timeout, in seconds, for the device to initialize.
+                   Defaults to 120.
+    :always_delete_uefi_entry: If true, the UEFI entry will be deleted (and
+                               then re-created) on each boot. Defaults to True.
+    :psci_enable: Whether PSCI is enabled. Defaults to True.
+    :host_working_directory: The host working directory. Defaults to None.
+    :disable_boot_configuration: Disables boot configuration through images.txt and board.txt. When
+                                 this is ``True``, those two files will not be overwritten in VEMSD.
+                                 This option may be necessary if the firmware version in the ``TC2``
+                                 is not compatible with the templates in WA. Please note that enabling
+                                 this will prevent you from being able to set the ``boot_firmware`` and
+                                 ``mode`` parameters. Defaults to ``False``.
+
+    TC2 also has a number of different boot modes, which are:
+
+    :mp_a7_only: Only the A7 cluster.
+    :mp_a7_bootcluster: Both A7 and A15 clusters, but it boots on the A7
+                        cluster.
+    :mp_a15_only: Only the A15 cluster.
+    :mp_a15_bootcluster: Both A7 and A15 clusters, but it boots on the A15
+                         cluster.
+    :iks_cpu: Only the A7 cluster, with only 2 cpus.
+    :iks_a15: Only the A15 cluster.
+    :iks_a7: Same as iks_cpu.
+    :iks_ns_a15: Both A7 and A15 clusters.
+    :iks_ns_a7: Both A7 and A15 clusters.
+
+    The difference between mp and iks is the scheduling policy.
+
+    TC2 takes the following runtime parameters:
+
+    :a7_cores: Number of active A7 cores.
+    :a15_cores: Number of active A15 cores.
+    :a7_governor: CPUFreq governor for the A7 cluster.
+    :a15_governor: CPUFreq governor for the A15 cluster.
+    :a7_min_frequency: Minimum CPU frequency for the A7 cluster.
+    :a15_min_frequency: Minimum CPU frequency for the A15 cluster.
+    :a7_max_frequency: Maximum CPU frequency for the A7 cluster.
+    :a15_max_frequency: Maximum CPU frequency for the A15 cluster.
+    :irq_affinity: Which cluster will receive IRQs.
+    :cpuidle: Whether idle states should be enabled.
+    :sysfile_values: A dict mapping a complete file path to the value that
+                     should be echo'd into it. By default, the file will be
+                     subsequently read to verify that the value was written
+                     into it, with DeviceError raised otherwise. For write-only
+                     files, this check can be disabled by appending a ``!`` to
+                     the end of the file path.
+
+    """
+
+    has_gpu = False
+    a15_only_modes = A15_ONLY_MODES
+    a7_only_modes = A7_ONLY_MODES
+    not_configurable_modes = ['iks_a7', 'iks_cpu', 'iks_a15']
+
+    parameters = [
+        Parameter('core_names', mandatory=False, override=True,
+                  description='This parameter will be ignored for TC2'),
+        Parameter('core_clusters', mandatory=False, override=True,
+                  description='This parameter will be ignored for TC2'),
+    ]
+
+    runtime_parameters = [
+        RuntimeParameter('irq_affinity', lambda d, x: d.set_irq_affinity(x.lower()), lambda: None),
+        RuntimeParameter('cpuidle', lambda d, x: d.enable_idle_states() if boolean(x) else d.disable_idle_states(),
+                         lambda d: d.get_cpuidle())
+    ]
+
+    def get_mode(self):
+        return self.config.mode
+
+    def set_mode(self, mode):
+        if self._has_booted:
+            raise DeviceError('Attempting to set boot mode when already booted.')
+        valid_modes = MODES.keys()
+        if mode is None:
+            mode = self.config.default_mode
+        if mode not in valid_modes:
+            message = 'Invalid mode: {}; must be in {}'.format(mode, valid_modes)
+            raise ConfigError(message)
+        self.config.mode = mode
+
+    mode = property(get_mode, set_mode)
+
+    def _get_core_names(self):
+        return MODES[self.mode]['cpus']
+
+    def _set_core_names(self, value):
+        pass
+
+    core_names = property(_get_core_names, _set_core_names)
+
+    def _get_core_clusters(self):
+        seen = set([])
+        core_clusters = []
+        cluster_id = -1
+        for core in MODES[self.mode]['cpus']:
+            if core not in seen:
+                seen.add(core)
+                cluster_id += 1
+            core_clusters.append(cluster_id)
+        return core_clusters
+
+    def _set_core_clusters(self, value):
+        pass
+
+    core_clusters = property(_get_core_clusters, _set_core_clusters)
+
+    @property
+    def cpu_cores(self):
+        return MODES[self.mode]['cpus']
+
+    @property
+    def max_a7_cores(self):
+        return Counter(MODES[self.mode]['cpus'])['a7']
+
+    @property
+    def max_a15_cores(self):
+        return Counter(MODES[self.mode]['cpus'])['a15']
+
+    @property
+    def a7_governor_tunables(self):
+        return self.config.a7_governor_tunables
+
+    @property
+    def a15_governor_tunables(self):
+        return self.config.a15_governor_tunables
+
+    def __init__(self, **kwargs):
+        super(TC2Device, self).__init__()
+        self.config = _TC2DeviceConfig(**kwargs)
+        self.working_directory = self.config.working_directory
+        self._serial = None
+        self._has_booted = None
+
+    def boot(self, **kwargs):  # NOQA
+        mode = kwargs.get('os_mode', None)
+        self._is_ready = False
+        self._has_booted = False
+
+        self.mode = mode
+        self.logger.debug('Booting in {} mode'.format(self.mode))
+
+        with open_serial_connection(timeout=self.config.serial_max_timeout,
+
port=self.config.serial_device, + baudrate=self.config.serial_baud) as target: + if self.config.boot_firmware == 'bootmon': + self._boot_using_bootmon(target) + elif self.config.boot_firmware == 'uefi': + self._boot_using_uefi(target) + else: + message = 'Unexpected boot firmware: {}'.format(self.config.boot_firmware) + raise ConfigError(message) + + try: + target.sendline('') + self.logger.debug('Waiting for the Android prompt.') + target.expect(self.android_prompt, timeout=40) # pylint: disable=E1101 + except pexpect.TIMEOUT: + # Try a second time before giving up. + self.logger.debug('Did not get Android prompt, retrying...') + target.sendline('') + target.expect(self.android_prompt, timeout=10) # pylint: disable=E1101 + + self.logger.debug('Waiting for OS to initialize...') + started_waiting_time = time.time() + time.sleep(20) # we know it's not going to to take less time than this. + boot_completed, got_ip_address = False, False + while True: + try: + if not boot_completed: + target.sendline('getprop sys.boot_completed') + boot_completed = target.expect(['0.*', '1.*'], timeout=10) + if not got_ip_address: + target.sendline('getprop dhcp.eth0.ipaddress') + # regexes are processed in order, so ip regex has to + # come first (as we only want to match new line if we + # don't match the IP). We do a "not" make the logic + # consistent with boot_completed. + got_ip_address = not target.expect(['[1-9]\d*.\d+.\d+.\d+', '\n'], timeout=10) + except pexpect.TIMEOUT: + pass # We have our own timeout -- see below. + if boot_completed and got_ip_address: + break + time.sleep(5) + if (time.time() - started_waiting_time) > self.config.init_timeout: + raise DeviceError('Timed out waiting for the device to initialize.') + + self._has_booted = True + + def connect(self): + if not self._is_ready: + if self.config.adb_name: + self.adb_name = self.config.adb_name # pylint: disable=attribute-defined-outside-init + else: + with open_serial_connection(timeout=self.config.serial_max_timeout, + port=self.config.serial_device, + baudrate=self.config.serial_baud) as target: + # Get IP address and push the Gator and PMU logger. + target.sendline('su') # as of Android v5.0.2, Linux does not boot into root shell + target.sendline('netcfg') + ipaddr_re = re.compile('eth0 +UP +(.+)/.+', re.MULTILINE) + target.expect(ipaddr_re) + output = target.after + match = re.search('eth0 +UP +(.+)/.+', output) + if not match: + raise DeviceError('Could not get adb IP address.') + ipaddr = match.group(1) + + # Connect to device using adb. + target.expect(self.android_prompt) # pylint: disable=E1101 + self.adb_name = ipaddr + ":5555" # pylint: disable=W0201 + + if self.adb_name in adb_list_devices(): + adb_disconnect(self.adb_name) + adb_connect(self.adb_name) + self._is_ready = True + self.execute("input keyevent 82", timeout=ADB_SHELL_TIMEOUT) + self.execute("svc power stayon true", timeout=ADB_SHELL_TIMEOUT) + + def disconnect(self): + adb_disconnect(self.adb_name) + self._is_ready = False + + # TC2-specific methods. You should avoid calling these in + # Workloads/Instruments as that would tie them to TC2 (and if that is + # the case, then you should set the supported_devices parameter in the + # Workload/Instrument accordingly). Most of these can be replace with a + # call to set_runtime_parameters. + + def get_cpuidle(self): + return self.get_sysfile_value('/sys/devices/system/cpu/cpu0/cpuidle/state1/disable') + + def enable_idle_states(self): + """ + Fully enables idle states on TC2. 
+ See http://wiki.arm.com/Research/TC2SetupAndUsage ("Enabling Idle Modes" section) + and http://wiki.arm.com/ASD/ControllingPowerManagementInLinaroKernels + + """ + # Enable C1 (cluster shutdown). + self.set_sysfile_value('/sys/devices/system/cpu/cpu0/cpuidle/state1/disable', 0, verify=False) + # Enable C0 on A15 cluster. + self.set_sysfile_value('/sys/kernel/debug/idle_debug/enable_idle', 0, verify=False) + # Enable C0 on A7 cluster. + self.set_sysfile_value('/sys/kernel/debug/idle_debug/enable_idle', 1, verify=False) + + def disable_idle_states(self): + """ + Disable idle states on TC2. + See http://wiki.arm.com/Research/TC2SetupAndUsage ("Enabling Idle Modes" section) + and http://wiki.arm.com/ASD/ControllingPowerManagementInLinaroKernels + + """ + # Disable C1 (cluster shutdown). + self.set_sysfile_value('/sys/devices/system/cpu/cpu0/cpuidle/state1/disable', 1, verify=False) + # Disable C0. + self.set_sysfile_value('/sys/kernel/debug/idle_debug/enable_idle', 0xFF, verify=False) + + def set_irq_affinity(self, cluster): + """ + Set's IRQ affinity to the specified cluster. + + This method will only work if the device mode is mp_a7_bootcluster or + mp_a15_bootcluster. This operation does not make sense if there is only one + cluster active (all IRQs will obviously go to that), and it will not work for + IKS kernel because clusters are not exposed to sysfs. + + :param cluster: must be either 'a15' or 'a7'. + + """ + if self.config.mode not in ('mp_a7_bootcluster', 'mp_a15_bootcluster'): + raise ConfigError('Cannot set IRQ affinity with mode {}'.format(self.config.mode)) + if cluster == 'a7': + self.execute('/sbin/set_irq_affinity.sh 0xc07', check_exit_code=False) + elif cluster == 'a15': + self.execute('/sbin/set_irq_affinity.sh 0xc0f', check_exit_code=False) + else: + raise ConfigError('cluster must either "a15" or "a7"; got {}'.format(cluster)) + + def _boot_using_uefi(self, target): + self.logger.debug('Booting using UEFI.') + self._wait_for_vemsd_mount(target) + self._setup_before_reboot() + self._perform_uefi_reboot(target) + + # Get to the UEFI menu. + self.logger.debug('Waiting for UEFI default selection.') + target.sendline('reboot') + target.expect('The default boot selection will start in'.rstrip()) + time.sleep(1) + target.sendline(''.rstrip()) + + # If delete every time is specified, try to delete entry. + if self.config.always_delete_uefi_entry: + self._delete_uefi_entry(target, entry='workload_automation_MP') + self.config.always_delete_uefi_entry = False + + # Specify argument to be passed specifying that psci is (or is not) enabled + if self.config.psci_enable: + psci_enable = ' psci=enable' + else: + psci_enable = '' + + # Identify the workload automation entry. + selection_pattern = r'\[([0-9]*)\] ' + + try: + target.expect(re.compile(selection_pattern + 'workload_automation_MP'), timeout=5) + wl_menu_item = target.match.group(1) + except pexpect.TIMEOUT: + self._create_uefi_entry(target, psci_enable, entry_name='workload_automation_MP') + # At this point the board should be rebooted so we need to retry to boot + self._boot_using_uefi(target) + else: # Did not time out. 
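+            # The menu-driving below relies on the entry existing: if the
+            # expect() above timed out, a new 'workload_automation_MP' entry
+            # was created, the board rebooted, and _boot_using_uefi() was
+            # re-entered to find it on the second pass. Here the entry was
+            # found, so walk the Boot Manager menus to point the FDT path at
+            # this mode's dtb and then boot the entry.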
+ try: + #Identify the boot manager menu item + target.expect(re.compile(selection_pattern + 'Boot Manager')) + boot_manager_menu_item = target.match.group(1) + + #Update FDT + target.sendline(boot_manager_menu_item) + target.expect(re.compile(selection_pattern + 'Update FDT path'), timeout=15) + update_fdt_menu_item = target.match.group(1) + target.sendline(update_fdt_menu_item) + target.expect(re.compile(selection_pattern + 'NOR Flash .*'), timeout=15) + bootmonfs_menu_item = target.match.group(1) + target.sendline(bootmonfs_menu_item) + target.expect('File path of the FDT blob:') + target.sendline(self.config.dtb) + + #Return to main manu and boot from wl automation + target.expect(re.compile(selection_pattern + 'Return to main menu'), timeout=15) + return_to_main_menu_item = target.match.group(1) + target.sendline(return_to_main_menu_item) + target.sendline(wl_menu_item) + except pexpect.TIMEOUT: + raise DeviceError('Timed out') + + def _setup_before_reboot(self): + if not self.config.disable_boot_configuration: + self.logger.debug('Performing pre-boot setup.') + substitution = { + 'SCC_0x010': self.config.SCC_0x010, + 'SCC_0x700': self.config.SCC_0x700, + } + with open(self.config.src_board_template_file, 'r') as fh: + template_board_txt = string.Template(fh.read()) + with open(self.config.src_board_file, 'w') as wfh: + wfh.write(template_board_txt.substitute(substitution)) + + with open(self.config.src_images_template_file, 'r') as fh: + template_images_txt = string.Template(fh.read()) + with open(self.config.src_images_file, 'w') as wfh: + wfh.write(template_images_txt.substitute({'bm_image': self.config.bm_image})) + + shutil.copyfile(self.config.src_board_file, + os.path.join(self.config.board_dir, self.config.board_file)) + shutil.copyfile(self.config.src_images_file, + os.path.join(self.config.board_dir, self.config.images_file)) + os.system('sync') # make sure everything is flushed to microSD + else: + self.logger.debug('Boot configuration disabled proceeding with existing board.txt and images.txt.') + + def _delete_uefi_entry(self, target, entry): # pylint: disable=R0201 + """ + this method deletes the entry specified as parameter + as a precondition serial port input needs to be parsed AT MOST up to + the point BEFORE recognizing this entry (both entry and boot manager has + not yet been parsed) + + """ + try: + selection_pattern = r'\[([0-9]+)\] *' + + try: + target.expect(re.compile(selection_pattern + entry), timeout=5) + wl_menu_item = target.match.group(1) + except pexpect.TIMEOUT: + return # Entry does not exist, nothing to delete here... 
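+            # Menu scraping pattern (used throughout the UEFI helpers): given
+            # serial output such as "[3] Boot Manager", matching
+            # r'\[([0-9]+)\] *Boot Manager' leaves the item number in
+            # target.match.group(1), which is then sent back to select that
+            # item. ("[3]" is only an example; the numbering depends on the
+            # menu contents at the time.)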
+ + # Identify and select boot manager menu item + target.expect(selection_pattern + 'Boot Manager', timeout=15) + bootmanager_item = target.match.group(1) + target.sendline(bootmanager_item) + + # Identify and select 'Remove entry' + target.expect(selection_pattern + 'Remove Boot Device Entry', timeout=15) + new_entry_item = target.match.group(1) + target.sendline(new_entry_item) + + # Delete entry + target.expect(re.compile(selection_pattern + entry), timeout=5) + wl_menu_item = target.match.group(1) + target.sendline(wl_menu_item) + + # Return to main manu + target.expect(re.compile(selection_pattern + 'Return to main menu'), timeout=15) + return_to_main_menu_item = target.match.group(1) + target.sendline(return_to_main_menu_item) + except pexpect.TIMEOUT: + raise DeviceError('Timed out while deleting UEFI entry.') + + def _create_uefi_entry(self, target, psci_enable, entry_name): + """ + Creates the default boot entry that is expected when booting in uefi mode. + + """ + self._wait_for_vemsd_mount(target) + try: + selection_pattern = '\[([0-9]+)\] *' + + # Identify and select boot manager menu item. + target.expect(selection_pattern + 'Boot Manager', timeout=15) + bootmanager_item = target.match.group(1) + target.sendline(bootmanager_item) + + # Identify and select 'add new entry'. + target.expect(selection_pattern + 'Add Boot Device Entry', timeout=15) + new_entry_item = target.match.group(1) + target.sendline(new_entry_item) + + # Identify and select BootMonFs. + target.expect(selection_pattern + 'NOR Flash .*', timeout=15) + BootMonFs_item = target.match.group(1) + target.sendline(BootMonFs_item) + + # Specify the parameters of the new entry. + target.expect('.+the kernel', timeout=5) + target.sendline(self.config.kernel) # kernel path + target.expect('Has FDT support\?.*\[y\/n\].*', timeout=5) + time.sleep(0.5) + target.sendline('y') # Has Fdt support? -> y + target.expect('Add an initrd.*\[y\/n\].*', timeout=5) + time.sleep(0.5) + target.sendline('y') # add an initrd? -> y + target.expect('.+the initrd.*', timeout=5) + time.sleep(0.5) + target.sendline(self.config.initrd) # initrd path + target.expect('.+to the binary.*', timeout=5) + time.sleep(0.5) + _slow_sendline(target, self.config.kernel_arguments + psci_enable) # arguments to pass to binary + time.sleep(0.5) + target.expect('.+new Entry.+', timeout=5) + _slow_sendline(target, entry_name) # Entry name + target.expect('Choice.+', timeout=15) + time.sleep(2) + except pexpect.TIMEOUT: + raise DeviceError('Timed out while creating UEFI entry.') + self._perform_uefi_reboot(target) + + def _perform_uefi_reboot(self, target): + self._wait_for_vemsd_mount(target) + open(os.path.join(self.config.root_mount, 'reboot.txt'), 'a').close() + + def _wait_for_vemsd_mount(self, target, timeout=100): + attempts = 1 + self.config.reboot_attempts + if os.path.exists(os.path.join(self.config.root_mount, 'config.txt')): + return + + self.logger.debug('Waiting for VEMSD to mount...') + for i in xrange(attempts): + if i: # Do not reboot on the first attempt. + target.sendline('reboot') + target.sendline('usb_on') + for _ in xrange(timeout): + time.sleep(1) + if os.path.exists(os.path.join(self.config.root_mount, 'config.txt')): + return + + raise DeviceError('Timed out waiting for VEMSD to mount.') + + def _boot_using_bootmon(self, target): + """ + This method Boots TC2 using the bootmon interface. 
+ """ + self.logger.debug('Booting using bootmon.') + + try: + self._wait_for_vemsd_mount(target, timeout=20) + except DeviceError: + # OK, something's wrong. Reboot the board and try again. + self.logger.debug('VEMSD not mounted, attempting to power cycle device.') + target.sendline(' ') + state = target.expect(['Cmd> ', self.config.bootmon_prompt, self.android_prompt]) # pylint: disable=E1101 + + if state == 0 or state == 1: + # Reboot - Bootmon + target.sendline('reboot') + target.expect('Powering up system...') + elif state == 2: + target.sendline('reboot -n') + target.expect('Powering up system...') + else: + raise DeviceError('Unexpected board state {}; should be 0, 1 or 2'.format(state)) + + self._wait_for_vemsd_mount(target) + + self._setup_before_reboot() + + # Reboot - Bootmon + self.logger.debug('Rebooting into bootloader...') + open(os.path.join(self.config.root_mount, 'reboot.txt'), 'a').close() + target.expect('Powering up system...') + target.expect(self.config.bootmon_prompt) + + # Wait for VEMSD to mount + self._wait_for_vemsd_mount(target) + + #Boot Linux - Bootmon + target.sendline('fl linux fdt ' + self.config.dtb) + target.expect(self.config.bootmon_prompt) + target.sendline('fl linux initrd ' + self.config.initrd) + target.expect(self.config.bootmon_prompt) + target.sendline('fl linux boot ' + self.config.kernel + self.config.kernel_arguments) + + +# Utility functions. + +def _slow_sendline(target, line): + for c in line: + target.send(c) + time.sleep(0.1) + target.sendline('') + diff --git a/wlauto/devices/android/tc2/resources/board_template.txt b/wlauto/devices/android/tc2/resources/board_template.txt new file mode 100644 index 00000000..39535d13 --- /dev/null +++ b/wlauto/devices/android/tc2/resources/board_template.txt @@ -0,0 +1,96 @@ +BOARD: HBI0249 +TITLE: V2P-CA15_A7 Configuration File + +[DCCS] +TOTALDCCS: 1 ;Total Number of DCCS +M0FILE: dbb_v110.ebf ;DCC0 Filename +M0MODE: MICRO ;DCC0 Programming Mode + +[FPGAS] +TOTALFPGAS: 0 ;Total Number of FPGAs + +[TAPS] +TOTALTAPS: 3 ;Total Number of TAPs +T0NAME: STM32TMC ;TAP0 Device Name +T0FILE: NONE ;TAP0 Filename +T0MODE: NONE ;TAP0 Programming Mode +T1NAME: STM32CM3 ;TAP1 Device Name +T1FILE: NONE ;TAP1 Filename +T1MODE: NONE ;TAP1 Programming Mode +T2NAME: CORTEXA15 ;TAP2 Device Name +T2FILE: NONE ;TAP2 Filename +T2MODE: NONE ;TAP2 Programming Mode + +[OSCCLKS] +TOTALOSCCLKS: 9 ;Total Number of OSCCLKS +OSC0: 50.0 ;CPUREFCLK0 A15 CPU (20:1 - 1.0GHz) +OSC1: 50.0 ;CPUREFCLK1 A15 CPU (20:1 - 1.0GHz) +OSC2: 40.0 ;CPUREFCLK0 A7 CPU (20:1 - 800MHz) +OSC3: 40.0 ;CPUREFCLK1 A7 CPU (20:1 - 800MHz) +OSC4: 40.0 ;HSBM AXI (40MHz) +OSC5: 23.75 ;HDLCD (23.75MHz - TC PLL is in bypass) +OSC6: 50.0 ;SMB (50MHz) +OSC7: 50.0 ;SYSREFCLK (20:1 - 1.0GHz, ACLK - 500MHz) +OSC8: 50.0 ;DDR2 (8:1 - 400MHz) + +[SCC REGISTERS] +TOTALSCCS: 33 ;Total Number of SCC registers + +;SCC: 0x010 0x000003D0 ;Remap to NOR0 +SCC: 0x010 $SCC_0x010 ;Switch between NOR0/NOR1 +SCC: 0x01C 0xFF00FF00 ;CFGRW3 - SMC CS6/7 N/U +SCC: 0x118 0x01CD1011 ;CFGRW17 - HDLCD PLL external bypass +;SCC: 0x700 0x00320003 ;CFGRW48 - [25:24]Boot CPU [28]Boot Cluster (default CA7_0) +SCC: 0x700 $SCC_0x700 ;CFGRW48 - [25:24]Boot CPU [28]Boot Cluster (default CA7_0) + ; Bootmon configuration: + ; [15]: A7 Event stream generation (default: disabled) + ; [14]: A15 Event stream generation (default: disabled) + ; [13]: Power down the non-boot cluster (default: disabled) + ; [12]: Use per-cpu mailboxes for power management (default: disabled) + ; [11]: A15 executes WFEs as nops 
(default: disabled) + +SCC: 0x400 0x33330c00 ;CFGREG41 - A15 configuration register 0 (Default 0x33330c80) + ; [29:28] SPNIDEN + ; [25:24] SPIDEN + ; [21:20] NIDEN + ; [17:16] DBGEN + ; [13:12] CFGTE + ; [9:8] VINITHI_CORE + ; [7] IMINLN + ; [3:0] CLUSTER_ID + + ;Set the CPU clock PLLs +SCC: 0x120 0x022F1010 ;CFGRW19 - CA15_0 PLL control - 20:1 (lock OFF) +SCC: 0x124 0x0011710D ;CFGRW20 - CA15_0 PLL value +SCC: 0x128 0x022F1010 ;CFGRW21 - CA15_1 PLL control - 20:1 (lock OFF) +SCC: 0x12C 0x0011710D ;CFGRW22 - CA15_1 PLL value +SCC: 0x130 0x022F1010 ;CFGRW23 - CA7_0 PLL control - 20:1 (lock OFF) +SCC: 0x134 0x0011710D ;CFGRW24 - CA7_0 PLL value +SCC: 0x138 0x022F1010 ;CFGRW25 - CA7_1 PLL control - 20:1 (lock OFF) +SCC: 0x13C 0x0011710D ;CFGRW26 - CA7_1 PLL value + + ;Power management interface +SCC: 0xC00 0x00000005 ;Control: [0]PMI_EN [1]DBG_EN [2]SPC_SYSCFG +SCC: 0xC04 0x060E0356 ;Latency in uS max: [15:0]DVFS [31:16]PWRUP +SCC: 0xC08 0x00000000 ;Reserved +SCC: 0xC0C 0x00000000 ;Reserved + + ;CA15 performance values: 0xVVVFFFFF +SCC: 0xC10 0x384061A8 ;CA15 PERFVAL0, 900mV, 20,000*20= 500MHz +SCC: 0xC14 0x38407530 ;CA15 PERFVAL1, 900mV, 25,000*20= 600MHz +SCC: 0xC18 0x384088B8 ;CA15 PERFVAL2, 900mV, 30,000*20= 700MHz +SCC: 0xC1C 0x38409C40 ;CA15 PERFVAL3, 900mV, 35,000*20= 800MHz +SCC: 0xC20 0x3840AFC8 ;CA15 PERFVAL4, 900mV, 40,000*20= 900MHz +SCC: 0xC24 0x3840C350 ;CA15 PERFVAL5, 900mV, 45,000*20=1000MHz +SCC: 0xC28 0x3CF0D6D8 ;CA15 PERFVAL6, 975mV, 50,000*20=1100MHz +SCC: 0xC2C 0x41A0EA60 ;CA15 PERFVAL7, 1050mV, 55,000*20=1200MHz + + ;CA7 performance values: 0xVVVFFFFF +SCC: 0xC30 0x3840445C ;CA7 PERFVAL0, 900mV, 10,000*20= 350MHz +SCC: 0xC34 0x38404E20 ;CA7 PERFVAL1, 900mV, 15,000*20= 400MHz +SCC: 0xC38 0x384061A8 ;CA7 PERFVAL2, 900mV, 20,000*20= 500MHz +SCC: 0xC3C 0x38407530 ;CA7 PERFVAL3, 900mV, 25,000*20= 600MHz +SCC: 0xC40 0x384088B8 ;CA7 PERFVAL4, 900mV, 30,000*20= 700MHz +SCC: 0xC44 0x38409C40 ;CA7 PERFVAL5, 900mV, 35,000*20= 800MHz +SCC: 0xC48 0x3CF0AFC8 ;CA7 PERFVAL6, 975mV, 40,000*20= 900MHz +SCC: 0xC4C 0x41A0C350 ;CA7 PERFVAL7, 1050mV, 45,000*20=1000MHz diff --git a/wlauto/devices/android/tc2/resources/images_iks.txt b/wlauto/devices/android/tc2/resources/images_iks.txt new file mode 100644 index 00000000..05707092 --- /dev/null +++ b/wlauto/devices/android/tc2/resources/images_iks.txt @@ -0,0 +1,25 @@ +TITLE: Versatile Express Images Configuration File + +[IMAGES] +TOTALIMAGES: 4 ;Number of Images (Max : 32) +NOR0UPDATE: AUTO ;Image Update:NONE/AUTO/FORCE +NOR0ADDRESS: BOOT ;Image Flash Address +NOR0FILE: \SOFTWARE\$bm_image ;Image File Name + +NOR1UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE +NOR1ADDRESS: 0x00000000 ;Image Flash Address +NOR1FILE: \SOFTWARE\kern_iks.bin ;Image File Name +NOR1LOAD: 0x80008000 +NOR1ENTRY: 0x80008000 + +NOR2UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE +NOR2ADDRESS: 0x00000000 ;Image Flash Address +NOR2FILE: \SOFTWARE\iks.dtb ;Image File Name for booting in A7 cluster +NOR2LOAD: 0x84000000 +NOR2ENTRY: 0x84000000 + +NOR3UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE +NOR3ADDRESS: 0x00000000 ;Image Flash Address +NOR3FILE: \SOFTWARE\init_iks.bin ;Image File Name +NOR3LOAD: 0x90100000 +NOR3ENTRY: 0x90100000 diff --git a/wlauto/devices/android/tc2/resources/images_mp.txt b/wlauto/devices/android/tc2/resources/images_mp.txt new file mode 100644 index 00000000..e671a74b --- /dev/null +++ b/wlauto/devices/android/tc2/resources/images_mp.txt @@ -0,0 +1,55 @@ +TITLE: Versatile Express Images Configuration File +[IMAGES] +TOTALIMAGES: 9 ;Number of Images (Max: 32) 
+NOR0UPDATE: AUTO ;Image Update:NONE/AUTO/FORCE +NOR0ADDRESS: BOOT ;Image Flash Address +NOR0FILE: \SOFTWARE\$bm_image ;Image File Name + +NOR1UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE +NOR1ADDRESS: 0x0E000000 ;Image Flash Address +NOR1FILE: \SOFTWARE\kern_mp.bin ;Image File Name +NOR1LOAD: 0x80008000 +NOR1ENTRY: 0x80008000 + +NOR2UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE +NOR2ADDRESS: 0x0E800000 ;Image Flash Address +NOR2FILE: \SOFTWARE\mp_a7.dtb ;Image File Name for booting in A7 cluster +NOR2LOAD: 0x84000000 +NOR2ENTRY: 0x84000000 + +NOR3UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE +NOR3ADDRESS: 0x0E900000 ;Image Flash Address +NOR3FILE: \SOFTWARE\mp_a15.dtb ;Image File Name +NOR3LOAD: 0x84000000 +NOR3ENTRY: 0x84000000 + +NOR4UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE +NOR4ADDRESS: 0x0EA00000 ;Image Flash Address +NOR4FILE: \SOFTWARE\mp_a7bc.dtb ;Image File Name +NOR4LOAD: 0x84000000 +NOR4ENTRY: 0x84000000 + +NOR5UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE +NOR5ADDRESS: 0x0EB00000 ;Image Flash Address +NOR5FILE: \SOFTWARE\mp_a15bc.dtb ;Image File Name +NOR5LOAD: 0x84000000 +NOR5ENTRY: 0x84000000 + +NOR6UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE +NOR6ADDRESS: 0x0EC00000 ;Image Flash Address +NOR6FILE: \SOFTWARE\init_mp.bin ;Image File Name +NOR6LOAD: 0x85000000 +NOR6ENTRY: 0x85000000 + +NOR7UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE +NOR7ADDRESS: 0x0C000000 ;Image Flash Address +NOR7FILE: \SOFTWARE\tc2_sec.bin ;Image File Name +NOR7LOAD: 0 +NOR7ENTRY: 0 + +NOR8UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE +NOR8ADDRESS: 0x0D000000 ;Image Flash Address +NOR8FILE: \SOFTWARE\tc2_uefi.bin ;Image File Name +NOR8LOAD: 0 +NOR8ENTRY: 0 + diff --git a/wlauto/devices/linux/__init__.py b/wlauto/devices/linux/__init__.py new file mode 100644 index 00000000..16224d6f --- /dev/null +++ b/wlauto/devices/linux/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2014-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + diff --git a/wlauto/devices/linux/generic/__init__.py b/wlauto/devices/linux/generic/__init__.py new file mode 100644 index 00000000..d6fb67a5 --- /dev/null +++ b/wlauto/devices/linux/generic/__init__.py @@ -0,0 +1,37 @@ +# Copyright 2014-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from wlauto import LinuxDevice, Parameter + + +class GenericDevice(LinuxDevice): + name = 'generic_linux' + description = """ + Generic Linux device. Use this if you do not have a device file for + your device. 
+
+    This implements the minimum functionality that should be supported by
+    all Linux devices.
+
+    """
+
+    abi = 'armeabi'
+    has_gpu = True
+
+    parameters = [
+        Parameter('core_names', default=[], override=True),
+        Parameter('core_clusters', default=[], override=True),
+    ]
diff --git a/wlauto/devices/linux/odroidxu3_linux/__init__.py b/wlauto/devices/linux/odroidxu3_linux/__init__.py
new file mode 100644
index 00000000..f174950a
--- /dev/null
+++ b/wlauto/devices/linux/odroidxu3_linux/__init__.py
@@ -0,0 +1,35 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from wlauto import LinuxDevice, Parameter
+
+
+class OdroidXU3LinuxDevice(LinuxDevice):
+
+    name = "odroidxu3_linux"
+    description = 'HardKernel Odroid XU3 development board (Ubuntu image).'
+
+    core_modules = [
+        'odroidxu3-fan',
+    ]
+
+    parameters = [
+        Parameter('core_names', default=['a7', 'a7', 'a7', 'a7', 'a15', 'a15', 'a15', 'a15'], override=True),
+        Parameter('core_clusters', default=[0, 0, 0, 0, 1, 1, 1, 1], override=True),
+    ]
+
+    abi = 'armeabi'
+
diff --git a/wlauto/exceptions.py b/wlauto/exceptions.py
new file mode 100644
index 00000000..36f3050a
--- /dev/null
+++ b/wlauto/exceptions.py
@@ -0,0 +1,143 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from wlauto.utils.misc import get_traceback, TimeoutError  # NOQA pylint: disable=W0611
+
+
+class WAError(Exception):
+    """Base class for all Workload Automation exceptions."""
+    pass
+
+
+class NotFoundError(WAError):
+    """Raised when the specified item is not found."""
+    pass
+
+
+class ValidationError(WAError):
+    """Raised on failure to validate an extension."""
+    pass
+
+
+class DeviceError(WAError):
+    """General Device error."""
+    pass
+
+
+class DeviceNotRespondingError(WAError):
+    """The device is not responding."""
+
+    def __init__(self, device):
+        super(DeviceNotRespondingError, self).__init__('Device {} is not responding.'.format(device))
+
+
+class WorkloadError(WAError):
+    """General Workload error."""
+    pass
+
+
+class HostError(WAError):
+    """Problem with the host on which WA is running."""
+    pass
+
+
+class ModuleError(WAError):
+    """
+    Problem with a module.
+
+    .. note:: Modules for specific extension types should raise exceptions
+              appropriate to that extension. E.g. a ``Device`` module should raise
+              ``DeviceError``. This is intended for situations where a module is
+              unsure (and/or doesn't care) what its owner is.
+ + """ + pass + + +class InstrumentError(WAError): + """General Instrument error.""" + pass + + +class ResultProcessorError(WAError): + """General ResultProcessor error.""" + pass + + +class ResourceError(WAError): + """General Resolver error.""" + pass + + +class CommandError(WAError): + """Raised by commands when they have encountered an error condition + during execution.""" + pass + + +class ToolError(WAError): + """Raised by tools when they have encountered an error condition + during execution.""" + pass + + +class LoaderError(WAError): + """Raised when there is an error loading an extension or + an external resource. Apart form the usual message, the __init__ + takes an exc_info parameter which should be the result of + sys.exc_info() for the original exception (if any) that + caused the error.""" + + def __init__(self, message, exc_info=None): + super(LoaderError, self).__init__(message) + self.exc_info = exc_info + + def __str__(self): + if self.exc_info: + orig = self.exc_info[1] + orig_name = type(orig).__name__ + if isinstance(orig, WAError): + reason = 'because of:\n{}: {}'.format(orig_name, orig) + else: + reason = 'because of:\n{}\n{}: {}'.format(get_traceback(self.exc_info), orig_name, orig) + return '\n'.join([self.message, reason]) + else: + return self.message + + +class ConfigError(WAError): + """Raised when configuration provided is invalid. This error suggests that + the user should modify their config and try again.""" + pass + + +class WorkerThreadError(WAError): + """ + This should get raised in the main thread if a non-WAError-derived exception occurs on + a worker/background thread. If a WAError-derived exception is raised in the worker, then + it that exception should be re-raised on the main thread directly -- the main point of this is + to preserve the backtrace in the output, and backtrace doesn't get output for WAErrors. + + """ + + def __init__(self, thread, exc_info): + self.thread = thread + self.exc_info = exc_info + orig = self.exc_info[1] + orig_name = type(orig).__name__ + message = 'Exception of type {} occured on thread {}:\n'.format(orig_name, thread) + message += '{}\n{}: {}'.format(get_traceback(self.exc_info), orig_name, orig) + super(WorkerThreadError, self).__init__(message) diff --git a/wlauto/external/README b/wlauto/external/README new file mode 100644 index 00000000..16d22a8e --- /dev/null +++ b/wlauto/external/README @@ -0,0 +1,74 @@ +This directory contains external libraries and standalone utilities which have +been written/modified to work with Workload Automation (and thus need to be +included with WA rather than obtained from orignal sources). + + +bbench_server +============= + +This is a small sever that is used to detect when ``bbench`` workload has completed. +``bbench`` navigates though a bunch of web pages in a browser using javascript. +It will cause the browser to sent a GET request to the port the bbench_server is +listening on, indicating the end of workload. + + +daq_server +========== + +Contains Daq server files that will run on a Windows machine. Please refer to +daq instrument documentation. + + +louie (third party) +===== + +Python package that is itself a fork (and now, a replacement for) pydispatcher. +This library provides a signal dispatching mechanism. This has been modified for +WA to add prioritization to callbacks. + + +pmu_logger +========== + +Source for the kernel driver that enable the logging of CCI counters to ftrace +on periodic basis. This driver is required by the ``cci_pmu_logger`` instrument. 
+
+
+readenergy
+==========
+
+Outputs Juno internal energy/power/voltage/current measurements by reading APB
+registers from memory. This is used by the ``juno_energy`` instrument.
+
+
+revent
+======
+
+This is a tool that is used to both record and play back key press and screen
+tap events. It is used to record UI manipulation for some workloads (such as
+games) where it is not possible to use the Android UI Automator.
+
+The tool is also included in binary form in wlauto/common/. In order to build
+the tool from source, you will need to have the Android NDK in your PATH.
+
+
+stacktracer.py (third party)
+============================
+
+A module based on an ActiveState recipe that allows tracing thread stacks during
+execution of a Python program. This is used through the ``--debug`` flag in WA
+to ease debugging multi-threaded parts of the code.
+
+
+terminalsize.py (third party)
+=============================
+
+Implements a platform-agnostic way of determining terminal window size. Taken
+from a public GitHub gist.
+
+
+uiauto
+======
+
+Contains the utilities library for UI automation.
+
diff --git a/wlauto/external/bbench_server/build.sh b/wlauto/external/bbench_server/build.sh
new file mode 100755
index 00000000..0c36467a
--- /dev/null
+++ b/wlauto/external/bbench_server/build.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+
+BUILD_COMMAND=ndk-build
+
+if [[ $(which $BUILD_COMMAND) ]] ; then
+    $BUILD_COMMAND
+    if [[ $? -eq 0 ]]; then
+        echo Copying to ../../workloads/bbench/
+        cp libs/armeabi/bbench_server ../../workloads/bbench/bin/armeabi/bbench_server
+    fi
+else
+    echo Please make sure you have the Android NDK in your PATH.
+    exit 1
+fi
+
diff --git a/wlauto/external/bbench_server/jni/Android.mk b/wlauto/external/bbench_server/jni/Android.mk
new file mode 100644
index 00000000..d6d40a08
--- /dev/null
+++ b/wlauto/external/bbench_server/jni/Android.mk
@@ -0,0 +1,9 @@
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES:= bbench_server.cpp
+LOCAL_MODULE := bbench_server
+LOCAL_MODULE_TAGS := optional
+LOCAL_STATIC_LIBRARIES := libc
+LOCAL_SHARED_LIBRARIES :=
+include $(BUILD_EXECUTABLE)
diff --git a/wlauto/external/bbench_server/jni/bbench_server.cpp b/wlauto/external/bbench_server/jni/bbench_server.cpp
new file mode 100755
index 00000000..9b1e87d4
--- /dev/null
+++ b/wlauto/external/bbench_server/jni/bbench_server.cpp
@@ -0,0 +1,151 @@
+/* Copyright 2012-2015 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+/**************************************************************************/
+/* Simple HTTP server program that will return on accepting connection   */
+/**************************************************************************/
+
+/* Tested on Android ICS browser and Firefox browser */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/select.h>
+#include <netinet/in.h>
+#include <netdb.h>
+#include <arpa/inet.h>
+
+#define SERVERPORT "3030"
+
+void ExitOnError(int condition, const char *msg)
+{
+    if(condition) { printf("Server: %s\n", msg); exit(1);}
+}
+
+void *GetInetAddr(struct sockaddr *sa)
+{
+    if (sa->sa_family == AF_INET)
+    {
+        return &(((struct sockaddr_in*)sa)->sin_addr);
+    }
+    else
+    {
+        return &(((struct sockaddr_in6*)sa)->sin6_addr);
+    }
+}
+
+int main(int argc, char *argv[])
+{
+
+    socklen_t addr_size;
+    struct addrinfo hints, *res;
+    int server_fd, client_fd;
+    int retval;
+    int timeout_in_seconds;
+
+    // Get the timeout value in seconds
+    if(argc < 2)
+    {
+        printf("Usage %s <timeout in seconds>\n", argv[0]);
+        exit(1);
+    }
+    else
+    {
+        timeout_in_seconds = atoi(argv[1]);
+        printf("Server: Waiting for connection on port %s with timeout of %d seconds\n", SERVERPORT, timeout_in_seconds);
+
+    }
+
+    /**************************************************************************/
+    /* Listen to a socket                                                     */
+    /**************************************************************************/
+    memset(&hints, 0, sizeof hints);
+    hints.ai_family = AF_UNSPEC;     // use IPv4 or IPv6, whichever
+    hints.ai_socktype = SOCK_STREAM;
+    hints.ai_flags = AI_PASSIVE;     // fill in my IP for me
+
+    getaddrinfo(NULL, SERVERPORT, &hints, &res);
+
+
+    server_fd = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
+    ExitOnError(server_fd < 0, "Socket creation failed");
+
+    retval = bind(server_fd, res->ai_addr, res->ai_addrlen);
+    ExitOnError(retval < 0, "Bind failed");
+
+    retval = listen(server_fd, 10);
+    ExitOnError(retval < 0, "Listen failed");
+
+    /**************************************************************************/
+    /* Wait for connection to arrive or time out                              */
+    /**************************************************************************/
+    fd_set readfds;
+    FD_ZERO(&readfds);
+    FD_SET(server_fd, &readfds);
+
+    // Timeout parameter
+    timeval tv;
+    tv.tv_sec = timeout_in_seconds;
+    tv.tv_usec = 0;
+
+    int ret = select(server_fd+1, &readfds, NULL, NULL, &tv);
+    ExitOnError(ret <= 0, "No connection established, timed out");
+    ExitOnError(FD_ISSET(server_fd, &readfds) == 0, "Error occurred in select");
+
+    /**************************************************************************/
+    /* Accept connection and print the information                            */
+    /**************************************************************************/
+    {
+        struct sockaddr_storage client_addr;
+        char client_addr_string[INET6_ADDRSTRLEN];
+        addr_size = sizeof client_addr;
+        client_fd = accept(server_fd, (struct sockaddr *)&client_addr, &addr_size);
+        ExitOnError(client_fd < 0, "Accept failed");
+
+        inet_ntop(client_addr.ss_family,
+                  GetInetAddr((struct sockaddr *)&client_addr),
+                  client_addr_string,
+                  sizeof client_addr_string);
+        printf("Server: Received connection from %s\n", client_addr_string);
+    }
+
+
+    /**************************************************************************/
+    /* Send an acceptable HTTP response                                       */
+    /**************************************************************************/
+    {
+
+        char response[] = "HTTP/1.1 200 OK\r\n"
+                          "Content-Type: text/html\r\n"
+                          "Connection: close\r\n"
+                          "\r\n"
+                          "<html><body>"
+                          "Local Server: Connection Accepted"
+                          "</body>"
+                          "</html>";
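+        /* Framing note: each header line ends in CRLF, and the bare "\r\n"
+         * above terminates the header block, so everything after it is the
+         * body. "Connection: close" tells the browser not to reuse the
+         * socket, which matches the close(client_fd) below. */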
+ int bytes_sent; + bytes_sent = send(client_fd, response, strlen(response), 0); + ExitOnError(bytes_sent < 0, "Sending Response failed"); + } + + + close(client_fd); + close(server_fd); + return 0; +} diff --git a/wlauto/external/daq_server/daqpower-1.0.1.tar.gz b/wlauto/external/daq_server/daqpower-1.0.1.tar.gz new file mode 100644 index 00000000..671a45e8 Binary files /dev/null and b/wlauto/external/daq_server/daqpower-1.0.1.tar.gz differ diff --git a/wlauto/external/daq_server/src/MANIFEST.in b/wlauto/external/daq_server/src/MANIFEST.in new file mode 100644 index 00000000..e69de29b diff --git a/wlauto/external/daq_server/src/README b/wlauto/external/daq_server/src/README new file mode 100644 index 00000000..e69de29b diff --git a/wlauto/external/daq_server/src/build.sh b/wlauto/external/daq_server/src/build.sh new file mode 100755 index 00000000..ef3be06e --- /dev/null +++ b/wlauto/external/daq_server/src/build.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# Copyright 2014-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +python setup.py sdist +rm -rf build +rm -f MANIFEST +if [[ -d dist ]]; then + mv dist/*.tar.gz .. + rm -rf dist +fi +find . -iname \*.pyc -delete diff --git a/wlauto/external/daq_server/src/daqpower/__init__.py b/wlauto/external/daq_server/src/daqpower/__init__.py new file mode 100644 index 00000000..ed442117 --- /dev/null +++ b/wlauto/external/daq_server/src/daqpower/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2014-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +__version__ = '1.0.1' diff --git a/wlauto/external/daq_server/src/daqpower/client.py b/wlauto/external/daq_server/src/daqpower/client.py new file mode 100644 index 00000000..b129dc77 --- /dev/null +++ b/wlauto/external/daq_server/src/daqpower/client.py @@ -0,0 +1,380 @@ +# Copyright 2014-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
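+#
+# Protocol overview (summarizing the classes below): the client opens a TCP
+# connection, writes one serialized DaqServerRequest terminated by CRLF, and
+# expects a single DaqServerResponse back. For 'get_data' it first sends
+# 'list_port_files', then issues one 'pull' per port; each pull response names
+# a secondary port from which the corresponding .csv file is streamed by a
+# FileReceiver.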
+# + + +# pylint: disable=E1101,E1103 +import os +import sys + +from twisted.internet import reactor +from twisted.internet.protocol import Protocol, ClientFactory, ReconnectingClientFactory +from twisted.internet.error import ConnectionLost, ConnectionDone +from twisted.protocols.basic import LineReceiver + +if __name__ == '__main__': # for debugging + sys.path.append(os.path.join(os.path.dirname(__file__), '..')) +from daqpower import log +from daqpower.common import DaqServerRequest, DaqServerResponse, Status +from daqpower.config import get_config_parser + + +__all__ = ['execute_command', 'run_send_command', 'Status'] + + +class Command(object): + + def __init__(self, name, **params): + self.name = name + self.params = params + + +class CommandResult(object): + + def __init__(self): + self.status = None + self.message = None + self.data = None + + def __str__(self): + return '{} {}'.format(self.status, self.message) + + +class CommandExecutorProtocol(Protocol): + + def __init__(self, command, timeout=10, retries=1): + self.command = command + self.sent_request = None + self.waiting_for_response = False + self.keep_going = None + self.ports_to_pull = None + self.factory = None + self.timeoutCallback = None + self.timeout = timeout + self.retries = retries + self.retry_count = 0 + + def connectionMade(self): + if self.command.name == 'get_data': + self.sendRequest('list_port_files') + else: + self.sendRequest(self.command.name, **self.command.params) + + def connectionLost(self, reason=ConnectionDone): + if isinstance(reason, ConnectionLost): + self.errorOut('connection lost: {}'.format(reason)) + elif self.waiting_for_response: + self.errorOut('Server closed connection without sending a response.') + else: + log.debug('connection terminated.') + + def sendRequest(self, command, **params): + self.sent_request = DaqServerRequest(command, params) + request_string = self.sent_request.serialize() + log.debug('sending request: {}'.format(request_string)) + self.transport.write(''.join([request_string, '\r\n'])) + self.timeoutCallback = reactor.callLater(self.timeout, self.requestTimedOut) + self.waiting_for_response = True + + def dataReceived(self, data): + self.keep_going = False + if self.waiting_for_response: + self.waiting_for_response = False + self.timeoutCallback.cancel() + try: + response = DaqServerResponse.deserialize(data) + except Exception, e: # pylint: disable=W0703 + self.errorOut('Invalid response: {} ({})'.format(data, e)) + else: + if response.status != Status.ERROR: + self.processResponse(response) # may set self.keep_going + if not self.keep_going: + self.commandCompleted(response.status, response.message, response.data) + else: + self.errorOut(response.message) + else: + self.errorOut('unexpected data received: {}\n'.format(data)) + + def processResponse(self, response): + if self.sent_request.command in ['list_ports', 'list_port_files']: + self.processPortsResponse(response) + elif self.sent_request.command == 'list_devices': + self.processDevicesResponse(response) + elif self.sent_request.command == 'pull': + self.processPullResponse(response) + + def processPortsResponse(self, response): + if 'ports' not in response.data: + self.errorOut('Response did not contain ports data: {} ({}).'.format(response, response.data)) + ports = response.data['ports'] + response.data = ports + if self.command.name == 'get_data': + if ports: + self.ports_to_pull = ports + self.sendPullRequest(self.ports_to_pull.pop()) + else: + response.status = Status.OKISH + response.message = 'No ports were returned.' + + def processDevicesResponse(self, response): + if 'devices' not in response.data: + self.errorOut('Response did not contain devices data: {} ({}).'.format(response, response.data)) + devices = response.data['devices'] + response.data = devices + + def sendPullRequest(self, port_id): + self.sendRequest('pull', port_id=port_id) + self.keep_going = True + + def processPullResponse(self, response): + if 'port_number' not in response.data: + self.errorOut('Response does not contain port number: {} ({}).'.format(response, response.data)) + port_number = response.data.pop('port_number') + filename = self.sent_request.params['port_id'] + '.csv' + self.factory.initiateFileTransfer(filename, port_number) + if self.ports_to_pull: + self.sendPullRequest(self.ports_to_pull.pop()) + + def commandCompleted(self, status, message=None, data=None): + self.factory.result.status = status + self.factory.result.message = message + self.factory.result.data = data + self.transport.loseConnection() + + def requestTimedOut(self): + self.retry_count += 1 + if self.retry_count > self.retries: + self.errorOut("Request timed out; server failed to respond.") + else: + log.debug('Retrying...') + self.connectionMade() + + def errorOut(self, message): + self.factory.errorOut(message) + + +class CommandExecutorFactory(ClientFactory): + + protocol = CommandExecutorProtocol + wait_delay = 1 + + def __init__(self, config, command, timeout=10, retries=1): + self.config = config + self.command = command + self.timeout = timeout + self.retries = retries + self.result = CommandResult() + self.done = False + self.transfers_in_progress = {} + if command.name == 'get_data': + if 'output_directory' not in command.params: + self.errorOut('output_directory not specified for get_data command.') + self.output_directory = command.params['output_directory'] + if not os.path.isdir(self.output_directory): + log.debug('Creating output directory {}'.format(self.output_directory)) + os.makedirs(self.output_directory) + + def buildProtocol(self, addr): + protocol = CommandExecutorProtocol(self.command, self.timeout, self.retries) + protocol.factory = self + return protocol + + def initiateFileTransfer(self, filename, port): + log.debug('Downloading {} from port {}'.format(filename, port)) + filepath = os.path.join(self.output_directory, filename) + session = FileReceiverFactory(filepath, self) + connector = reactor.connectTCP(self.config.host, port, session) + self.transfers_in_progress[session] = connector + + def transferComplete(self, session): + connector = self.transfers_in_progress[session] + log.debug('Transfer on port {} complete.'.format(connector.port)) + del self.transfers_in_progress[session] + + def clientConnectionLost(self, connector, reason): + if self.transfers_in_progress: + log.debug('Waiting for the transfer(s) to complete.') + self.waitForTransfersToCompleteAndExit() + + def clientConnectionFailed(self, connector, reason): + self.result.status = Status.ERROR + self.result.message = 'Could not connect to server.'
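+ # Data files are pulled over separate per-port TCP connections, so even if + # the control connection fails, receivers spawned by initiateFileTransfer() + # may still be mid-transfer; the polling below lets those drain before the + # reactor is stopped.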
+ self.waitForTransfersToCompleteAndExit() + + def waitForTransfersToCompleteAndExit(self): + if self.transfers_in_progress: + reactor.callLater(self.wait_delay, self.waitForTransfersToCompleteAndExit) + else: + log.debug('Stopping the reactor.') + reactor.stop() + + def errorOut(self, message): + self.result.status = Status.ERROR + self.result.message = message + reactor.crash() + + def __str__(self): + return '<CommandExecutorFactory {}>'.format(self.command.name) + + __repr__ = __str__ + + +class FileReceiver(LineReceiver): # pylint: disable=W0223 + + def __init__(self, path): + self.path = path + self.fh = None + self.factory = None + + def connectionMade(self): + if os.path.isfile(self.path): + log.warning('overwriting existing file.') + os.remove(self.path) + self.fh = open(self.path, 'w') + + def connectionLost(self, reason=ConnectionDone): + if self.fh: + self.fh.close() + + def lineReceived(self, line): + line = line.rstrip('\r\n') + '\n' + self.fh.write(line) + + +class FileReceiverFactory(ReconnectingClientFactory): + + def __init__(self, path, owner): + self.path = path + self.owner = owner + + def buildProtocol(self, addr): + protocol = FileReceiver(self.path) + protocol.factory = self + self.resetDelay() + return protocol + + def clientConnectionLost(self, connector, reason): + if isinstance(reason, ConnectionLost): + log.error('Connection lost: {}'.format(reason)) + ReconnectingClientFactory.clientConnectionLost(self, connector, reason) + else: + self.owner.transferComplete(self) + + def clientConnectionFailed(self, connector, reason): + if isinstance(reason, ConnectionLost): + log.error('Connection failed: {}'.format(reason)) + ReconnectingClientFactory.clientConnectionFailed(self, connector, reason) + + def __str__(self): + return '<FileReceiverFactory {}>'.format(self.path) + + __repr__ = __str__ + + +def execute_command(server_config, command, **kwargs): + before_fds = _get_open_fds() # see the comment in the finally clause below + if isinstance(command, basestring): + command = Command(command, **kwargs) + timeout = 300 if command.name in ['stop', 'pull'] else 10 + factory = CommandExecutorFactory(server_config, command, timeout) + + # reactors aren't designed to be re-startable. In order to be + # able to call execute_command multiple times, we need to force + # re-installation of the reactor; hence this hackery. + # TODO: look into implementing restartable reactors. According to the + # Twisted FAQ, there is no good reason why there isn't one: + # http://twistedmatrix.com/trac/wiki/FrequentlyAskedQuestions#WhycanttheTwistedsreactorberestarted + from twisted.internet import default + del sys.modules['twisted.internet.reactor'] + default.install() + global reactor # pylint: disable=W0603 + reactor = sys.modules['twisted.internet.reactor'] + + try: + reactor.connectTCP(server_config.host, server_config.port, factory) + reactor.run() + return factory.result + finally: + # re-startable reactor hack part 2. + # twisted hijacks SIGINT and doesn't bother to un-hijack it when the reactor + # stops. So we have to do it for it *rolls eyes*. + import signal + signal.signal(signal.SIGINT, signal.default_int_handler) + # OK, the reactor is also leaking file descriptors. Tracking down all + # of them is non-trivial, so instead we're just comparing the before + # and after lists of open FDs for the current process, and closing all + # new ones, as execute_command should never leave anything open after + # it exits (even when downloading data files from the server). + # TODO: This is way too hacky even compared to the rest of this function.
+ # Additionally, the current implementation ties this to UNIX, + # so in the long run, we need to do this properly and get the FDs + # from the reactor. + after_fds = _get_open_fds() + for fd in (after_fds - before_fds): + try: + os.close(int(fd[1:])) + except OSError: + pass + # Below is the alternative code that gets FDs from the reactor, however + # at the moment it doesn't seem to get everything, which is why the code + # above is used instead. + #for fd in reactor._selectables: + # os.close(fd) + #reactor._poller.close() + + +def _get_open_fds(): + if os.name == 'posix': + import subprocess + pid = os.getpid() + procs = subprocess.check_output(['lsof', '-w', '-Ff', '-p', str(pid)]) + return set(procs.split()) + else: + # TODO: Implement the Windows equivalent. + return set() + + +def run_send_command(): + """Main entry point when running as a script -- should not be invoked from another module.""" + parser = get_config_parser() + parser.add_argument('command') + parser.add_argument('-o', '--output-directory', metavar='DIR', default='.', + help='Directory used to output data files (defaults to the current directory).') + parser.add_argument('--verbose', help='Produce verbose output.', action='store_true', default=False) + args = parser.parse_args() + if not args.device_config.labels: + args.device_config.labels = ['PORT_{}'.format(i) for i in xrange(len(args.device_config.resistor_values))] + + if args.verbose: + log.start_logging('DEBUG') + else: + log.start_logging('INFO', fmt='%(levelname)-8s %(message)s') + + if args.command == 'configure': + args.device_config.validate() + command = Command(args.command, config=args.device_config) + elif args.command == 'get_data': + command = Command(args.command, output_directory=args.output_directory) + else: + command = Command(args.command) + + result = execute_command(args.server_config, command) + print result + if result.data: + print result.data + + +if __name__ == '__main__': + run_send_command() diff --git a/wlauto/external/daq_server/src/daqpower/common.py b/wlauto/external/daq_server/src/daqpower/common.py new file mode 100644 index 00000000..3e64c16e --- /dev/null +++ b/wlauto/external/daq_server/src/daqpower/common.py @@ -0,0 +1,99 @@ +# Copyright 2014-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+# + + +# pylint: disable=E1101 +import json + + +class Serializer(json.JSONEncoder): + + def default(self, o): # pylint: disable=E0202 + if isinstance(o, Serializable): + return o.serialize() + if isinstance(o, Enum.EnumEntry): + return o.name + return json.JSONEncoder.default(self, o) + + +class Serializable(object): + + @classmethod + def deserialize(cls, text): + return cls(**json.loads(text)) + + def serialize(self, d=None): + if d is None: + d = self.__dict__ + return json.dumps(d, cls=Serializer) + + +class DaqServerRequest(Serializable): + + def __init__(self, command, params=None): # pylint: disable=W0231 + self.command = command + self.params = params or {} + + +class DaqServerResponse(Serializable): + + def __init__(self, status, message=None, data=None): # pylint: disable=W0231 + self.status = status + self.message = message.strip().replace('\r\n', ' ') if message else '' + self.data = data or {} + + def __str__(self): + return '{} {}'.format(self.status, self.message or '') + + +class Enum(object): + """ + Assuming MyEnum = Enum('A', 'B'), + + MyEnum.A and MyEnum.B are valid values. + + a = MyEnum.A + (a == MyEnum.A) == True + (a in MyEnum) == True + + MyEnum('A') == MyEnum.A + + str(MyEnum.A) == 'A' + + """ + + class EnumEntry(object): + def __init__(self, name): + self.name = name + def __str__(self): + return self.name + def __cmp__(self, other): + return cmp(self.name, str(other)) + + def __init__(self, *args): + for a in args: + setattr(self, a, self.EnumEntry(a)) + + def __call__(self, value): + if value not in self.__dict__: + raise ValueError('Not an enum value: {}'.format(value)) + return self.__dict__[value] + + def __iter__(self): + for e in self.__dict__: + yield self.__dict__[e] + + +Status = Enum('OK', 'OKISH', 'ERROR') diff --git a/wlauto/external/daq_server/src/daqpower/config.py b/wlauto/external/daq_server/src/daqpower/config.py new file mode 100644 index 00000000..bfc3280f --- /dev/null +++ b/wlauto/external/daq_server/src/daqpower/config.py @@ -0,0 +1,154 @@ +# Copyright 2014-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +import argparse + +from daqpower.common import Serializable + + +class ConfigurationError(Exception): + """Raised when configuration passed into DaqServer is invalid.""" + pass + + +class DeviceConfiguration(Serializable): + """Encapsulates configuration for the DAQ, typically passed from + the client.""" + + valid_settings = ['device_id', 'v_range', 'dv_range', 'sampling_rate', 'resistor_values', 'labels'] + + default_device_id = 'Dev1' + default_v_range = 2.5 + default_dv_range = 0.2 + default_sampling_rate = 10000 + # Channel map used in DAQ 6363 and similar.
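+ # ReadSamplesTask (in daq.py) consumes this map in pairs: for port N, channel + # map[2N] samples the source voltage and map[2N+1] samples the drop across the + # port's shunt resistor. The map skips ai8-ai15 because, in differential mode + # on this family of devices, input aiX pairs with aiX+8 as its negative + # terminal, leaving ai0-7 and ai16-23 as the usable channels.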
+ default_channel_map = (0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23) + + @property + def number_of_ports(self): + return len(self.resistor_values) + + def __init__(self, **kwargs): # pylint: disable=W0231 + try: + self.device_id = kwargs.pop('device_id') or self.default_device_id + self.v_range = float(kwargs.pop('v_range') or self.default_v_range) + self.dv_range = float(kwargs.pop('dv_range') or self.default_dv_range) + self.sampling_rate = int(kwargs.pop('sampling_rate') or self.default_sampling_rate) + self.resistor_values = kwargs.pop('resistor_values') or [] + self.channel_map = kwargs.pop('channel_map') or self.default_channel_map + self.labels = (kwargs.pop('labels') or + ['PORT_{}'.format(i) for i in xrange(len(self.resistor_values))]) + except KeyError, e: + raise ConfigurationError('Missing config: {}'.format(e.message)) + if kwargs: + raise ConfigurationError('Unexpected config: {}'.format(kwargs)) + + def validate(self): + if not self.number_of_ports: + raise ConfigurationError('No resistor values were specified.') + if not len(self.resistor_values) == len(self.labels): + message = 'The number of resistors ({}) does not match the number of labels ({}).' + raise ConfigurationError(message.format(len(self.resistor_values), len(self.labels))) + + def __str__(self): + return self.serialize() + + __repr__ = __str__ + + +class ServerConfiguration(object): + """Client-side server configuration.""" + + valid_settings = ['host', 'port'] + + default_host = '127.0.0.1' + default_port = 45677 + + def __init__(self, **kwargs): + self.host = kwargs.pop('host', None) or self.default_host + self.port = kwargs.pop('port', None) or self.default_port + if kwargs: + raise ConfigurationError('Unexpected config: {}'.format(kwargs)) + + def validate(self): + if not self.host: + raise ConfigurationError('Server host not specified.') + if not self.port: + raise ConfigurationError('Server port not specified.') + elif not isinstance(self.port, int): + raise ConfigurationError('Server port must be an integer.') + + +class UpdateDeviceConfig(argparse.Action): + + def __call__(self, parser, namespace, values, option_string=None): + setting = option_string.strip('-').replace('-', '_') + if setting not in DeviceConfiguration.valid_settings: + raise ConfigurationError('Unknown option: {}'.format(option_string)) + setattr(namespace._device_config, setting, values) + + +class UpdateServerConfig(argparse.Action): + + def __call__(self, parser, namespace, values, option_string=None): + setting = option_string.strip('-').replace('-', '_') + if setting not in namespace.server_config.valid_settings: + raise ConfigurationError('Unknown option: {}'.format(option_string)) + setattr(namespace.server_config, setting, values) + + +class ConfigNamespace(object): + + class _N(object): + def __init__(self): + self.device_id = None + self.v_range = None + self.dv_range = None + self.sampling_rate = None + self.resistor_values = None + self.labels = None + self.channel_map = None + + @property + def device_config(self): + return DeviceConfiguration(**self._device_config.__dict__) + + def __init__(self): + self._device_config = self._N() + self.server_config = ServerConfiguration() + + +class ConfigArgumentParser(argparse.ArgumentParser): + + def parse_args(self, *args, **kwargs): + kwargs['namespace'] = ConfigNamespace() + return super(ConfigArgumentParser, self).parse_args(*args, **kwargs) + + +def get_config_parser(server=True, device=True): + parser = ConfigArgumentParser() + if device: +
parser.add_argument('--device-id', action=UpdateDeviceConfig) + parser.add_argument('--v-range', action=UpdateDeviceConfig, type=float) + parser.add_argument('--dv-range', action=UpdateDeviceConfig, type=float) + parser.add_argument('--sampling-rate', action=UpdateDeviceConfig, type=int) + parser.add_argument('--resistor-values', action=UpdateDeviceConfig, type=float, nargs='*') + parser.add_argument('--labels', action=UpdateDeviceConfig, nargs='*') + if server: + parser.add_argument('--host', action=UpdateServerConfig) + parser.add_argument('--port', action=UpdateServerConfig, type=int) + return parser + diff --git a/wlauto/external/daq_server/src/daqpower/daq.py b/wlauto/external/daq_server/src/daqpower/daq.py new file mode 100644 index 00000000..12689541 --- /dev/null +++ b/wlauto/external/daq_server/src/daqpower/daq.py @@ -0,0 +1,265 @@ +# Copyright 2013-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +""" +Creates a new DAQ device class. This class assumes that there is a +DAQ connected and mapped as Dev1. It assumes a specific wiring configuration on the DAQ (it is not +meant to be a generic DAQ interface). The following diagram shows the wiring for one DaqDevice +port:: + +Port 0 +======== +| A0+ <--- Vr -------------------------| +| | +| A0- <--- GND -------------------// | +| | +| A1+ <--- V+ ------------|-------V+ | +| r | | +| A1- <--- Vr --/\/\/\----| | +| | | +| | | +| |--------------------------| +======== + +:number_of_ports: The number of ports connected on the DAQ. Each port requires two DAQ + channels: one for the source voltage and one for the voltage drop + across the resistor r; (V+ - Vr) allows us to calculate the current. +:resistor_value: The resistance of r. Typically a few milliohms. +:downsample: The number of samples combined to create one power point. If set to one, + each sample corresponds to one reported power point. +:sampling_rate: The rate at which the DAQ takes a sample from each channel. + +""" +# pylint: disable=F0401,E1101,W0621 +import os +import sys +import csv +import time +import threading +from Queue import Queue, Empty + +import numpy + +from PyDAQmx import Task +from PyDAQmx.DAQmxFunctions import DAQmxGetSysDevNames +from PyDAQmx.DAQmxTypes import int32, byref, create_string_buffer +from PyDAQmx.DAQmxConstants import (DAQmx_Val_Diff, DAQmx_Val_Volts, DAQmx_Val_GroupByScanNumber, DAQmx_Val_Auto, + DAQmx_Val_Acquired_Into_Buffer, DAQmx_Val_Rising, DAQmx_Val_ContSamps) + +from daqpower import log + +def list_available_devices(): + """Returns the list of DAQ devices visible to the driver.""" + bufsize = 2048 # Should be plenty for all but the most pathological of situations.
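+ # DAQmxGetSysDevNames fills the buffer with a comma-separated list of device + # names (e.g. 'Dev1,Dev2'), hence the split on ',' below.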
+ buf = create_string_buffer('\000' * bufsize) + DAQmxGetSysDevNames(buf, bufsize) + return buf.value.split(',') + + +class ReadSamplesTask(Task): + + def __init__(self, config, consumer): + Task.__init__(self) + self.config = config + self.consumer = consumer + self.sample_buffer_size = (self.config.sampling_rate + 1) * self.config.number_of_ports * 2 + self.samples_read = int32() + self.remainder = [] + # create voltage channels + for i in xrange(0, 2 * self.config.number_of_ports, 2): + self.CreateAIVoltageChan('{}/ai{}'.format(config.device_id, config.channel_map[i]), + '', DAQmx_Val_Diff, + -config.v_range, config.v_range, + DAQmx_Val_Volts, None) + self.CreateAIVoltageChan('{}/ai{}'.format(config.device_id, config.channel_map[i + 1]), + '', DAQmx_Val_Diff, + -config.dv_range, config.dv_range, + DAQmx_Val_Volts, None) + # configure sampling rate + self.CfgSampClkTiming('', + self.config.sampling_rate, + DAQmx_Val_Rising, + DAQmx_Val_ContSamps, + self.config.sampling_rate) + # register callbacks + self.AutoRegisterEveryNSamplesEvent(DAQmx_Val_Acquired_Into_Buffer, self.config.sampling_rate // 2, 0) + self.AutoRegisterDoneEvent(0) + + def EveryNCallback(self): + samples_buffer = numpy.zeros((self.sample_buffer_size,), dtype=numpy.float64) + self.ReadAnalogF64(DAQmx_Val_Auto, 0.0, DAQmx_Val_GroupByScanNumber, samples_buffer, + self.sample_buffer_size, byref(self.samples_read), None) + self.consumer.write((samples_buffer, self.samples_read.value)) + + def DoneCallback(self, status): # pylint: disable=W0613,R0201 + return 0 # The function should return an integer + + +class AsyncWriter(threading.Thread): + + def __init__(self, wait_period=1): + super(AsyncWriter, self).__init__() + self.daemon = True + self.wait_period = wait_period + self.running = threading.Event() + self._stop_signal = threading.Event() + self._queue = Queue() + + def write(self, stuff): + if self._stop_signal.is_set(): + raise IOError('Attempting to write to {} after it has been closed.'.format(self.__class__.__name__)) + self._queue.put(stuff) + + def do_write(self, stuff): + raise NotImplementedError() + + def run(self): + self.running.set() + while True: + if self._stop_signal.is_set() and self._queue.empty(): + break + try: + self.do_write(self._queue.get(block=True, timeout=self.wait_period)) + except Empty: + pass # carry on + self.running.clear() + + def stop(self): + self._stop_signal.set() + + def wait(self): + while self.running.is_set(): + time.sleep(self.wait_period) + + +class PortWriter(object): + + def __init__(self, path): + self.path = path + self.fh = open(path, 'w', 0) + self.writer = csv.writer(self.fh) + self.writer.writerow(['power', 'voltage']) + + def write(self, row): + self.writer.writerow(row) + + def close(self): + self.fh.close() + + def __del__(self): + self.close() + + +class SampleProcessorError(Exception): + pass + + +class SampleProcessor(AsyncWriter): + + def __init__(self, resistor_values, output_directory, labels): + super(SampleProcessor, self).__init__() + self.resistor_values = resistor_values + self.output_directory = output_directory + self.labels = labels + self.number_of_ports = len(resistor_values) + if len(self.labels) != self.number_of_ports: + message = 'Number of labels ({}) does not match number of ports ({}).'
+ raise SampleProcessorError(message.format(len(self.labels), self.number_of_ports)) + self.port_writers = [] + + def do_write(self, sample_tuple): + samples, number_of_samples = sample_tuple + for i in xrange(0, number_of_samples * self.number_of_ports * 2, self.number_of_ports * 2): + for j in xrange(self.number_of_ports): + V = float(samples[i + 2 * j]) + DV = float(samples[i + 2 * j + 1]) + P = V * (DV / self.resistor_values[j]) + self.port_writers[j].write([P, V]) + + def start(self): + for label in self.labels: + port_file = self.get_port_file_path(label) + writer = PortWriter(port_file) + self.port_writers.append(writer) + super(SampleProcessor, self).start() + + def stop(self): + super(SampleProcessor, self).stop() + self.wait() + for writer in self.port_writers: + writer.close() + + def get_port_file_path(self, port_id): + if port_id in self.labels: + return os.path.join(self.output_directory, port_id + '.csv') + else: + raise SampleProcessorError('Invalid port ID: {}'.format(port_id)) + + def __del__(self): + self.stop() + + +class DaqRunner(object): + + @property + def number_of_ports(self): + return self.config.number_of_ports + + def __init__(self, config, output_directory): + self.config = config + self.processor = SampleProcessor(config.resistor_values, output_directory, config.labels) + self.task = ReadSamplesTask(config, self.processor) + self.is_running = False + + def start(self): + log.debug('Starting sample processor.') + self.processor.start() + log.debug('Starting DAQ Task.') + self.task.StartTask() + self.is_running = True + log.debug('Runner started.') + + def stop(self): + self.is_running = False + log.debug('Stopping DAQ Task.') + self.task.StopTask() + log.debug('Stopping sample processor.') + self.processor.stop() + log.debug('Runner stopped.') + + def get_port_file_path(self, port_id): + return self.processor.get_port_file_path(port_id) + + +if __name__ == '__main__': + from collections import namedtuple + DeviceConfig = namedtuple('DeviceConfig', ['device_id', 'channel_map', 'resistor_values', + 'v_range', 'dv_range', 'sampling_rate', + 'number_of_ports', 'labels']) + channel_map = (0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23) + resistor_values = [0.005] + labels = ['PORT_0'] + dev_config = DeviceConfig('Dev1', channel_map, resistor_values, 2.5, 0.2, 10000, len(resistor_values), labels) + if not len(sys.argv) == 3: + print 'Usage: {} OUTDIR DURATION'.format(os.path.basename(__file__)) + sys.exit(1) + output_directory = sys.argv[1] + duration = float(sys.argv[2]) + + print "Available devices:", list_available_devices() + runner = DaqRunner(dev_config, output_directory) + runner.start() + time.sleep(duration) + runner.stop() diff --git a/wlauto/external/daq_server/src/daqpower/log.py b/wlauto/external/daq_server/src/daqpower/log.py new file mode 100644 index 00000000..c9b215ae --- /dev/null +++ b/wlauto/external/daq_server/src/daqpower/log.py @@ -0,0 +1,53 @@ +# Copyright 2014-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+# + + +import logging + +from twisted.python import log + +__all__ = ['debug', 'info', 'warning', 'error', 'critical', 'start_logging'] + +debug = lambda x: log.msg(x, logLevel=logging.DEBUG) +info = lambda x: log.msg(x, logLevel=logging.INFO) +warning = lambda x: log.msg(x, logLevel=logging.WARNING) +error = lambda x: log.msg(x, logLevel=logging.ERROR) +critical = lambda x: log.msg(x, logLevel=logging.CRITICAL) + + +class CustomLoggingObserver(log.PythonLoggingObserver): + + def emit(self, eventDict): + if 'logLevel' in eventDict: + level = eventDict['logLevel'] + elif eventDict['isError']: + level = logging.ERROR + else: + # All of that just to override this one line from the + # default INFO level... + level = logging.DEBUG + text = log.textFromEventDict(eventDict) + if text is None: + return + self.logger.log(level, text) + + +logObserver = CustomLoggingObserver() +logObserver.start() + + +def start_logging(level, fmt='%(asctime)s %(levelname)-8s: %(message)s'): + logging.basicConfig(level=getattr(logging, level), format=fmt) + diff --git a/wlauto/external/daq_server/src/daqpower/server.py b/wlauto/external/daq_server/src/daqpower/server.py new file mode 100644 index 00000000..9aac51a2 --- /dev/null +++ b/wlauto/external/daq_server/src/daqpower/server.py @@ -0,0 +1,480 @@ +# Copyright 2014-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +# pylint: disable=E1101,W0613 +from __future__ import division +import os +import sys +import socket +import argparse +import shutil +import time +from datetime import datetime + +from zope.interface import implements +from twisted.protocols.basic import LineReceiver +from twisted.internet.protocol import Factory, Protocol +from twisted.internet import reactor, interfaces +from twisted.internet.error import ConnectionLost, ConnectionDone + + +if __name__ == "__main__": # for debugging + sys.path.append(os.path.join(os.path.dirname(__file__), '..')) +from daqpower import log +from daqpower.config import DeviceConfiguration +from daqpower.common import DaqServerRequest, DaqServerResponse, Status +try: + from daqpower.daq import DaqRunner, list_available_devices +except ImportError: + # May be using debug mode.
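+ # PyDAQmx (and the NI driver stack) may be absent; the server can still be + # exercised end-to-end by passing --debug to run-daq-server, which substitutes + # DummyDaqRunner below to generate random sample data, e.g.: + # run-daq-server --debug --verbose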
+ DaqRunner = None + list_available_devices = lambda : ['Dev1'] + + +class ProtocolError(Exception): + pass + + +class DummyDaqRunner(object): + """Dummy stub used when running in debug mode.""" + + num_rows = 200 + + @property + def number_of_ports(self): + return self.config.number_of_ports + + def __init__(self, config, output_directory): + log.info('Creating runner with {} {}'.format(config, output_directory)) + self.config = config + self.output_directory = output_directory + self.is_running = False + + def start(self): + import csv, random + log.info('runner started') + for i in xrange(self.config.number_of_ports): + rows = [['power', 'voltage']] + [[random.gauss(1.0, 1.0), random.gauss(1.0, 0.1)] + for j in xrange(self.num_rows)] + with open(self.get_port_file_path(self.config.labels[i]), 'wb') as wfh: + writer = csv.writer(wfh) + writer.writerows(rows) + + self.is_running = True + + def stop(self): + self.is_running = False + log.info('runner stopped') + + def get_port_file_path(self, port_id): + if port_id in self.config.labels: + return os.path.join(self.output_directory, '{}.csv'.format(port_id)) + else: + raise Exception('Invalid port id: {}'.format(port_id)) + + +class DaqServer(object): + + def __init__(self, base_output_directory): + self.base_output_directory = os.path.abspath(base_output_directory) + if os.path.isdir(self.base_output_directory): + log.info('Using output directory: {}'.format(self.base_output_directory)) + else: + log.info('Creating new output directory: {}'.format(self.base_output_directory)) + os.makedirs(self.base_output_directory) + self.runner = None + self.output_directory = None + self.labels = None + + def configure(self, config_string): + message = None + if self.runner: + message = 'Configuring a new session before previous session has been terminated.' + log.warning(message) + if self.runner.is_running: + self.runner.stop() + config = DeviceConfiguration.deserialize(config_string) + config.validate() + self.output_directory = self._create_output_directory() + self.labels = config.labels + log.info('Writing port files to {}'.format(self.output_directory)) + self.runner = DaqRunner(config, self.output_directory) + return message + + def start(self): + if self.runner: + if not self.runner.is_running: + self.runner.start() + else: + message = 'Calling start() before stop() has been called. Data up to this point will be lost.' + log.warning(message) + self.runner.stop() + self.runner.start() + return message + else: + raise ProtocolError('Start called before a session has been configured.') + + def stop(self): + if self.runner: + if self.runner.is_running: + self.runner.stop() + else: + message = 'Attempting to stop() before start() was invoked.' 
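+ # The warning text is returned to the caller as well as logged, so the + # client sees an OKISH (success-with-caveats) response rather than a plain OK.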
+ log.warning(message) + self.runner.stop() + return message + else: + raise ProtocolError('Stop called before a session has been configured.') + + def list_devices(self): + return list_available_devices() + + def list_ports(self): + return self.labels + + def list_port_files(self): + if not self.runner: + raise ProtocolError('Attempting to list port files before session has been configured.') + ports_with_files = [] + for port_id in self.labels: + path = self.get_port_file_path(port_id) + if os.path.isfile(path): + ports_with_files.append(port_id) + return ports_with_files + + def get_port_file_path(self, port_id): + if not self.runner: + raise ProtocolError('Attempting to get port file path before session has been configured.') + return self.runner.get_port_file_path(port_id) + + def terminate(self): + message = None + if self.runner: + if self.runner.is_running: + message = 'Terminating session before runner has been stopped.' + log.warning(message) + self.runner.stop() + self.runner = None + if self.output_directory and os.path.isdir(self.output_directory): + shutil.rmtree(self.output_directory) + self.output_directory = None + log.info('Session terminated.') + else: # Runner has not been created. + message = 'Attempting to close session before it has been configured.' + log.warning(message) + return message + + def _create_output_directory(self): + basename = datetime.now().strftime('%Y-%m-%d_%H%M%S%f') + dirname = os.path.join(self.base_output_directory, basename) + os.makedirs(dirname) + return dirname + + def __del__(self): + if self.runner: + self.runner.stop() + + def __str__(self): + return '<DaqServer ({})>'.format(self.base_output_directory) + + __repr__ = __str__ + + +class DaqControlProtocol(LineReceiver): # pylint: disable=W0223 + + def __init__(self, daq_server): + self.daq_server = daq_server + self.factory = None + + def lineReceived(self, line): + line = line.strip() + log.info('Received: {}'.format(line)) + try: + request = DaqServerRequest.deserialize(line) + except Exception, e: # pylint: disable=W0703 + self.sendError('Received bad request ({}: {})'.format(e.__class__.__name__, e.message)) + else: + self.processRequest(request) + + def processRequest(self, request): + try: + if request.command == 'configure': + self.configure(request) + elif request.command == 'start': + self.start(request) + elif request.command == 'stop': + self.stop(request) + elif request.command == 'list_devices': + self.list_devices(request) + elif request.command == 'list_ports': + self.list_ports(request) + elif request.command == 'list_port_files': + self.list_port_files(request) + elif request.command == 'pull': + self.pull_port_data(request) + elif request.command == 'close': + self.terminate(request) + else: + self.sendError('Received unknown command: {}'.format(request.command)) + except Exception, e: # pylint: disable=W0703 + self.sendError('{}: {}'.format(e.__class__.__name__, e.message)) + + def configure(self, request): + if 'config' in request.params: + result = self.daq_server.configure(request.params['config']) + if not result: + self.sendResponse(Status.OK) + else: + self.sendResponse(Status.OKISH, message=result) + else: + self.sendError('Invalid config; config string not provided.') + + def start(self, request): + result = self.daq_server.start() + if not result: + self.sendResponse(Status.OK) + else: + self.sendResponse(Status.OKISH, message=result) + + def stop(self, request): + result = self.daq_server.stop() + if not result: + self.sendResponse(Status.OK) + else: +
self.sendResponse(Status.OKISH, message=result) + + def pull_port_data(self, request): + if 'port_id' in request.params: + port_id = request.params['port_id'] + port_file = self.daq_server.get_port_file_path(port_id) + if os.path.isfile(port_file): + port = self._initiate_file_transfer(port_file) + self.sendResponse(Status.OK, data={'port_number': port}) + else: + self.sendError('File for port {} does not exist.'.format(port_id)) + else: + self.sendError('Invalid pull request; port id not provided.') + + def list_devices(self, request): + devices = self.daq_server.list_devices() + self.sendResponse(Status.OK, data={'devices': devices}) + + def list_ports(self, request): + port_labels = self.daq_server.list_ports() + self.sendResponse(Status.OK, data={'ports': port_labels}) + + def list_port_files(self, request): + port_labels = self.daq_server.list_port_files() + self.sendResponse(Status.OK, data={'ports': port_labels}) + + def terminate(self, request): + status = Status.OK + message = '' + if self.factory.transfer_sessions: + message = 'Terminating with file transfer sessions in progress. ' + log.warning(message) + for session in list(self.factory.transfer_sessions): + self.factory.transferComplete(session) + message += self.daq_server.terminate() or '' + if message: + status = Status.OKISH + self.sendResponse(status, message) + + def sendError(self, message): + log.error(message) + self.sendResponse(Status.ERROR, message) + + def sendResponse(self, status, message=None, data=None): + response = DaqServerResponse(status, message=message, data=data) + self.sendLine(response.serialize()) + + def sendLine(self, line): + log.info('Responding: {}'.format(line)) + LineReceiver.sendLine(self, line.replace('\r\n', '')) + + def _initiate_file_transfer(self, filepath): + sender_factory = FileSenderFactory(filepath, self.factory) + connector = reactor.listenTCP(0, sender_factory) + self.factory.transferInitiated(sender_factory, connector) + return connector.getHost().port + + +class DaqFactory(Factory): + + protocol = DaqControlProtocol + check_alive_period = 5 * 60 + max_transfer_lifetime = 30 * 60 + + def __init__(self, server): + self.server = server + self.transfer_sessions = {} + + def buildProtocol(self, addr): + proto = DaqControlProtocol(self.server) + proto.factory = self + reactor.callLater(self.check_alive_period, self.pulse) + return proto + + def clientConnectionLost(self, connector, reason): + log.msg('client connection lost: {}.'.format(reason)) + if not isinstance(reason, ConnectionLost): + log.msg('ERROR: Client terminated connection mid-transfer.') + for session in list(self.transfer_sessions): + self.transferComplete(session) + + def transferInitiated(self, session, connector): + self.transfer_sessions[session] = (time.time(), connector) + + def transferComplete(self, session, reason='OK'): + if reason != 'OK': + log.error(reason) + self.transfer_sessions[session][1].stopListening() + del self.transfer_sessions[session] + + def pulse(self): + """Close down any file transfer sessions that have been open for too long.""" + current_time = time.time() + # Iterate over a copy: transferComplete() deletes from transfer_sessions. + for session in list(self.transfer_sessions): + start_time, conn = self.transfer_sessions[session] + if (current_time - start_time) > self.max_transfer_lifetime: + message = '{} session on port {} timed out' + self.transferComplete(session, message.format(session, conn.getHost().port)) + if self.transfer_sessions: + reactor.callLater(self.check_alive_period, self.pulse) + + def __str__(self): + return '<DaqFactory {}>'.format(self.server) + + __repr__ = __str__ + + +class
FileReader(object): + + implements(interfaces.IPushProducer) + + def __init__(self, filepath): + self.fh = open(filepath) + self.proto = None + self.done = False + self._paused = True + + def setProtocol(self, proto): + self.proto = proto + + def resumeProducing(self): + if not self.proto: + raise ProtocolError('resumeProducing called with no protocol set.') + self._paused = False + try: + while not self._paused: + line = self.fh.next().rstrip('\n') + '\r\n' + self.proto.transport.write(line) + except StopIteration: + log.debug('Sent everything.') + self.stopProducing() + + def pauseProducing(self): + self._paused = True + + def stopProducing(self): + self.done = True + self.fh.close() + self.proto.transport.unregisterProducer() + self.proto.transport.loseConnection() + + +class FileSenderProtocol(Protocol): + + def __init__(self, reader): + self.reader = reader + self.factory = None + + def connectionMade(self): + self.transport.registerProducer(self.reader, True) + self.reader.resumeProducing() + + def connectionLost(self, reason=ConnectionDone): + if self.reader.done: + self.factory.transferComplete() + else: + self.reader.pauseProducing() + self.transport.unregisterProducer() + + +class FileSenderFactory(Factory): + + @property + def done(self): + if self.reader: + return self.reader.done + else: + return None + + def __init__(self, path, owner): + self.path = os.path.abspath(path) + self.reader = None + self.owner = owner + + def buildProtocol(self, addr): + if not self.reader: + self.reader = FileReader(self.path) + proto = FileSenderProtocol(self.reader) + proto.factory = self + self.reader.setProtocol(proto) + return proto + + def transferComplete(self): + self.owner.transferComplete(self) + + def __hash__(self): + return hash(self.path) + + def __str__(self): + return '<FileSenderFactory {}>'.format(self.path) + + __repr__ = __str__ + + +def run_server(): + parser = argparse.ArgumentParser() + parser.add_argument('-d', '--directory', help='Working directory', metavar='DIR', default='.') + parser.add_argument('-p', '--port', help='port the server will listen on.', + metavar='PORT', default=45677, type=int) + parser.add_argument('--debug', help='Run in debug mode (no DAQ connected).', + action='store_true', default=False) + parser.add_argument('--verbose', help='Produce verbose output.', action='store_true', default=False) + args = parser.parse_args() + + if args.debug: + global DaqRunner # pylint: disable=W0603 + DaqRunner = DummyDaqRunner + else: + if not DaqRunner: + raise ImportError('DaqRunner unavailable; PyDAQmx may not be installed (use --debug to run without a DAQ).') + if args.verbose or args.debug: + log.start_logging('DEBUG') + else: + log.start_logging('INFO') + + server = DaqServer(args.directory) + reactor.listenTCP(args.port, DaqFactory(server)).getHost() + hostname = socket.gethostbyname(socket.gethostname()) + log.info('Listening on {}:{}'.format(hostname, args.port)) + reactor.run() + + +if __name__ == "__main__": + run_server() diff --git a/wlauto/external/daq_server/src/scripts/run-daq-server b/wlauto/external/daq_server/src/scripts/run-daq-server new file mode 100644 index 00000000..b20d6caf --- /dev/null +++ b/wlauto/external/daq_server/src/scripts/run-daq-server @@ -0,0 +1,3 @@ +#!/usr/bin/env python +from daqpower.server import run_server +run_server() diff --git a/wlauto/external/daq_server/src/scripts/send-daq-command b/wlauto/external/daq_server/src/scripts/send-daq-command new file mode 100644 index 00000000..a4656a67 --- /dev/null +++ b/wlauto/external/daq_server/src/scripts/send-daq-command @@ -0,0 +1,3 @@ +#!/usr/bin/env python +from
daqpower.client import run_send_command +run_send_command() diff --git a/wlauto/external/daq_server/src/setup.py b/wlauto/external/daq_server/src/setup.py new file mode 100644 index 00000000..3c892aa8 --- /dev/null +++ b/wlauto/external/daq_server/src/setup.py @@ -0,0 +1,52 @@ +# Copyright 2013-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +import warnings +from distutils.core import setup + +import daqpower + + +warnings.filterwarnings('ignore', "Unknown distribution option: 'install_requires'") + +params = dict( + name='daqpower', + version=daqpower.__version__, + packages=[ + 'daqpower', + ], + scripts=[ + 'scripts/run-daq-server', + 'scripts/send-daq-command', + ], + url='N/A', + maintainer='workload-automation', + maintainer_email='workload-automation@arm.com', + install_requires=[ + 'twisted', + 'PyDAQmx', + ], + # https://pypi.python.org/pypi?%3Aaction=list_classifiers + classifiers=[ + 'Development Status :: 3 - Alpha', + 'Environment :: Console', + 'License :: Other/Proprietary License', + 'Operating System :: Unix', + 'Programming Language :: Python :: 2.7', + ], +) + +setup(**params) diff --git a/wlauto/external/louie/LICENSE b/wlauto/external/louie/LICENSE new file mode 100644 index 00000000..5b432357 --- /dev/null +++ b/wlauto/external/louie/LICENSE @@ -0,0 +1,12 @@ +This directory contains Louie package that has been modified by ARM Ltd. +Original Louie package is licensed under BSD license. ARM Ltd. changes are +licensed under Apache version 2 license. + +Original Louie package may be found here: + +https://pypi.python.org/pypi/Louie/1.1 + +The text of the BSD License may be viewed here: + +http://opensource.org/licenses/bsd-license.php + diff --git a/wlauto/external/louie/__init__.py b/wlauto/external/louie/__init__.py new file mode 100644 index 00000000..c269dd27 --- /dev/null +++ b/wlauto/external/louie/__init__.py @@ -0,0 +1,46 @@ +__all__ = [ + 'dispatcher', + 'error', + 'plugin', + 'robustapply', + 'saferef', + 'sender', + 'signal', + 'version', + + 'connect', + 'disconnect', + 'get_all_receivers', + 'reset', + 'send', + 'send_exact', + 'send_minimal', + 'send_robust', + + 'install_plugin', + 'remove_plugin', + 'Plugin', + 'QtWidgetPlugin', + 'TwistedDispatchPlugin', + + 'Anonymous', + 'Any', + + 'All', + 'Signal', + ] + +import louie.dispatcher, louie.error, louie.plugin, louie.robustapply, \ + louie.saferef, louie.sender, louie.signal, louie.version + +from louie.dispatcher import \ + connect, disconnect, get_all_receivers, reset, \ + send, send_exact, send_minimal, send_robust + +from louie.plugin import \ + install_plugin, remove_plugin, Plugin, \ + QtWidgetPlugin, TwistedDispatchPlugin + +from louie.sender import Anonymous, Any + +from louie.signal import All, Signal diff --git a/wlauto/external/louie/dispatcher.py b/wlauto/external/louie/dispatcher.py new file mode 100644 index 00000000..1136b3f1 --- /dev/null +++ b/wlauto/external/louie/dispatcher.py @@ -0,0 +1,591 @@ +"""Multiple-producer-multiple-consumer signal-dispatching. 
+ +``dispatcher`` is the core of Louie, providing the primary API and the +core logic for the system. + +Internal attributes: + +- ``WEAKREF_TYPES``: Tuple of types/classes which represent weak + references to receivers, and thus must be dereferenced on retrieval + to retrieve the callable object + +- ``connections``:: + + { senderkey (id) : { signal : [receivers...] } } + +- ``senders``: Used for cleaning up sender references on sender + deletion:: + + { senderkey (id) : weakref(sender) } + +- ``senders_back``: Used for cleaning up receiver references on receiver + deletion:: + + { receiverkey (id) : [senderkey (id)...] } +""" + +import os +import weakref + +try: + set +except NameError: + from sets import Set as set, ImmutableSet as frozenset + +from louie import error +from louie import robustapply +from louie import saferef +from louie.sender import Any, Anonymous +from louie.signal import All +from prioritylist import PriorityList + + +# Support for statistics. +if __debug__: + connects = 0 + disconnects = 0 + sends = 0 + + def print_stats(): + print ('\n' + 'Louie connects: %i\n' + 'Louie disconnects: %i\n' + 'Louie sends: %i\n' + '\n') % (connects, disconnects, sends) + + if 'PYDISPATCH_STATS' in os.environ: + import atexit + atexit.register(print_stats) + + + +WEAKREF_TYPES = (weakref.ReferenceType, saferef.BoundMethodWeakref) + + +connections = {} +senders = {} +senders_back = {} +plugins = [] + +def reset(): + """Reset the state of Louie. + + Useful during unit testing. Should be avoided otherwise. + """ + global connections, senders, senders_back, plugins + connections = {} + senders = {} + senders_back = {} + plugins = [] + + +def connect(receiver, signal=All, sender=Any, weak=True, priority=0): + """Connect ``receiver`` to ``sender`` for ``signal``. + + - ``receiver``: A callable Python object which is to receive + messages/signals/events. Receivers must be hashable objects. + + If weak is ``True``, then receiver must be weak-referencable (more + precisely ``saferef.safe_ref()`` must be able to create a + reference to the receiver). + + Receivers are fairly flexible in their specification, as the + machinery in the ``robustapply`` module takes care of most of the + details regarding figuring out appropriate subsets of the sent + arguments to apply to a given receiver. + + Note: If ``receiver`` is itself a weak reference (a callable), it + will be de-referenced by the system's machinery, so *generally* + weak references are not suitable as receivers, though some use + might be found for the facility whereby a higher-level library + passes in pre-weakrefed receiver references. + + - ``signal``: The signal to which the receiver should respond. + + If ``All``, receiver will receive all signals from the indicated + sender (which might also be ``All``, but is not necessarily + ``All``). + + Otherwise must be a hashable Python object other than ``None`` + (``DispatcherError`` raised on ``None``). + + - ``sender``: The sender to which the receiver should respond. + + If ``Any``, receiver will receive the indicated signals from any + sender. + + If ``Anonymous``, receiver will only receive indicated signals + from ``send``/``send_exact`` which do not specify a sender, or + specify ``Anonymous`` explicitly as the sender. + + Otherwise can be any python object. + + - ``weak``: Whether to use weak references to the receiver. + + By default, the module will attempt to use weak references to + the receiver objects. If this parameter is ``False``, then strong + references will be used. 
+ + - ``priority``: specifies the priority with which the receiver should + be notified + + Returns ``None``, may raise ``DispatcherTypeError``. + """ + if signal is None: + raise error.DispatcherTypeError( + 'Signal cannot be None (receiver=%r sender=%r)' + % (receiver, sender)) + if weak: + receiver = saferef.safe_ref(receiver, on_delete=_remove_receiver) + senderkey = id(sender) + if connections.has_key(senderkey): + signals = connections[senderkey] + else: + connections[senderkey] = signals = {} + # Keep track of senders for cleanup. + # Is Anonymous something we want to clean up? + if sender not in (None, Anonymous, Any): + def remove(object, senderkey=senderkey): + _remove_sender(senderkey=senderkey) + # Skip objects that can not be weakly referenced, which means + # they won't be automatically cleaned up, but that's too bad. + try: + weak_sender = weakref.ref(sender, remove) + senders[senderkey] = weak_sender + except: + pass + receiver_id = id(receiver) + # get current set, remove any current references to + # this receiver in the set, including back-references + if signals.has_key(signal): + receivers = signals[signal] + _remove_old_back_refs(senderkey, signal, receiver, receivers) + else: + receivers = signals[signal] = PriorityList() + try: + current = senders_back.get(receiver_id) + if current is None: + senders_back[receiver_id] = current = [] + if senderkey not in current: + current.append(senderkey) + except: + pass + receivers.add(receiver, priority) + # Update stats. + if __debug__: + global connects + connects += 1 + + +def disconnect(receiver, signal=All, sender=Any, weak=True): + """Disconnect ``receiver`` from ``sender`` for ``signal``. + + - ``receiver``: The registered receiver to disconnect. + + - ``signal``: The registered signal to disconnect. + + - ``sender``: The registered sender to disconnect. + + - ``weak``: The weakref state to disconnect. + + ``disconnect`` reverses the process of ``connect``; the semantics for + the individual elements are logically equivalent to a tuple of + ``(receiver, signal, sender, weak)`` used as a key to be deleted + from the internal routing tables. (The actual process is slightly + more complex but the semantics are basically the same). + + Note: Using ``disconnect`` is not required to cleanup routing when + an object is deleted; the framework will remove routes for deleted + objects automatically. It's only necessary to disconnect if you + want to stop routing to a live object. + + Returns ``None``, may raise ``DispatcherTypeError`` or + ``DispatcherKeyError``. + """ + if signal is None: + raise error.DispatcherTypeError( + 'Signal cannot be None (receiver=%r sender=%r)' + % (receiver, sender)) + if weak: + receiver = saferef.safe_ref(receiver) + senderkey = id(sender) + try: + signals = connections[senderkey] + receivers = signals[signal] + except KeyError: + raise error.DispatcherKeyError( + 'No receivers found for signal %r from sender %r' + % (signal, sender) + ) + try: + # also removes from receivers + _remove_old_back_refs(senderkey, signal, receiver, receivers) + except ValueError: + raise error.DispatcherKeyError( + 'No connection to receiver %s for signal %s from sender %s' + % (receiver, signal, sender) + ) + _cleanup_connections(senderkey, signal) + # Update stats. + if __debug__: + global disconnects + disconnects += 1 + + +def get_receivers(sender=Any, signal=All): + """Get list of receivers from global tables.
+ + This function allows you to retrieve the raw list of receivers + from the connections table for the given sender and signal pair. + + Note: There is no guarantee that this is the actual list stored in + the connections table, so the value should be treated as a simple + iterable/truth value rather than, for instance, a list to which you + might append new records. + + Normally you would use ``live_receivers(get_receivers(...))`` to + retrieve the actual receiver objects as an iterable object. + """ + try: + return connections[id(sender)][signal] + except KeyError: + return [] + + +def live_receivers(receivers): + """Filter sequence of receivers to get resolved, live receivers. + + This is a generator which will iterate over the passed sequence, + checking for weak references and resolving them, then returning + all live receivers. + """ + for receiver in receivers: + if isinstance(receiver, WEAKREF_TYPES): + # Dereference the weak reference. + receiver = receiver() + if receiver is not None: + # Check installed plugins to make sure this receiver is + # live. + live = True + for plugin in plugins: + if not plugin.is_live(receiver): + live = False + break + if live: + yield receiver + + +def get_all_receivers(sender=Any, signal=All): + """Get list of all receivers from global tables. + + This gets all receivers which should receive the given signal from + sender; each receiver will be produced only once by the + resulting generator. + """ + yielded = set() + for receivers in ( + # Get receivers that receive *this* signal from *this* sender. + get_receivers(sender, signal), + # Add receivers that receive *all* signals from *this* sender. + get_receivers(sender, All), + # Add receivers that receive *this* signal from *any* sender. + get_receivers(Any, signal), + # Add receivers that receive *all* signals from *any* sender. + get_receivers(Any, All), + ): + for receiver in receivers: + if receiver: # filter out dead instance-method weakrefs + try: + if receiver not in yielded: + yielded.add(receiver) + yield receiver + except TypeError: + # dead weakrefs raise TypeError on hash... + pass + + +def send(signal=All, sender=Anonymous, *arguments, **named): + """Send ``signal`` from ``sender`` to all connected receivers. + + - ``signal``: (Hashable) signal value; see ``connect`` for details. + + - ``sender``: The sender of the signal. + + If ``Any``, only receivers registered for ``Any`` will receive the + message. + + If ``Anonymous``, only receivers registered to receive messages + from ``Anonymous`` or ``Any`` will receive the message. + + Otherwise can be any Python object (normally one registered with + a connect if you actually want something to occur). + + - ``arguments``: Positional arguments which will be passed to *all* + receivers. Note that this may raise ``TypeError`` if the receivers + do not allow the particular arguments. Note also that arguments + are applied before named arguments, so they should be used with + care. + + - ``named``: Named arguments which will be filtered according to the + parameters of the receivers to only provide those acceptable to + the receiver. + + Return a list of tuple pairs ``[(receiver, response), ...]`` + + If any receiver raises an error, the error propagates back through + send, terminating the dispatch loop, so it is quite possible to + not have all receivers called if one raises an error. + """ + # Call each receiver with whatever arguments it can accept. + # Return a list of tuple pairs [(receiver, response), ... ].
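+ # Illustrative usage (hypothetical receiver, shown as a comment only): + # def on_result(sender, value=None): + # print 'got', value, 'from', sender + # connect(on_result, signal='result-ready') + # send('result-ready', value=42) # -> [(on_result, None)]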
+ responses = [] + for receiver in live_receivers(get_all_receivers(sender, signal)): + # Wrap receiver using installed plugins. + original = receiver + for plugin in plugins: + receiver = plugin.wrap_receiver(receiver) + response = robustapply.robust_apply( + receiver, original, + signal=signal, + sender=sender, + *arguments, + **named + ) + responses.append((receiver, response)) + # Update stats. + if __debug__: + global sends + sends += 1 + return responses + + +def send_minimal(signal=All, sender=Anonymous, *arguments, **named): + """Like ``send``, but does not attach ``signal`` and ``sender`` + arguments to the call to the receiver.""" + # Call each receiver with whatever arguments it can accept. + # Return a list of tuple pairs [(receiver, response), ... ]. + responses = [] + for receiver in live_receivers(get_all_receivers(sender, signal)): + # Wrap receiver using installed plugins. + original = receiver + for plugin in plugins: + receiver = plugin.wrap_receiver(receiver) + response = robustapply.robust_apply( + receiver, original, + *arguments, + **named + ) + responses.append((receiver, response)) + # Update stats. + if __debug__: + global sends + sends += 1 + return responses + + +def send_exact(signal=All, sender=Anonymous, *arguments, **named): + """Send ``signal`` only to receivers registered for exact message. + + ``send_exact`` allows for avoiding ``Any``/``Anonymous`` registered + handlers, sending only to those receivers explicitly registered + for a particular signal on a particular sender. + """ + responses = [] + for receiver in live_receivers(get_receivers(sender, signal)): + # Wrap receiver using installed plugins. + original = receiver + for plugin in plugins: + receiver = plugin.wrap_receiver(receiver) + response = robustapply.robust_apply( + receiver, original, + signal=signal, + sender=sender, + *arguments, + **named + ) + responses.append((receiver, response)) + return responses + + +def send_robust(signal=All, sender=Anonymous, *arguments, **named): + """Send ``signal`` from ``sender`` to all connected receivers, + catching errors. + + - ``signal``: (Hashable) signal value; see ``connect`` for details. + + - ``sender``: The sender of the signal. + + If ``Any``, only receivers registered for ``Any`` will receive the + message. + + If ``Anonymous``, only receivers registered to receive messages + from ``Anonymous`` or ``Any`` will receive the message. + + Otherwise can be any Python object (normally one registered with + a connect if you actually want something to occur). + + - ``arguments``: Positional arguments which will be passed to *all* + receivers. Note that this may raise ``TypeError`` if the receivers + do not allow the particular arguments. Note also that arguments + are applied before named arguments, so they should be used with + care. + + - ``named``: Named arguments which will be filtered according to the + parameters of the receivers to only provide those acceptable to + the receiver. + + Return a list of tuple pairs ``[(receiver, response), ... ]`` + + If any receiver raises an error (specifically, any subclass of + ``Exception``), the error instance is returned as the result for + that receiver. + """ + # Call each receiver with whatever arguments it can accept. + # Return a list of tuple pairs [(receiver, response), ... ].
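+ # Unlike send(), an exception raised by a receiver is caught here and returned + # in that receiver's response slot (e.g. (receiver, ValueError(...))), and the + # remaining receivers are still called.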
+ responses = [] + for receiver in live_receivers(get_all_receivers(sender, signal)): + original = receiver + for plugin in plugins: + receiver = plugin.wrap_receiver(receiver) + try: + response = robustapply.robust_apply( + receiver, original, + signal=signal, + sender=sender, + *arguments, + **named + ) + except Exception, err: + responses.append((receiver, err)) + else: + responses.append((receiver, response)) + return responses + + +def _remove_receiver(receiver): + """Remove ``receiver`` from connections.""" + if not senders_back: + # During module cleanup the mapping will be replaced with None. + return False + backKey = id(receiver) + for senderkey in senders_back.get(backKey, ()): + try: + signals = connections[senderkey].keys() + except KeyError: + pass + else: + for signal in signals: + try: + receivers = connections[senderkey][signal] + except KeyError: + pass + else: + try: + receivers.remove(receiver) + except Exception: + pass + _cleanup_connections(senderkey, signal) + try: + del senders_back[backKey] + except KeyError: + pass + + +def _cleanup_connections(senderkey, signal): + """Delete empty signals for ``senderkey``. Delete ``senderkey`` if + empty.""" + try: + receivers = connections[senderkey][signal] + except: + pass + else: + if not receivers: + # No more connected receivers. Therefore, remove the signal. + try: + signals = connections[senderkey] + except KeyError: + pass + else: + del signals[signal] + if not signals: + # No more signal connections. Therefore, remove the sender. + _remove_sender(senderkey) + + +def _remove_sender(senderkey): + """Remove ``senderkey`` from connections.""" + _remove_back_refs(senderkey) + try: + del connections[senderkey] + except KeyError: + pass + # Senderkey will only be in senders dictionary if sender + # could be weakly referenced. + try: + del senders[senderkey] + except: + pass + + +def _remove_back_refs(senderkey): + """Remove all back-references to this ``senderkey``.""" + try: + signals = connections[senderkey] + except KeyError: + signals = None + else: + for signal, receivers in signals.iteritems(): + for receiver in receivers: + _kill_back_ref(receiver, senderkey) + + +def _remove_old_back_refs(senderkey, signal, receiver, receivers): + """Kill old ``senders_back`` references from ``receiver``. + + This guards against multiple registration of the same receiver for + a given signal and sender leaking memory as old back reference + records build up. + + Also removes old receiver instance from receivers. 
+ """ + try: + index = receivers.index(receiver) + # need to scan back references here and remove senderkey + except ValueError: + return False + else: + old_receiver = receivers[index] + del receivers[index] + found = 0 + signals = connections.get(signal) + if signals is not None: + for sig, recs in connections.get(signal, {}).iteritems(): + if sig != signal: + for rec in recs: + if rec is old_receiver: + found = 1 + break + if not found: + _kill_back_ref(old_receiver, senderkey) + return True + return False + + +def _kill_back_ref(receiver, senderkey): + """Do actual removal of back reference from ``receiver`` to + ``senderkey``.""" + receiverkey = id(receiver) + senders = senders_back.get(receiverkey, ()) + while senderkey in senders: + try: + senders.remove(senderkey) + except: + break + if not senders: + try: + del senders_back[receiverkey] + except KeyError: + pass + return True + + diff --git a/wlauto/external/louie/error.py b/wlauto/external/louie/error.py new file mode 100644 index 00000000..04f98ea6 --- /dev/null +++ b/wlauto/external/louie/error.py @@ -0,0 +1,22 @@ +"""Error types for Louie.""" + + +class LouieError(Exception): + """Base class for all Louie errors""" + + +class DispatcherError(LouieError): + """Base class for all Dispatcher errors""" + + +class DispatcherKeyError(KeyError, DispatcherError): + """Error raised when unknown (sender, signal) specified""" + + +class DispatcherTypeError(TypeError, DispatcherError): + """Error raised when inappropriate signal-type specified (None)""" + + +class PluginTypeError(TypeError, LouieError): + """Error raise when trying to install more than one plugin of a + certain type.""" diff --git a/wlauto/external/louie/plugin.py b/wlauto/external/louie/plugin.py new file mode 100644 index 00000000..c186f2f9 --- /dev/null +++ b/wlauto/external/louie/plugin.py @@ -0,0 +1,108 @@ +"""Common plugins for Louie.""" + +from louie import dispatcher +from louie import error + + +def install_plugin(plugin): + cls = plugin.__class__ + for p in dispatcher.plugins: + if p.__class__ is cls: + raise error.PluginTypeError( + 'Plugin of type %r already installed.' % cls) + dispatcher.plugins.append(plugin) + +def remove_plugin(plugin): + dispatcher.plugins.remove(plugin) + + +class Plugin(object): + """Base class for Louie plugins. + + Plugins are used to extend or alter the behavior of Louie + in a uniform way without having to modify the Louie code + itself. + """ + + def is_live(self, receiver): + """Return True if the receiver is still live. + + Only called for receivers who have already been determined to + be live by default Louie semantics. + """ + return True + + def wrap_receiver(self, receiver): + """Return a callable that passes arguments to the receiver. + + Useful when you want to change the behavior of all receivers. + """ + return receiver + + +class QtWidgetPlugin(Plugin): + """A Plugin for Louie that knows how to handle Qt widgets + when using PyQt built with SIP 4 or higher. + + Weak references are not useful when dealing with QWidget + instances, because even after a QWidget is closed and destroyed, + only the C++ object is destroyed. The Python 'shell' object + remains, but raises a RuntimeError when an attempt is made to call + an underlying QWidget method. + + This plugin alleviates this behavior, and if a QWidget instance is + found that is just an empty shell, it prevents Louie from + dispatching to any methods on those objects. 
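+
+    Editor's note, typical installation (mirrors the plugin tests)::
+
+        louie.install_plugin(QtWidgetPlugin())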
+ """ + + def __init__(self): + try: + import qt + except ImportError: + self.is_live = self._is_live_no_qt + else: + self.qt = qt + + def is_live(self, receiver): + """If receiver is a method on a QWidget, only return True if + it hasn't been destroyed.""" + if (hasattr(receiver, 'im_self') and + isinstance(receiver.im_self, self.qt.QWidget) + ): + try: + receiver.im_self.x() + except RuntimeError: + return False + return True + + def _is_live_no_qt(self, receiver): + return True + + +class TwistedDispatchPlugin(Plugin): + """Plugin for Louie that wraps all receivers in callables + that return Twisted Deferred objects. + + When the wrapped receiver is called, it adds a call to the actual + receiver to the reactor event loop, and returns a Deferred that is + called back with the result. + """ + + def __init__(self): + # Don't import reactor ourselves, but make access to it + # easier. + from twisted import internet + from twisted.internet.defer import Deferred + self._internet = internet + self._Deferred = Deferred + + def wrap_receiver(self, receiver): + def wrapper(*args, **kw): + d = self._Deferred() + def called(dummy): + return receiver(*args, **kw) + d.addCallback(called) + self._internet.reactor.callLater(0, d.callback, None) + return d + return wrapper + diff --git a/wlauto/external/louie/prioritylist.py b/wlauto/external/louie/prioritylist.py new file mode 100644 index 00000000..7a6f51eb --- /dev/null +++ b/wlauto/external/louie/prioritylist.py @@ -0,0 +1,128 @@ +"""OrderedList class + +This class keeps its elements ordered according to their priority. +""" +from collections import defaultdict +import numbers +from bisect import insort + +class PriorityList(object): + + def __init__(self): + """ + Returns an OrderedReceivers object that externaly behaves + like a list but it maintains the order of its elements + according to their priority. + """ + self.elements = defaultdict(list) + self.is_ordered = True + self.priorities = [] + self.size = 0 + self._cached_elements = None + + def __del__(self): + pass + + def __iter__(self): + """ + this method makes PriorityList class iterable + """ + self._order_elements() + for priority in reversed(self.priorities): # highest priority first + for element in self.elements[priority]: + yield element + + def __getitem__(self, index): + self._order_elements() + return self._to_list()[index] + + def __delitem__(self, index): + self._order_elements() + if isinstance(index, numbers.Integral): + index = int(index) + if index < 0: + index_range = [len(self)+index] + else: + index_range = [index] + elif isinstance(index, slice): + index_range = range(index.start or 0, index.stop, index.step or 1) + else: + raise ValueError('Invalid index {}'.format(index)) + current_global_offset = 0 + priority_counts = {priority : count for (priority, count) in + zip(self.priorities, [len(self.elements[p]) for p in self.priorities])} + for priority in self.priorities: + if not index_range: + break + priority_offset = 0 + while index_range: + del_index = index_range[0] + if priority_counts[priority] + current_global_offset <= del_index: + current_global_offset += priority_counts[priority] + break + within_priority_index = del_index - (current_global_offset + priority_offset) + self._delete(priority, within_priority_index) + priority_offset += 1 + index_range.pop(0) + + def __len__(self): + return self.size + + def add(self, new_element, priority=0, force_ordering=True): + """ + adds a new item in the list. 
+
+        - ``new_element`` the element to be inserted in the PriorityList
+        - ``priority`` is the priority of the element which specifies its
+          order within the list
+        - ``force_ordering`` indicates whether elements should be ordered
+          right now. If set to False, ordering happens on demand (lazily).
+        """
+        self._add_element(new_element, priority)
+        if priority not in self.priorities:
+            self._add_priority(priority, force_ordering)
+
+    def index(self, element):
+        return self._to_list().index(element)
+
+    def remove(self, element):
+        index = self.index(element)
+        self.__delitem__(index)
+
+    def _order_elements(self):
+        if not self.is_ordered:
+            self.priorities = sorted(self.priorities)
+            self.is_ordered = True
+
+    def _to_list(self):
+        if self._cached_elements is None:
+            self._order_elements()
+            self._cached_elements = []
+            for priority in self.priorities:
+                self._cached_elements += self.elements[priority]
+        return self._cached_elements
+
+    def _add_element(self, element, priority):
+        self.elements[priority].append(element)
+        self.size += 1
+        self._cached_elements = None
+
+    def _delete(self, priority, priority_index):
+        del self.elements[priority][priority_index]
+        self.size -= 1
+        if len(self.elements[priority]) == 0:
+            self.priorities.remove(priority)
+        self._cached_elements = None
+
+    def _add_priority(self, priority, force_ordering):
+        if force_ordering and self.is_ordered:
+            insort(self.priorities, priority)
+        elif not force_ordering:
+            self.priorities.append(priority)
+            self.is_ordered = False
+        elif not self.is_ordered:
+            self.priorities.append(priority)
+            self._order_elements()
+        else:
+            raise AssertionError('Should never get here.')
+
diff --git a/wlauto/external/louie/robustapply.py b/wlauto/external/louie/robustapply.py
new file mode 100644
index 00000000..f932b875
--- /dev/null
+++ b/wlauto/external/louie/robustapply.py
@@ -0,0 +1,58 @@
+"""Robust apply mechanism.
+
+Provides a function ``robust_apply``, which can sort out what arguments
+a given callable object can take, and subset the given arguments to
+match only those which are acceptable.
+"""
+
+def function(receiver):
+    """Get function-like callable object for given receiver.
+
+    returns (function_or_method, codeObject, fromMethod)
+
+    If fromMethod is true, then the callable already has its first
+    argument bound.
+    """
+    if hasattr(receiver, '__call__'):
+        # receiver is a class instance; assume it is callable.
+        # Reassign receiver to the actual method that will be called.
+        c = receiver.__call__
+        if hasattr(c, 'im_func') or hasattr(c, 'im_code'):
+            receiver = c
+    if hasattr(receiver, 'im_func'):
+        # receiver is an instance-method.
+        return receiver, receiver.im_func.func_code, 1
+    elif not hasattr(receiver, 'func_code'):
+        raise ValueError(
+            'unknown receiver type %s %s' % (receiver, type(receiver)))
+    return receiver, receiver.func_code, 0
+
+
+def robust_apply(receiver, signature, *arguments, **named):
+    """Call receiver with arguments and appropriate subset of named.
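+
+    Editor's illustration (``on_event`` and ``sess`` are hypothetical)::
+
+        def on_event(sender):
+            return sender
+        # 'extra' is silently dropped, since on_event cannot accept it:
+        robust_apply(on_event, on_event, sender=sess, extra=1)
+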
+ ``signature`` is the callable used to determine the call signature + of the receiver, in case ``receiver`` is a callable wrapper of the + actual receiver.""" + signature, code_object, startIndex = function(signature) + acceptable = code_object.co_varnames[ + startIndex + len(arguments): + code_object.co_argcount + ] + for name in code_object.co_varnames[ + startIndex:startIndex + len(arguments) + ]: + if named.has_key(name): + raise TypeError( + 'Argument %r specified both positionally ' + 'and as a keyword for calling %r' + % (name, signature) + ) + if not (code_object.co_flags & 8): + # fc does not have a **kwds type parameter, therefore + # remove unacceptable arguments. + for arg in named.keys(): + if arg not in acceptable: + del named[arg] + return receiver(*arguments, **named) + + diff --git a/wlauto/external/louie/saferef.py b/wlauto/external/louie/saferef.py new file mode 100644 index 00000000..c3e98c0a --- /dev/null +++ b/wlauto/external/louie/saferef.py @@ -0,0 +1,179 @@ +"""Refactored 'safe reference from dispatcher.py""" + +import weakref +import traceback + + +def safe_ref(target, on_delete=None): + """Return a *safe* weak reference to a callable target. + + - ``target``: The object to be weakly referenced, if it's a bound + method reference, will create a BoundMethodWeakref, otherwise + creates a simple weakref. + + - ``on_delete``: If provided, will have a hard reference stored to + the callable to be called after the safe reference goes out of + scope with the reference object, (either a weakref or a + BoundMethodWeakref) as argument. + """ + if hasattr(target, 'im_self'): + if target.im_self is not None: + # Turn a bound method into a BoundMethodWeakref instance. + # Keep track of these instances for lookup by disconnect(). + assert hasattr(target, 'im_func'), ( + "safe_ref target %r has im_self, but no im_func, " + "don't know how to create reference" + % target + ) + reference = BoundMethodWeakref(target=target, on_delete=on_delete) + return reference + if callable(on_delete): + return weakref.ref(target, on_delete) + else: + return weakref.ref(target) + + +class BoundMethodWeakref(object): + """'Safe' and reusable weak references to instance methods. + + BoundMethodWeakref objects provide a mechanism for referencing a + bound method without requiring that the method object itself + (which is normally a transient object) is kept alive. Instead, + the BoundMethodWeakref object keeps weak references to both the + object and the function which together define the instance method. + + Attributes: + + - ``key``: The identity key for the reference, calculated by the + class's calculate_key method applied to the target instance method. + + - ``deletion_methods``: Sequence of callable objects taking single + argument, a reference to this object which will be called when + *either* the target object or target function is garbage + collected (i.e. when this object becomes invalid). These are + specified as the on_delete parameters of safe_ref calls. + + - ``weak_self``: Weak reference to the target object. + + - ``weak_func``: Weak reference to the target function. + + Class Attributes: + + - ``_all_instances``: Class attribute pointing to all live + BoundMethodWeakref objects indexed by the class's + calculate_key(target) method applied to the target objects. + This weak value dictionary is used to short-circuit creation so + that multiple references to the same (object, function) pair + produce the same BoundMethodWeakref instance. 
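+
+    Editor's illustration of the short-circuit behaviour (``obj`` is
+    hypothetical)::
+
+        ref1 = BoundMethodWeakref(obj.method)
+        ref2 = BoundMethodWeakref(obj.method)
+        assert ref1 is ref2  # same (object, function) pair is reused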
+ """ + + _all_instances = weakref.WeakValueDictionary() + + def __new__(cls, target, on_delete=None, *arguments, **named): + """Create new instance or return current instance. + + Basically this method of construction allows us to + short-circuit creation of references to already- referenced + instance methods. The key corresponding to the target is + calculated, and if there is already an existing reference, + that is returned, with its deletion_methods attribute updated. + Otherwise the new instance is created and registered in the + table of already-referenced methods. + """ + key = cls.calculate_key(target) + current = cls._all_instances.get(key) + if current is not None: + current.deletion_methods.append(on_delete) + return current + else: + base = super(BoundMethodWeakref, cls).__new__(cls) + cls._all_instances[key] = base + base.__init__(target, on_delete, *arguments, **named) + return base + + def __init__(self, target, on_delete=None): + """Return a weak-reference-like instance for a bound method. + + - ``target``: The instance-method target for the weak reference, + must have im_self and im_func attributes and be + reconstructable via the following, which is true of built-in + instance methods:: + + target.im_func.__get__( target.im_self ) + + - ``on_delete``: Optional callback which will be called when + this weak reference ceases to be valid (i.e. either the + object or the function is garbage collected). Should take a + single argument, which will be passed a pointer to this + object. + """ + def remove(weak, self=self): + """Set self.isDead to True when method or instance is destroyed.""" + methods = self.deletion_methods[:] + del self.deletion_methods[:] + try: + del self.__class__._all_instances[self.key] + except KeyError: + pass + for function in methods: + try: + if callable(function): + function(self) + except Exception: + try: + traceback.print_exc() + except AttributeError, e: + print ('Exception during saferef %s ' + 'cleanup function %s: %s' % (self, function, e)) + self.deletion_methods = [on_delete] + self.key = self.calculate_key(target) + self.weak_self = weakref.ref(target.im_self, remove) + self.weak_func = weakref.ref(target.im_func, remove) + self.self_name = str(target.im_self) + self.func_name = str(target.im_func.__name__) + + def calculate_key(cls, target): + """Calculate the reference key for this reference. + + Currently this is a two-tuple of the id()'s of the target + object and the target function respectively. + """ + return (id(target.im_self), id(target.im_func)) + calculate_key = classmethod(calculate_key) + + def __str__(self): + """Give a friendly representation of the object.""" + return "%s(%s.%s)" % ( + self.__class__.__name__, + self.self_name, + self.func_name, + ) + + __repr__ = __str__ + + def __nonzero__(self): + """Whether we are still a valid reference.""" + return self() is not None + + def __cmp__(self, other): + """Compare with another reference.""" + if not isinstance(other, self.__class__): + return cmp(self.__class__, type(other)) + return cmp(self.key, other.key) + + def __call__(self): + """Return a strong reference to the bound method. + + If the target cannot be retrieved, then will return None, + otherwise returns a bound instance method for our object and + function. + + Note: You may call this method any number of times, as it does + not invalidate the reference. 
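+
+        Editor's illustration (``obj`` is hypothetical)::
+
+            ref = safe_ref(obj.method)
+            method = ref()          # bound method, or None if dead
+            if method is not None:
+                method()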
+        """
+        target = self.weak_self()
+        if target is not None:
+            function = self.weak_func()
+            if function is not None:
+                return function.__get__(target)
+        return None
diff --git a/wlauto/external/louie/sender.py b/wlauto/external/louie/sender.py
new file mode 100644
index 00000000..aac6c79c
--- /dev/null
+++ b/wlauto/external/louie/sender.py
@@ -0,0 +1,39 @@
+"""Sender classes."""
+
+
+class _SENDER(type):
+    """Base metaclass for sender classes."""
+
+    def __str__(cls):
+        return '<Sender: %s>' % (cls.__name__, )
+
+
+class Any(object):
+    """Used to represent 'any sender'.
+
+    The Any class can be used with connect, disconnect, send, or
+    sendExact to denote that the sender parameter should match any
+    sender, not just a particular sender.
+    """
+
+    __metaclass__ = _SENDER
+
+
+class Anonymous(object):
+    """Singleton used to signal 'anonymous sender'.
+
+    The Anonymous class is used to signal that the sender of a message
+    is not specified (as distinct from being 'any sender').
+    Registering callbacks for Anonymous will only receive messages
+    sent without senders. Sending with Anonymous will only send
+    messages to those receivers registered for Any or Anonymous.
+
+    Note: The default sender for connect is Any, while the default
+    sender for send is Anonymous. This has the effect that if you do
+    not specify any senders in either function then all messages are
+    routed as though there was a single sender (Anonymous) being used
+    everywhere.
+    """
+
+    __metaclass__ = _SENDER
+
diff --git a/wlauto/external/louie/signal.py b/wlauto/external/louie/signal.py
new file mode 100644
index 00000000..0379151a
--- /dev/null
+++ b/wlauto/external/louie/signal.py
@@ -0,0 +1,30 @@
+"""Signal class.
+
+This class is provided as a way to consistently define and document
+signal types. Signal classes also have a useful string
+representation.
+
+Louie does not require you to use a subclass of Signal for signals.
+"""
+
+
+class _SIGNAL(type):
+    """Base metaclass for signal classes."""
+
+    def __str__(cls):
+        return '<Signal: %s>' % (cls.__name__, )
+
+
+class Signal(object):
+
+    __metaclass__ = _SIGNAL
+
+
+class All(Signal):
+    """Used to represent 'all signals'.
+
+    The All class can be used with connect, disconnect, send, or
+    sendExact to denote that the receiver should react to all signals,
+    not just a particular signal.
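+
+    Editor's illustration (``log_everything`` is hypothetical)::
+
+        louie.connect(log_everything, louie.All)  # receives every signal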
+ """ + diff --git a/wlauto/external/louie/test/__init__.py b/wlauto/external/louie/test/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/wlauto/external/louie/test/conftest.py b/wlauto/external/louie/test/conftest.py new file mode 100644 index 00000000..3b241af8 --- /dev/null +++ b/wlauto/external/louie/test/conftest.py @@ -0,0 +1,5 @@ +import sys +import os + +sys.path.insert(0, os.path.dirname(os.path.dirname(__file__))) + diff --git a/wlauto/external/louie/test/fixture.py b/wlauto/external/louie/test/fixture.py new file mode 100644 index 00000000..e69de29b diff --git a/wlauto/external/louie/test/test_dispatcher.py b/wlauto/external/louie/test/test_dispatcher.py new file mode 100644 index 00000000..b58f1c9f --- /dev/null +++ b/wlauto/external/louie/test/test_dispatcher.py @@ -0,0 +1,154 @@ +import unittest + +import louie +from louie import dispatcher + + +def x(a): + return a + + +class Dummy(object): + pass + + +class Callable(object): + + def __call__(self, a): + return a + + def a(self, a): + return a + + +class TestDispatcher(unittest.TestCase): + + def setUp(self): + louie.reset() + + def _isclean(self): + """Assert that everything has been cleaned up automatically""" + assert len(dispatcher.senders_back) == 0, dispatcher.senders_back + assert len(dispatcher.connections) == 0, dispatcher.connections + assert len(dispatcher.senders) == 0, dispatcher.senders + + def test_Exact(self): + a = Dummy() + signal = 'this' + louie.connect(x, signal, a) + expected = [(x, a)] + result = louie.send('this', a, a=a) + assert result == expected, ( + "Send didn't return expected result:\n\texpected:%s\n\tgot:%s" + % (expected, result)) + louie.disconnect(x, signal, a) + assert len(list(louie.get_all_receivers(a, signal))) == 0 + self._isclean() + + def test_AnonymousSend(self): + a = Dummy() + signal = 'this' + louie.connect(x, signal) + expected = [(x, a)] + result = louie.send(signal, None, a=a) + assert result == expected, ( + "Send didn't return expected result:\n\texpected:%s\n\tgot:%s" + % (expected, result)) + louie.disconnect(x, signal) + assert len(list(louie.get_all_receivers(None, signal))) == 0 + self._isclean() + + def test_AnyRegistration(self): + a = Dummy() + signal = 'this' + louie.connect(x, signal, louie.Any) + expected = [(x, a)] + result = louie.send('this', object(), a=a) + assert result == expected, ( + "Send didn't return expected result:\n\texpected:%s\n\tgot:%s" + % (expected, result)) + louie.disconnect(x, signal, louie.Any) + expected = [] + result = louie.send('this', object(), a=a) + assert result == expected, ( + "Send didn't return expected result:\n\texpected:%s\n\tgot:%s" + % (expected, result)) + assert len(list(louie.get_all_receivers(louie.Any, signal))) == 0 + self._isclean() + + def test_AllRegistration(self): + a = Dummy() + signal = 'this' + louie.connect(x, louie.All, a) + expected = [(x, a)] + result = louie.send('this', a, a=a) + assert result == expected, ( + "Send didn't return expected result:\n\texpected:%s\n\tgot:%s" + % (expected, result)) + louie.disconnect(x, louie.All, a) + assert len(list(louie.get_all_receivers(a, louie.All))) == 0 + self._isclean() + + def test_GarbageCollected(self): + a = Callable() + b = Dummy() + signal = 'this' + louie.connect(a.a, signal, b) + expected = [] + del a + result = louie.send('this', b, a=b) + assert result == expected, ( + "Send didn't return expected result:\n\texpected:%s\n\tgot:%s" + % (expected, result)) + assert len(list(louie.get_all_receivers(b, signal))) == 0, ( + "Remaining 
handlers: %s" % (louie.get_all_receivers(b, signal),)) + self._isclean() + + def test_GarbageCollectedObj(self): + class x: + def __call__(self, a): + return a + a = Callable() + b = Dummy() + signal = 'this' + louie.connect(a, signal, b) + expected = [] + del a + result = louie.send('this', b, a=b) + assert result == expected, ( + "Send didn't return expected result:\n\texpected:%s\n\tgot:%s" + % (expected, result)) + assert len(list(louie.get_all_receivers(b, signal))) == 0, ( + "Remaining handlers: %s" % (louie.get_all_receivers(b, signal),)) + self._isclean() + + def test_MultipleRegistration(self): + a = Callable() + b = Dummy() + signal = 'this' + louie.connect(a, signal, b) + louie.connect(a, signal, b) + louie.connect(a, signal, b) + louie.connect(a, signal, b) + louie.connect(a, signal, b) + louie.connect(a, signal, b) + result = louie.send('this', b, a=b) + assert len(result) == 1, result + assert len(list(louie.get_all_receivers(b, signal))) == 1, ( + "Remaining handlers: %s" % (louie.get_all_receivers(b, signal),)) + del a + del b + del result + self._isclean() + + def test_robust(self): + """Test the sendRobust function.""" + def fails(): + raise ValueError('this') + a = object() + signal = 'this' + louie.connect(fails, louie.All, a) + result = louie.send_robust('this', a, a=a) + err = result[0][1] + assert isinstance(err, ValueError) + assert err.args == ('this', ) diff --git a/wlauto/external/louie/test/test_plugin.py b/wlauto/external/louie/test/test_plugin.py new file mode 100644 index 00000000..d8321d31 --- /dev/null +++ b/wlauto/external/louie/test/test_plugin.py @@ -0,0 +1,145 @@ +"""Louie plugin tests.""" + +import unittest + +import louie + +try: + import qt + if not hasattr(qt.qApp, 'for_testing'): + _app = qt.QApplication([]) + _app.for_testing = True + qt.qApp = _app +except ImportError: + qt = None + + +class ReceiverBase(object): + + def __init__(self): + self.args = [] + self.live = True + + def __call__(self, arg): + self.args.append(arg) + +class Receiver1(ReceiverBase): + pass + +class Receiver2(ReceiverBase): + pass + + +class Plugin1(louie.Plugin): + + def is_live(self, receiver): + """ReceiverBase instances are only live if their `live` + attribute is True""" + if isinstance(receiver, ReceiverBase): + return receiver.live + return True + + +class Plugin2(louie.Plugin): + + def is_live(self, receiver): + """Pretend all Receiver2 instances are not live.""" + if isinstance(receiver, Receiver2): + return False + return True + + +def test_only_one_instance(): + louie.reset() + plugin1a = Plugin1() + plugin1b = Plugin1() + louie.install_plugin(plugin1a) + # XXX: Move these tests into test cases so we can use unittest's + # 'assertRaises' method. + try: + louie.install_plugin(plugin1b) + except louie.error.PluginTypeError: + pass + else: + raise Exception('PluginTypeError not raised') + + +def test_is_live(): + louie.reset() + # Create receivers. + receiver1a = Receiver1() + receiver1b = Receiver1() + receiver2a = Receiver2() + receiver2b = Receiver2() + # Connect signals. + louie.connect(receiver1a, 'sig') + louie.connect(receiver1b, 'sig') + louie.connect(receiver2a, 'sig') + louie.connect(receiver2b, 'sig') + # Check reception without plugins. + louie.send('sig', arg='foo') + assert receiver1a.args == ['foo'] + assert receiver1b.args == ['foo'] + assert receiver2a.args == ['foo'] + assert receiver2b.args == ['foo'] + # Install plugin 1. + plugin1 = Plugin1() + louie.install_plugin(plugin1) + # Make some receivers not live. 
+ receiver1a.live = False + receiver2b.live = False + # Check reception. + louie.send('sig', arg='bar') + assert receiver1a.args == ['foo'] + assert receiver1b.args == ['foo', 'bar'] + assert receiver2a.args == ['foo', 'bar'] + assert receiver2b.args == ['foo'] + # Remove plugin 1, install plugin 2. + plugin2 = Plugin2() + louie.remove_plugin(plugin1) + louie.install_plugin(plugin2) + # Check reception. + louie.send('sig', arg='baz') + assert receiver1a.args == ['foo', 'baz'] + assert receiver1b.args == ['foo', 'bar', 'baz'] + assert receiver2a.args == ['foo', 'bar'] + assert receiver2b.args == ['foo'] + # Install plugin 1 alongside plugin 2. + louie.install_plugin(plugin1) + # Check reception. + louie.send('sig', arg='fob') + assert receiver1a.args == ['foo', 'baz'] + assert receiver1b.args == ['foo', 'bar', 'baz', 'fob'] + assert receiver2a.args == ['foo', 'bar'] + assert receiver2b.args == ['foo'] + + +if qt is not None: + def test_qt_plugin(): + louie.reset() + # Create receivers. + class Receiver(qt.QWidget): + def __init__(self): + qt.QObject.__init__(self) + self.args = [] + def receive(self, arg): + self.args.append(arg) + receiver1 = Receiver() + receiver2 = Receiver() + # Connect signals. + louie.connect(receiver1.receive, 'sig') + louie.connect(receiver2.receive, 'sig') + # Destroy receiver2 so only a shell is left. + receiver2.close(True) + # Check reception without plugins. + louie.send('sig', arg='foo') + assert receiver1.args == ['foo'] + assert receiver2.args == ['foo'] + # Install plugin. + plugin = louie.QtWidgetPlugin() + louie.install_plugin(plugin) + # Check reception with plugins. + louie.send('sig', arg='bar') + assert receiver1.args == ['foo', 'bar'] + assert receiver2.args == ['foo'] + diff --git a/wlauto/external/louie/test/test_prioritydispatcher.py b/wlauto/external/louie/test/test_prioritydispatcher.py new file mode 100644 index 00000000..061ed07b --- /dev/null +++ b/wlauto/external/louie/test/test_prioritydispatcher.py @@ -0,0 +1,41 @@ +import unittest + +import louie +from louie import dispatcher + +class Callable(object): + + def __init__(self, val): + self.val = val + + def __call__(self): + return self.val + + +one = Callable(1) +two = Callable(2) +three = Callable(3) + +class TestPriorityDispatcher(unittest.TestCase): + + def test_ConnectNotify(self): + louie.connect( + two, + 'one', + priority=200 + ) + louie.connect( + one, + 'one', + priority=100 + ) + louie.connect( + three, + 'one', + priority=300 + ) + result = [ i[1] for i in louie.send('one')] + if not result == [1, 2, 3]: + print result + assert(False) + diff --git a/wlauto/external/louie/test/test_prioritylist.py b/wlauto/external/louie/test/test_prioritylist.py new file mode 100644 index 00000000..7dccc7d4 --- /dev/null +++ b/wlauto/external/louie/test/test_prioritylist.py @@ -0,0 +1,62 @@ +import unittest + +import louie.prioritylist +from louie.prioritylist import PriorityList + +#def populate_list(plist): + +class TestPriorityList(unittest.TestCase): + + def test_Insert(self): + pl = PriorityList() + elements = {3: "element 3", + 2: "element 2", + 1: "element 1", + 5: "element 5", + 4: "element 4" + } + for key in elements: + pl.add(elements[key], priority=key) + + match = zip(sorted(elements.values()), pl[:]) + for pair in match: + assert(pair[0]==pair[1]) + + def test_Delete(self): + pl = PriorityList() + elements = {2: "element 3", + 1: "element 2", + 0: "element 1", + 4: "element 5", + 3: "element 4" + } + for key in elements: + pl.add(elements[key], priority=key) + del elements[2] + del 
pl[2] + match = zip(sorted(elements.values()) , pl[:]) + for pair in match: + assert(pair[0]==pair[1]) + + def test_Multiple(self): + pl = PriorityList() + pl.add('1', 1) + pl.add('2.1', 2) + pl.add('3', 3) + pl.add('2.2', 2) + it = iter(pl) + assert(it.next() == '1') + assert(it.next() == '2.1') + assert(it.next() == '2.2') + assert(it.next() == '3') + + def test_IteratorBreak(self): + pl = PriorityList() + pl.add('1', 1) + pl.add('2.1', 2) + pl.add('3', 3) + pl.add('2.2', 2) + for i in pl: + if i == '2.1': + break + assert(pl.index('3') == 3) diff --git a/wlauto/external/louie/test/test_robustapply.py b/wlauto/external/louie/test/test_robustapply.py new file mode 100644 index 00000000..ce2d9cc6 --- /dev/null +++ b/wlauto/external/louie/test/test_robustapply.py @@ -0,0 +1,34 @@ +import unittest + +from louie.robustapply import robust_apply + + +def no_argument(): + pass + + +def one_argument(blah): + pass + + +def two_arguments(blah, other): + pass + + +class TestRobustApply(unittest.TestCase): + + def test_01(self): + robust_apply(no_argument, no_argument) + + def test_02(self): + self.assertRaises(TypeError, robust_apply, no_argument, no_argument, + 'this' ) + + def test_03(self): + self.assertRaises(TypeError, robust_apply, one_argument, one_argument) + + def test_04(self): + """Raise error on duplication of a particular argument""" + self.assertRaises(TypeError, robust_apply, one_argument, one_argument, + 'this', blah='that') + diff --git a/wlauto/external/louie/test/test_saferef.py b/wlauto/external/louie/test/test_saferef.py new file mode 100644 index 00000000..778c1c6e --- /dev/null +++ b/wlauto/external/louie/test/test_saferef.py @@ -0,0 +1,83 @@ +import unittest + +from louie.saferef import safe_ref + + +class _Sample1(object): + def x(self): + pass + + +def _sample2(obj): + pass + + +class _Sample3(object): + def __call__(self, obj): + pass + + +class TestSaferef(unittest.TestCase): + + # XXX: The original tests had a test for closure, and it had an + # off-by-one problem, perhaps due to scope issues. It has been + # removed from this test suite. 
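+    # (Editor's note: a rough sketch of what such a test would look
+    # like, mirroring setUp below; not part of the original suite:
+    #     s = safe_ref(t.x, self._closure)
+    #     del t  # self._closure should then fire exactly once
+    # )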
+ + def setUp(self): + ts = [] + ss = [] + for x in xrange(5000): + t = _Sample1() + ts.append(t) + s = safe_ref(t.x, self._closure) + ss.append(s) + ts.append(_sample2) + ss.append(safe_ref(_sample2, self._closure)) + for x in xrange(30): + t = _Sample3() + ts.append(t) + s = safe_ref(t, self._closure) + ss.append(s) + self.ts = ts + self.ss = ss + self.closure_count = 0 + + def tearDown(self): + if hasattr(self, 'ts'): + del self.ts + if hasattr(self, 'ss'): + del self.ss + + def test_In(self): + """Test the `in` operator for safe references (cmp)""" + for t in self.ts[:50]: + assert safe_ref(t.x) in self.ss + + def test_Valid(self): + """Test that the references are valid (return instance methods)""" + for s in self.ss: + assert s() + + def test_ShortCircuit(self): + """Test that creation short-circuits to reuse existing references""" + sd = {} + for s in self.ss: + sd[s] = 1 + for t in self.ts: + if hasattr(t, 'x'): + assert sd.has_key(safe_ref(t.x)) + else: + assert sd.has_key(safe_ref(t)) + + def test_Representation(self): + """Test that the reference object's representation works + + XXX Doesn't currently check the results, just that no error + is raised + """ + repr(self.ss[-1]) + + def _closure(self, ref): + """Dumb utility mechanism to increment deletion counter""" + self.closure_count += 1 + diff --git a/wlauto/external/louie/version.py b/wlauto/external/louie/version.py new file mode 100644 index 00000000..e3b0f6a7 --- /dev/null +++ b/wlauto/external/louie/version.py @@ -0,0 +1,8 @@ +"""Louie version information.""" + + +NAME = 'Louie' +DESCRIPTION = 'Signal dispatching mechanism' +VERSION = '1.1' + + diff --git a/wlauto/external/pmu_logger/Makefile b/wlauto/external/pmu_logger/Makefile new file mode 100755 index 00000000..ca7b2674 --- /dev/null +++ b/wlauto/external/pmu_logger/Makefile @@ -0,0 +1,7 @@ +# To build the pmu_logger module use the following command line +# make ARCH=arm CROSS_COMPILE=arm-linux-gnueabi- -C ../kernel/out SUBDIRS=$PWD modules +# where +# CROSS_COMPILE - prefix of the arm linux compiler +# -C - location of the configured kernel source tree + +obj-m := pmu_logger.o \ No newline at end of file diff --git a/wlauto/external/pmu_logger/README b/wlauto/external/pmu_logger/README new file mode 100755 index 00000000..9f3952a2 --- /dev/null +++ b/wlauto/external/pmu_logger/README @@ -0,0 +1,35 @@ +The pmu_logger module provides the ability to periodically trace CCI PMU counters. The trace destinations can be ftrace buffer and/or kernel logs. This file gives a quick overview of the funcationality provided by the module and how to use it. + +The pmu_logger module creates a directory in the debugfs filesystem called cci_pmu_logger which can be used to enable/disable the counters and control the events that are counted. + +To configure the events being counted write the corresponding event id to the counter* files. The list of CCI PMU events can be found at http://arminfo.emea.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0470d/CJHICFBF.html. + +The "period_jiffies" can be used to control the periodicity of tracing. It accepts values in kernel jiffies. + +To enable tracing, write a 1 to "control". To disable write another 1 to "control". The files "enable_console" and "enable_ftrace" control where the trace is written to. To check if the counters are currently running or not, you can read the control file. + +The current values of the counters can be read from the "values" file. + +Eg. 
To trace, A15 and A7 snoop hit rate every 10 jiffies the following command are required - + + +trace-cmd reset + +echo 0x63 > counter0 +echo 0x6A > counter1 +echo 0x83 > counter2 +echo 0x8A > counter3 + +echo 10 > period_jiffies + +trace-cmd start -b 20000 -e "sched:sched_wakeup" + +echo 1 > control + +# perform the activity for which you would like to collect the CCI PMU trace. + +trace-cmd stop && trace-cmd extract + +echo 1 > control + +trace-cmd report trace.dat | grep print # shows the trace of the CCI PMU counters along with the cycle counter values. \ No newline at end of file diff --git a/wlauto/external/pmu_logger/pmu_logger.c b/wlauto/external/pmu_logger/pmu_logger.c new file mode 100755 index 00000000..47497a10 --- /dev/null +++ b/wlauto/external/pmu_logger/pmu_logger.c @@ -0,0 +1,294 @@ +/* Copyright 2013-2015 ARM Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +*/ + + +/* + * pmu_logger.c - Kernel module to log the CCI PMU counters + */ + +#include +#include +#include +#include +#include +#include + +#define MODULE_NAME "cci_pmu_logger" + +// CCI_BASE needs to be modified to point to the mapped location of CCI in +// memory on your device. +#define CCI_BASE 0x2C090000 // TC2 +//#define CCI_BASE 0x10D20000 +#define CCI_SIZE 0x00010000 + +#define PMCR 0x100 + +#define PMCR_CEN (1 << 0) +#define PMCR_RST (1 << 1) +#define PMCR_CCR (1 << 2) +#define PMCR_CCD (1 << 3) +#define PMCR_EX (1 << 4) +#define PMCR_DP (1 << 5) + +#define CC_BASE 0x9000 +#define PC0_BASE 0xA000 +#define PC1_BASE 0xB000 +#define PC2_BASE 0xC000 +#define PC3_BASE 0xD000 + +#define PC_ESR 0x0 +#define CNT_VALUE 0x4 +#define CNT_CONTROL 0x8 + +#define CNT_ENABLE (1 << 0) + +u32 counter0_event = 0x6A; +u32 counter1_event = 0x63; +u32 counter2_event = 0x8A; +u32 counter3_event = 0x83; + +u32 enable_console = 0; +u32 enable_ftrace = 1; + +void *cci_base = 0; + +static struct dentry *module_debugfs_root; +static int enabled = false; + +u32 delay = 10; //jiffies. 
This translates to 1 sample every 100 ms +struct timer_list timer; + +static void call_after_delay(void) +{ + timer.expires = jiffies + delay; + add_timer(&timer); +} + + +static void setup_and_call_after_delay(void (*fn)(unsigned long)) +{ + init_timer(&timer); + timer.data = (unsigned long)&timer; + timer.function = fn; + + call_after_delay(); +} + +static void print_counter_configuration(void) +{ + if (enable_ftrace) + trace_printk("Counter_0: %02x Counter_1: %02x Counter_2: %02x Counter_3: %02x\n", \ + counter0_event, counter1_event, counter2_event, counter3_event); + + if (enable_console) + printk("Counter_0: %02x Counter_1: %02x Counter_2: %02x Counter_3: %02x\n", \ + counter0_event, counter1_event, counter2_event, counter3_event); +} + +static void initialize_cci_pmu(void) +{ + u32 val; + + // Select the events counted + iowrite32(counter0_event, cci_base + PC0_BASE + PC_ESR); + iowrite32(counter1_event, cci_base + PC1_BASE + PC_ESR); + iowrite32(counter2_event, cci_base + PC2_BASE + PC_ESR); + iowrite32(counter3_event, cci_base + PC3_BASE + PC_ESR); + + // Enable the individual PMU counters + iowrite32(CNT_ENABLE, cci_base + PC0_BASE + CNT_CONTROL); + iowrite32(CNT_ENABLE, cci_base + PC1_BASE + CNT_CONTROL); + iowrite32(CNT_ENABLE, cci_base + PC2_BASE + CNT_CONTROL); + iowrite32(CNT_ENABLE, cci_base + PC3_BASE + CNT_CONTROL); + iowrite32(CNT_ENABLE, cci_base + CC_BASE + CNT_CONTROL); + + // Reset the counters and configure the Cycle Count Divider + val = ioread32(cci_base + PMCR); + iowrite32(val | PMCR_RST | PMCR_CCR | PMCR_CCD, cci_base + PMCR); +} + +static void enable_cci_pmu_counters(void) +{ + u32 val = ioread32(cci_base + PMCR); + iowrite32(val | PMCR_CEN, cci_base + PMCR); +} + +static void disable_cci_pmu_counters(void) +{ + u32 val = ioread32(cci_base + PMCR); + iowrite32(val & ~PMCR_CEN, cci_base + PMCR); +} + +static void trace_values(unsigned long arg) +{ + u32 cycles; + u32 counter[4]; + + cycles = ioread32(cci_base + CC_BASE + CNT_VALUE); + counter[0] = ioread32(cci_base + PC0_BASE + CNT_VALUE); + counter[1] = ioread32(cci_base + PC1_BASE + CNT_VALUE); + counter[2] = ioread32(cci_base + PC2_BASE + CNT_VALUE); + counter[3] = ioread32(cci_base + PC3_BASE + CNT_VALUE); + + if (enable_ftrace) + trace_printk("Cycles: %08x Counter_0: %08x" + " Counter_1: %08x Counter_2: %08x Counter_3: %08x\n", \ + cycles, counter[0], counter[1], counter[2], counter[3]); + + if (enable_console) + printk("Cycles: %08x Counter_0: %08x" + " Counter_1: %08x Counter_2: %08x Counter_3: %08x\n", \ + cycles, counter[0], counter[1], counter[2], counter[3]); + + if (enabled) { + u32 val; + // Reset the counters + val = ioread32(cci_base + PMCR); + iowrite32(val | PMCR_RST | PMCR_CCR, cci_base + PMCR); + + call_after_delay(); + } +} + +static ssize_t read_control(struct file *file, char __user *buf, size_t count, loff_t *ppos) +{ + char status[16]; + /* printk(KERN_DEBUG "%s\n", __func__); */ + + if (enabled) + snprintf(status, 16, "enabled\n"); + else + snprintf(status, 16, "disabled\n"); + + return simple_read_from_buffer(buf, count, ppos, status, strlen(status)); +} + +static ssize_t write_control(struct file *file, const char __user *buf, size_t count, loff_t *ppos) +{ + if (enabled) { + disable_cci_pmu_counters(); + enabled = false; + } else { + initialize_cci_pmu(); + enable_cci_pmu_counters(); + enabled = true; + + print_counter_configuration(); + setup_and_call_after_delay(trace_values); + } + + return count; +} + +static ssize_t read_values(struct file *file, char __user *buf, size_t count, 
loff_t *ppos) +{ + char values[256]; + /* u32 val; */ + + snprintf(values, 256, "Cycles: %08x Counter_0: %08x" + " Counter_1: %08x Counter_2: %08x Counter_3: %08x\n", \ + ioread32(cci_base + CC_BASE + CNT_VALUE), \ + ioread32(cci_base + PC0_BASE + CNT_VALUE), \ + ioread32(cci_base + PC1_BASE + CNT_VALUE), \ + ioread32(cci_base + PC2_BASE + CNT_VALUE), \ + ioread32(cci_base + PC3_BASE + CNT_VALUE)); + + return simple_read_from_buffer(buf, count, ppos, values, strlen(values)); +} + +static const struct file_operations control_fops = { + .owner = THIS_MODULE, + .read = read_control, + .write = write_control, +}; + +static const struct file_operations value_fops = { + .owner = THIS_MODULE, + .read = read_values, +}; + +static int __init pmu_logger_init(void) +{ + struct dentry *retval; + + module_debugfs_root = debugfs_create_dir(MODULE_NAME, NULL); + if (!module_debugfs_root || IS_ERR(module_debugfs_root)) { + printk(KERN_ERR "error creating debugfs dir.\n"); + goto out; + } + + retval = debugfs_create_file("control", S_IRUGO | S_IWUGO, module_debugfs_root, NULL, &control_fops); + if (!retval) + goto out; + + retval = debugfs_create_file("values", S_IRUGO, module_debugfs_root, NULL, &value_fops); + if (!retval) + goto out; + + retval = debugfs_create_bool("enable_console", S_IRUGO | S_IWUGO, module_debugfs_root, &enable_console); + if (!retval) + goto out; + + retval = debugfs_create_bool("enable_ftrace", S_IRUGO | S_IWUGO, module_debugfs_root, &enable_ftrace); + if (!retval) + goto out; + + retval = debugfs_create_u32("period_jiffies", S_IRUGO | S_IWUGO, module_debugfs_root, &delay); + if (!retval) + goto out; + + retval = debugfs_create_x32("counter0", S_IRUGO | S_IWUGO, module_debugfs_root, &counter0_event); + if (!retval) + goto out; + retval = debugfs_create_x32("counter1", S_IRUGO | S_IWUGO, module_debugfs_root, &counter1_event); + if (!retval) + goto out; + retval = debugfs_create_x32("counter2", S_IRUGO | S_IWUGO, module_debugfs_root, &counter2_event); + if (!retval) + goto out; + retval = debugfs_create_x32("counter3", S_IRUGO | S_IWUGO, module_debugfs_root, &counter3_event); + if (!retval) + goto out; + + cci_base = ioremap(CCI_BASE, CCI_SIZE); + if (!cci_base) + goto out; + + printk(KERN_INFO "CCI PMU Logger loaded.\n"); + return 0; + +out: + debugfs_remove_recursive(module_debugfs_root); + return 1; +} + +static void __exit pmu_logger_exit(void) +{ + if (module_debugfs_root) { + debugfs_remove_recursive(module_debugfs_root); + module_debugfs_root = NULL; + } + if (cci_base) + iounmap(cci_base); + + printk(KERN_INFO "CCI PMU Logger removed.\n"); +} + +module_init(pmu_logger_init); +module_exit(pmu_logger_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Punit Agrawal"); +MODULE_DESCRIPTION("logger for CCI PMU counters"); diff --git a/wlauto/external/pmu_logger/pmu_logger.ko b/wlauto/external/pmu_logger/pmu_logger.ko new file mode 100644 index 00000000..84164383 Binary files /dev/null and b/wlauto/external/pmu_logger/pmu_logger.ko differ diff --git a/wlauto/external/readenergy/Makefile b/wlauto/external/readenergy/Makefile new file mode 100644 index 00000000..76a25594 --- /dev/null +++ b/wlauto/external/readenergy/Makefile @@ -0,0 +1,11 @@ +# To build: +# +# CROSS_COMPILE=aarch64-linux-gnu- make +# +CROSS_COMPILE?=aarch64-linux-gnu- +CC=$(CROSS_COMPILE)gcc +CFLAGS='-Wl,-static -Wl,-lc' + +readenergy: readenergy.c + $(CC) $(CFLAGS) readenergy.c -o readenergy + cp readenergy ../../instrumentation/juno_energy/readenergy diff --git a/wlauto/external/readenergy/readenergy 
b/wlauto/external/readenergy/readenergy new file mode 100755 index 00000000..c26991c2 Binary files /dev/null and b/wlauto/external/readenergy/readenergy differ diff --git a/wlauto/external/readenergy/readenergy.c b/wlauto/external/readenergy/readenergy.c new file mode 100644 index 00000000..cc945f7f --- /dev/null +++ b/wlauto/external/readenergy/readenergy.c @@ -0,0 +1,345 @@ +/* Copyright 2014-2015 ARM Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +*/ + + +/* + * readenergy.c + * + * Reads APB energy registers in Juno and outputs the measurements (converted to appropriate units). + * +*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// The following values obtained from Juno TRM 2014/03/04 section 4.5 + +// Location of APB registers in memory +#define APB_BASE_MEMORY 0x1C010000 +// APB energy counters start at offset 0xD0 from the base APB address. +#define BASE_INDEX 0xD0 / 4 +// the one-past last APB counter +#define APB_SIZE 0x120 + +// Masks specifying the bits that contain the actual counter values +#define CMASK 0xFFF +#define VMASK 0xFFF +#define PMASK 0xFFFFFF + +// Sclaing factor (divisor) or getting measured values from counters +#define SYS_ADC_CH0_PM1_SYS_SCALE 761 +#define SYS_ADC_CH1_PM2_A57_SCALE 381 +#define SYS_ADC_CH2_PM3_A53_SCALE 761 +#define SYS_ADC_CH3_PM4_GPU_SCALE 381 +#define SYS_ADC_CH4_VSYS_SCALE 1622 +#define SYS_ADC_CH5_VA57_SCALE 1622 +#define SYS_ADC_CH6_VA53_SCALE 1622 +#define SYS_ADC_CH7_VGPU_SCALE 1622 +#define SYS_POW_CH04_SYS_SCALE (SYS_ADC_CH0_PM1_SYS_SCALE * SYS_ADC_CH4_VSYS_SCALE) +#define SYS_POW_CH15_A57_SCALE (SYS_ADC_CH1_PM2_A57_SCALE * SYS_ADC_CH5_VA57_SCALE) +#define SYS_POW_CH26_A53_SCALE (SYS_ADC_CH2_PM3_A53_SCALE * SYS_ADC_CH6_VA53_SCALE) +#define SYS_POW_CH37_GPU_SCALE (SYS_ADC_CH3_PM4_GPU_SCALE * SYS_ADC_CH7_VGPU_SCALE) +#define SYS_ENM_CH0_SYS_SCALE 12348030000 +#define SYS_ENM_CH1_A57_SCALE 6174020000 +#define SYS_ENM_CH0_A53_SCALE 12348030000 +#define SYS_ENM_CH0_GPU_SCALE 6174020000 + +// Original values prior to re-callibrations. 
+/*#define SYS_ADC_CH0_PM1_SYS_SCALE 819.2*/ +/*#define SYS_ADC_CH1_PM2_A57_SCALE 409.6*/ +/*#define SYS_ADC_CH2_PM3_A53_SCALE 819.2*/ +/*#define SYS_ADC_CH3_PM4_GPU_SCALE 409.6*/ +/*#define SYS_ADC_CH4_VSYS_SCALE 1638.4*/ +/*#define SYS_ADC_CH5_VA57_SCALE 1638.4*/ +/*#define SYS_ADC_CH6_VA53_SCALE 1638.4*/ +/*#define SYS_ADC_CH7_VGPU_SCALE 1638.4*/ +/*#define SYS_POW_CH04_SYS_SCALE (SYS_ADC_CH0_PM1_SYS_SCALE * SYS_ADC_CH4_VSYS_SCALE)*/ +/*#define SYS_POW_CH15_A57_SCALE (SYS_ADC_CH1_PM2_A57_SCALE * SYS_ADC_CH5_VA57_SCALE)*/ +/*#define SYS_POW_CH26_A53_SCALE (SYS_ADC_CH2_PM3_A53_SCALE * SYS_ADC_CH6_VA53_SCALE)*/ +/*#define SYS_POW_CH37_GPU_SCALE (SYS_ADC_CH3_PM4_GPU_SCALE * SYS_ADC_CH7_VGPU_SCALE)*/ +/*#define SYS_ENM_CH0_SYS_SCALE 13421772800.0*/ +/*#define SYS_ENM_CH1_A57_SCALE 6710886400.0*/ +/*#define SYS_ENM_CH0_A53_SCALE 13421772800.0*/ +/*#define SYS_ENM_CH0_GPU_SCALE 6710886400.0*/ + +// Ignore individual errors but if see too many, abort. +#define ERROR_THRESHOLD 10 + +// Default counter poll period (in milliseconds). +#define DEFAULT_PERIOD 100 + +// A single reading from the energy meter. The values are the proper readings converted +// to appropriate units (e.g. Watts for power); they are *not* raw counter values. +struct reading +{ + double sys_adc_ch0_pm1_sys; + double sys_adc_ch1_pm2_a57; + double sys_adc_ch2_pm3_a53; + double sys_adc_ch3_pm4_gpu; + double sys_adc_ch4_vsys; + double sys_adc_ch5_va57; + double sys_adc_ch6_va53; + double sys_adc_ch7_vgpu; + double sys_pow_ch04_sys; + double sys_pow_ch15_a57; + double sys_pow_ch26_a53; + double sys_pow_ch37_gpu; + double sys_enm_ch0_sys; + double sys_enm_ch1_a57; + double sys_enm_ch0_a53; + double sys_enm_ch0_gpu; +}; + +inline uint64_t join_64bit_register(uint32_t *buffer, int index) +{ + uint64_t result = 0; + result |= buffer[index]; + result |= (uint64_t)(buffer[index+1]) << 32; + return result; +} + +int nsleep(const struct timespec *req, struct timespec *rem) +{ + struct timespec temp_rem; + if (nanosleep(req, rem) == -1) + { + if (errno == EINTR) + { + nsleep(rem, &temp_rem); + } + else + { + return errno; + } + } + else + { + return 0; + } +} + +void print_help() +{ + fprintf(stderr, "Usage: readenergy [-t PERIOD] -o OUTFILE\n\n" + "Read Juno energy counters every PERIOD milliseconds, writing them\n" + "to OUTFILE in CSV format until SIGTERM is received.\n\n" + "Parameters:\n" + " PERIOD is the counter poll period in milliseconds.\n" + " (Defaults to 100 milliseconds.)\n" + " OUTFILE is the output file path\n"); +} + +// debugging only... 
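+// (Editor's note: dprint below just writes its message to stderr and
+// then calls sync(); e.g. dprint("mmap ok");)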
+inline void dprint(char *msg) +{ + fprintf(stderr, "%s\n", msg); + sync(); +} + +// -------------------------------------- config ---------------------------------------------------- + +struct config +{ + struct timespec period; + char *output_file; +}; + +void config_init_period_from_millis(struct config *this, long millis) +{ + this->period.tv_sec = (time_t)(millis / 1000); + this->period.tv_nsec = (millis % 1000) * 1000000; +} + +void config_init(struct config *this, int argc, char *argv[]) +{ + this->output_file = NULL; + config_init_period_from_millis(this, DEFAULT_PERIOD); + + int opt; + while ((opt = getopt(argc, argv, "ht:o:")) != -1) + { + switch(opt) + { + case 't': + config_init_period_from_millis(this, atol(optarg)); + break; + case 'o': + this->output_file = optarg; + break; + case 'h': + print_help(); + exit(EXIT_SUCCESS); + break; + default: + fprintf(stderr, "ERROR: Unexpected option %s\n\n", opt); + print_help(); + exit(EXIT_FAILURE); + } + } + + if (this->output_file == NULL) + { + fprintf(stderr, "ERROR: Mandatory -o option not specified.\n\n"); + print_help(); + exit(EXIT_FAILURE); + } +} + +// -------------------------------------- /config --------------------------------------------------- + +// -------------------------------------- emeter ---------------------------------------------------- + +struct emeter +{ + int fd; + FILE *out; + void *mmap_base; +}; + +void emeter_init(struct emeter *this, char *outfile) +{ + this->out = fopen(outfile, "w"); + if (this->out == NULL) + { + fprintf(stderr, "ERROR: Could not open output file %s; got %s\n", outfile, strerror(errno)); + exit(EXIT_FAILURE); + } + + this->fd = open("/dev/mem", O_RDONLY); + if(this->fd < 0) + { + fprintf(stderr, "ERROR: Can't open /dev/mem; got %s\n", strerror(errno)); + fclose(this->out); + exit(EXIT_FAILURE); + } + + this->mmap_base = mmap(NULL, APB_SIZE, PROT_READ, MAP_SHARED, this->fd, APB_BASE_MEMORY); + if (this->mmap_base == MAP_FAILED) + { + fprintf(stderr, "ERROR: mmap failed; got %s\n", strerror(errno)); + close(this->fd); + fclose(this->out); + exit(EXIT_FAILURE); + } + + fprintf(this->out, "sys_curr,a57_curr,a53_curr,gpu_curr," + "sys_volt,a57_volt,a53_volt,gpu_volt," + "sys_pow,a57_pow,a53_pow,gpu_pow," + "sys_cenr,a57_cenr,a53_cenr,gpu_cenr\n"); +} + +void emeter_read_measurements(struct emeter *this, struct reading *reading) +{ + uint32_t *buffer = (uint32_t *)this->mmap_base; + reading->sys_adc_ch0_pm1_sys = (double)(CMASK & buffer[BASE_INDEX+0]) / SYS_ADC_CH0_PM1_SYS_SCALE; + reading->sys_adc_ch1_pm2_a57 = (double)(CMASK & buffer[BASE_INDEX+1]) / SYS_ADC_CH1_PM2_A57_SCALE; + reading->sys_adc_ch2_pm3_a53 = (double)(CMASK & buffer[BASE_INDEX+2]) / SYS_ADC_CH2_PM3_A53_SCALE; + reading->sys_adc_ch3_pm4_gpu = (double)(CMASK & buffer[BASE_INDEX+3]) / SYS_ADC_CH3_PM4_GPU_SCALE; + reading->sys_adc_ch4_vsys = (double)(VMASK & buffer[BASE_INDEX+4]) / SYS_ADC_CH4_VSYS_SCALE; + reading->sys_adc_ch5_va57 = (double)(VMASK & buffer[BASE_INDEX+5]) / SYS_ADC_CH5_VA57_SCALE; + reading->sys_adc_ch6_va53 = (double)(VMASK & buffer[BASE_INDEX+6]) / SYS_ADC_CH6_VA53_SCALE; + reading->sys_adc_ch7_vgpu = (double)(VMASK & buffer[BASE_INDEX+7]) / SYS_ADC_CH7_VGPU_SCALE; + reading->sys_pow_ch04_sys = (double)(PMASK & buffer[BASE_INDEX+8]) / SYS_POW_CH04_SYS_SCALE; + reading->sys_pow_ch15_a57 = (double)(PMASK & buffer[BASE_INDEX+9]) / SYS_POW_CH15_A57_SCALE; + reading->sys_pow_ch26_a53 = (double)(PMASK & buffer[BASE_INDEX+10]) / SYS_POW_CH26_A53_SCALE; + reading->sys_pow_ch37_gpu = (double)(PMASK & 
buffer[BASE_INDEX+11]) / SYS_POW_CH37_GPU_SCALE; + reading->sys_enm_ch0_sys = (double)join_64bit_register(buffer, BASE_INDEX+12) / SYS_ENM_CH0_SYS_SCALE; + reading->sys_enm_ch1_a57 = (double)join_64bit_register(buffer, BASE_INDEX+14) / SYS_ENM_CH1_A57_SCALE; + reading->sys_enm_ch0_a53 = (double)join_64bit_register(buffer, BASE_INDEX+16) / SYS_ENM_CH0_A53_SCALE; + reading->sys_enm_ch0_gpu = (double)join_64bit_register(buffer, BASE_INDEX+18) / SYS_ENM_CH0_GPU_SCALE; +} + +void emeter_take_reading(struct emeter *this) +{ + static struct reading reading; + int error_count = 0; + emeter_read_measurements(this, &reading); + int ret = fprintf(this->out, "%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n", + reading.sys_adc_ch0_pm1_sys, + reading.sys_adc_ch1_pm2_a57, + reading.sys_adc_ch2_pm3_a53, + reading.sys_adc_ch3_pm4_gpu, + reading.sys_adc_ch4_vsys, + reading.sys_adc_ch5_va57, + reading.sys_adc_ch6_va53, + reading.sys_adc_ch7_vgpu, + reading.sys_pow_ch04_sys, + reading.sys_pow_ch15_a57, + reading.sys_pow_ch26_a53, + reading.sys_pow_ch37_gpu, + reading.sys_enm_ch0_sys, + reading.sys_enm_ch1_a57, + reading.sys_enm_ch0_a53, + reading.sys_enm_ch0_gpu); + if (ret < 0) + { + fprintf(stderr, "ERROR: while writing a meter reading: %s\n", strerror(errno)); + if (++error_count > ERROR_THRESHOLD) + exit(EXIT_FAILURE); + } +} + +void emeter_finalize(struct emeter *this) +{ + if (munmap(this->mmap_base, APB_SIZE) == -1) + { + // Report the error but don't bother doing anything else, as we're not gonna do + // anything with emeter after this point anyway. + fprintf(stderr, "ERROR: munmap failed; got %s\n", strerror(errno)); + } + close(this->fd); + fclose(this->out); +} + +// -------------------------------------- /emeter ---------------------------------------------------- + +int done = 0; + +void term_handler(int signum) +{ + done = 1; +} + +int main(int argc, char *argv[]) +{ + struct sigaction action; + memset(&action, 0, sizeof(struct sigaction)); + action.sa_handler = term_handler; + sigaction(SIGTERM, &action, NULL); + + struct config config; + struct emeter emeter; + config_init(&config, argc, argv); + emeter_init(&emeter, config.output_file); + + struct timespec remaining; + while (!done) + { + emeter_take_reading(&emeter); + nsleep(&config.period, &remaining); + } + + emeter_finalize(&emeter); + return EXIT_SUCCESS; +} diff --git a/wlauto/external/revent/Makefile b/wlauto/external/revent/Makefile new file mode 100644 index 00000000..dbbfea75 --- /dev/null +++ b/wlauto/external/revent/Makefile @@ -0,0 +1,12 @@ +# CROSS_COMPILE=aarch64-linux-gnu- make +# +CC=gcc +CFLAGS=-static -lc + +revent: revent.c + $(CROSS_COMPILE)$(CC) $(CFLAGS) revent.c -o revent + +clean: + rm -rf revent + +.PHONY: clean diff --git a/wlauto/external/revent/revent.c b/wlauto/external/revent/revent.c new file mode 100644 index 00000000..368e0617 --- /dev/null +++ b/wlauto/external/revent/revent.c @@ -0,0 +1,598 @@ +/* Copyright 2012-2015 ARM Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
diff --git a/wlauto/external/revent/Makefile b/wlauto/external/revent/Makefile
new file mode 100644
index 00000000..dbbfea75
--- /dev/null
+++ b/wlauto/external/revent/Makefile
@@ -0,0 +1,12 @@
+# CROSS_COMPILE=aarch64-linux-gnu- make
+#
+CC=gcc
+CFLAGS=-static -lc
+
+revent: revent.c
+	$(CROSS_COMPILE)$(CC) $(CFLAGS) revent.c -o revent
+
+clean:
+	rm -rf revent
+
+.PHONY: clean
diff --git a/wlauto/external/revent/revent.c b/wlauto/external/revent/revent.c
new file mode 100644
index 00000000..368e0617
--- /dev/null
+++ b/wlauto/external/revent/revent.c
@@ -0,0 +1,598 @@
+/* Copyright 2012-2015 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/input.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <limits.h>
+#include <ctype.h>
+
+#ifdef ANDROID
+#include <android/log.h>
+#endif
+
+
+#define die(args...) do { \
+    fprintf(stderr, "ERROR: "); \
+    fprintf(stderr, args); \
+    exit(EXIT_FAILURE); \
+} while(0)
+
+#define dprintf(args...) if (verbose) printf(args)
+
+
+#define INPDEV_MAX_DEVICES 16
+#define INPDEV_MAX_PATH    30
+
+
+#ifndef ANDROID
+int strlcpy(char *dest, char *source, size_t size)
+{
+    strncpy(dest, source, size-1);
+    dest[size-1] = '\0';
+    return size;
+}
+#endif
+
+typedef enum {
+    FALSE=0,
+    TRUE
+} bool_t;
+
+typedef enum {
+    RECORD=0,
+    REPLAY,
+    DUMP,
+    INFO,
+    INVALID
+} revent_mode_t;
+
+typedef struct {
+    revent_mode_t mode;
+    int record_time;
+    int device_number;
+    char *file;
+} revent_args_t;
+
+typedef struct {
+    size_t id_pathc;                                     /* Count of total paths so far. */
+    char id_pathv[INPDEV_MAX_DEVICES][INPDEV_MAX_PATH];  /* List of paths matching pattern. */
+} inpdev_t;
+
+typedef struct {
+    int dev_idx;
+    struct input_event event;
+} replay_event_t;
+
+typedef struct {
+    int num_fds;
+    int num_events;
+    int *fds;
+    replay_event_t *events;
+} replay_buffer_t;
+
+
+bool_t verbose = FALSE;
+
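+/*
+ * On-disk layout of a recording, as written by record() and consumed by
+ * dump() and replay_buffer_init() below:
+ *
+ *     int num_devices;             // number of recorded input devices
+ *     num_devices times:
+ *         int  len;                // length of the device path that follows
+ *         char path[len];          // e.g. "/dev/input/event3", no terminator
+ *     then, until EOF:
+ *         int                dev_idx;  // index into the device list above
+ *         struct input_event event;    // raw kernel input event
+ */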
+
+bool_t is_numeric(char *string)
+{
+    int len = strlen(string);
+
+    int i = 0;
+    while(i < len)
+    {
+        if(!isdigit(string[i]))
+            return FALSE;
+        i++;
+    }
+
+    return TRUE;
+}
+
+off_t get_file_size(const char *filename) {
+    struct stat st;
+
+    if (stat(filename, &st) == 0)
+        return st.st_size;
+
+    die("Cannot determine size of %s: %s\n", filename, strerror(errno));
+}
+
+int inpdev_init(inpdev_t **inpdev, int devid)
+{
+    int i;
+    int fd;
+    int num_devices;
+
+    *inpdev = malloc(sizeof(inpdev_t));
+    (*inpdev)->id_pathc = 0;
+
+    if (devid == -1) {
+        // device id was not specified so we want to record from all available input devices.
+        for(i = 0; i < INPDEV_MAX_DEVICES; ++i)
+        {
+            sprintf((*inpdev)->id_pathv[(*inpdev)->id_pathc], "/dev/input/event%d", i);
+            fd = open((*inpdev)->id_pathv[(*inpdev)->id_pathc], O_RDONLY);
+            if(fd > 0)
+            {
+                close(fd);
+                dprintf("opened %s\n", (*inpdev)->id_pathv[(*inpdev)->id_pathc]);
+                (*inpdev)->id_pathc++;
+            }
+            else
+            {
+                dprintf("could not open %s\n", (*inpdev)->id_pathv[(*inpdev)->id_pathc]);
+            }
+        }
+    }
+    else {
+        // device id was specified so record just that device.
+        sprintf((*inpdev)->id_pathv[0], "/dev/input/event%d", devid);
+        fd = open((*inpdev)->id_pathv[0], O_RDONLY);
+        if(fd > 0)
+        {
+            close(fd);
+            dprintf("opened %s\n", (*inpdev)->id_pathv[0]);
+            (*inpdev)->id_pathc++;
+        }
+        else
+        {
+            die("could not open %s\n", (*inpdev)->id_pathv[0]);
+        }
+    }
+
+    return 0;
+}
+
+int inpdev_close(inpdev_t *inpdev)
+{
+    free(inpdev);
+    return 0;
+}
+
+void printDevProperties(const char* aDev)
+{
+    int fd = -1;
+    char name[256]= "Unknown";
+    if ((fd = open(aDev, O_RDONLY)) < 0)
+        die("could not open %s\n", aDev);
+
+    if(ioctl(fd, EVIOCGNAME(sizeof(name)), name) < 0)
+        die("evdev ioctl failed on %s\n", aDev);
+
+    printf("The device on %s says its name is %s\n", aDev, name);
+    close(fd);
+}
+
+void dump(const char *logfile)
+{
+    int fdin = open(logfile, O_RDONLY);
+    if (fdin < 0) die("Could not open eventlog %s\n", logfile);
+
+    int nfds;
+    size_t rb = read(fdin, &nfds, sizeof(nfds));
+    if (rb != sizeof(nfds)) die("problems reading eventlog\n");
+    int *fds = malloc(sizeof(int)*nfds);
+    if (!fds) die("out of memory\n");
+
+    int len;
+    int i;
+    char buf[INPDEV_MAX_PATH];
+
+    inpdev_t *inpdev = malloc(sizeof(inpdev_t));
+    inpdev->id_pathc = 0;
+    for (i=0; i<nfds; i++) {
+        memset(buf, 0, sizeof(buf));
+        rb = read(fdin, &len, sizeof(len));
+        if (rb != sizeof(len)) die("problems reading eventlog\n");
+        rb = read(fdin, &buf[0], len);
+        if (rb != len) die("problems reading eventlog\n");
+        strlcpy(inpdev->id_pathv[inpdev->id_pathc], buf, INPDEV_MAX_PATH);
+        inpdev->id_pathv[inpdev->id_pathc][INPDEV_MAX_PATH-1] = '\0';
+        inpdev->id_pathc++;
+    }
+
+    struct input_event ev;
+    int count = 0;
+    while(1) {
+        int idx;
+        rb = read(fdin, &idx, sizeof(idx));
+        if (rb != sizeof(idx)) break;
+        rb = read(fdin, &ev, sizeof(ev));
+        if (rb < (int)sizeof(ev)) break;
+
+        printf("%10u.%-6u %30s type %2d code %3d value %4d\n",
+               (unsigned int)ev.time.tv_sec, (unsigned int)ev.time.tv_usec,
+               inpdev->id_pathv[idx], ev.type, ev.code, ev.value);
+        count++;
+    }
+
+    printf("\nTotal: %d events\n", count);
+    close(fdin);
+    free(inpdev);
+}
+
+int replay_buffer_init(replay_buffer_t **buffer, const char *logfile)
+{
+    *buffer = malloc(sizeof(replay_buffer_t));
+    replay_buffer_t *buff = *buffer;
+    off_t fsize = get_file_size(logfile);
+    buff->events = (replay_event_t *)malloc((size_t)fsize);
+    if (!buff->events)
+        die("out of memory\n");
+
+    int fdin = open(logfile, O_RDONLY);
+    if (fdin < 0)
+        die("Could not open eventlog %s\n", logfile);
+
+    size_t rb = read(fdin, &(buff->num_fds), sizeof(buff->num_fds));
+    if (rb!=sizeof(buff->num_fds))
+        die("problems reading eventlog\n");
+
+    buff->fds = malloc(sizeof(int) * buff->num_fds);
+    if (!buff->fds)
+        die("out of memory\n");
+
+    int len, i;
+    char path_buff[256];  // should be more than enough
+    for (i = 0; i < buff->num_fds; i++) {
+        memset(path_buff, 0, sizeof(path_buff));
+        rb = read(fdin, &len, sizeof(len));
+        if (rb!=sizeof(len))
+            die("problems reading eventlog\n");
+
+        rb = read(fdin, &path_buff[0], len);
+        if (rb != len)
+            die("problems reading eventlog\n");
+
+        buff->fds[i] = open(path_buff, O_WRONLY | O_NDELAY);
+        if (buff->fds[i] < 0)
+            die("could not open device file %s\n", path_buff);
+    }
+
+    struct timeval start_time;
+    replay_event_t rep_ev;
+    buff->num_events = 0;
+    while(1) {
+        rb = read(fdin, &rep_ev, sizeof(rep_ev));
+        if (rb < (int)sizeof(rep_ev))
+            break;
+
+        if (buff->num_events == 0) {
+            start_time = rep_ev.event.time;
+        }
+        timersub(&(rep_ev.event.time), &start_time, &(rep_ev.event.time));
+        memcpy(&(buff->events[buff->num_events]), &rep_ev, sizeof(rep_ev));
+        buff->num_events++;
+    }
+    close(fdin);
+    return 0;
+}
+
+int replay_buffer_close(replay_buffer_t *buff)
+{
+    free(buff->fds);
+    free(buff->events);
+    free(buff);
+    return 0;
+}
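+
+/*
+ * Replay timing: replay_buffer_init() rebases all event timestamps so that
+ * the first event is at time zero. Each pass of the loop below computes
+ * desired_time = start_time + delta of the next event batch, sleeps until
+ * then, and writes out every event sharing that same (rebased) timestamp.
+ */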
+int replay_buffer_play(replay_buffer_t *buff)
+{
+    int i = 0, rb;
+    struct timeval start_time, now, desired_time, last_event_delta, delta;
+    memset(&last_event_delta, 0, sizeof(struct timeval));
+    gettimeofday(&start_time, NULL);
+
+    while (i < buff->num_events) {
+        gettimeofday(&now, NULL);
+        timeradd(&start_time, &last_event_delta, &desired_time);
+
+        if (timercmp(&desired_time, &now, >)) {
+            timersub(&desired_time, &now, &delta);
+            useconds_t d = (useconds_t)delta.tv_sec * 1000000 + delta.tv_usec;
+            dprintf("now %u.%u desiredtime %u.%u sleeping %u uS\n",
+                    (unsigned int)now.tv_sec, (unsigned int)now.tv_usec,
+                    (unsigned int)desired_time.tv_sec, (unsigned int)desired_time.tv_usec, d);
+            usleep(d);
+        }
+
+        int idx = (buff->events[i]).dev_idx;
+        struct input_event ev = (buff->events[i]).event;
+        while((i < buff->num_events) && !timercmp(&ev.time, &last_event_delta, !=)) {
+            rb = write(buff->fds[idx], &ev, sizeof(ev));
+            if (rb!=sizeof(ev))
+                die("problems writing\n");
+            dprintf("replayed event: type %d code %d value %d\n", ev.type, ev.code, ev.value);
+
+            i++;
+            if (i < buff->num_events) {  // don't read past the end of the event buffer
+                idx = (buff->events[i]).dev_idx;
+                ev = (buff->events[i]).event;
+            }
+        }
+        last_event_delta = ev.time;
+    }
+    return 0;
+}
+
+void replay(const char *logfile)
+{
+    replay_buffer_t *replay_buffer;
+    replay_buffer_init(&replay_buffer, logfile);
+#ifdef ANDROID
+    __android_log_write(ANDROID_LOG_INFO, "REVENT", "Replay starting");
+#endif
+    replay_buffer_play(replay_buffer);
+#ifdef ANDROID
+    __android_log_write(ANDROID_LOG_INFO, "REVENT", "Replay complete");
+#endif
+    replay_buffer_close(replay_buffer);
+}
+
+void record(inpdev_t *inpdev, int delay, const char *logfile)
+{
+    fd_set readfds;
+    FILE* fdout;
+    struct input_event ev;
+    int i;
+    int maxfd = 0;
+    int keydev=0;
+
+    int* fds = malloc(sizeof(int)*inpdev->id_pathc);
+    if (!fds) die("out of memory\n");
+
+    fdout = fopen(logfile, "wb");
+    if (!fdout) die("Could not open eventlog %s\n", logfile);
+
+    fwrite(&inpdev->id_pathc, sizeof(inpdev->id_pathc), 1, fdout);
+    for (i=0; i<inpdev->id_pathc; i++) {
+        int len = strlen(inpdev->id_pathv[i]);
+        fwrite(&len, sizeof(len), 1, fdout);
+        fwrite(inpdev->id_pathv[i], len, 1, fdout);
+    }
+
+    for (i=0; i < inpdev->id_pathc; i++)
+    {
+        fds[i] = open(inpdev->id_pathv[i], O_RDONLY);
+        if (fds[i]>maxfd) maxfd = fds[i];
+        dprintf("opened %s with %d\n", inpdev->id_pathv[i], fds[i]);
+        if (fds[i]<0) die("could not open %s\n", inpdev->id_pathv[i]);
+    }
+
+    int count = 0;
+    struct timeval tout;
+    while(1)
+    {
+        FD_ZERO(&readfds);
+        FD_SET(STDIN_FILENO, &readfds);
+        for (i=0; i < inpdev->id_pathc; i++)
+            FD_SET(fds[i], &readfds);
+        /* wait for input */
+        tout.tv_sec = delay;
+        tout.tv_usec = 0;
+        int r = select(maxfd+1, &readfds, NULL, NULL, &tout);
+        /* dprintf("got %d (err %d)\n", r, errno); */
+        if (!r) break;
+        if (FD_ISSET(STDIN_FILENO, &readfds)) {
+            // in this case the key down for the return key will be recorded,
+            // so we need to append the corresponding key up event
+            memset(&ev, 0, sizeof(ev));
+            ev.type = EV_KEY;
+            ev.code = KEY_ENTER;
+            ev.value = 0;
+            gettimeofday(&ev.time, NULL);
+            fwrite(&keydev, sizeof(keydev), 1, fdout);
+            fwrite(&ev, sizeof(ev), 1, fdout);
+            memset(&ev, 0, sizeof(ev));  // SYN
+            gettimeofday(&ev.time, NULL);
+            fwrite(&keydev, sizeof(keydev), 1, fdout);
+            fwrite(&ev, sizeof(ev), 1, fdout);
+            dprintf("added fake return exiting...\n");
+            break;
+        }
+
+        for (i=0; i < inpdev->id_pathc; i++)
+        {
+            if (FD_ISSET(fds[i], &readfds))
+            {
+                dprintf("Got event from %s\n", inpdev->id_pathv[i]);
+                memset(&ev, 0, sizeof(ev));
+                size_t rb = read(fds[i], (void*) &ev, sizeof(ev));
+                dprintf("%d event: type %d code %d value %d\n",
+                        (unsigned int)rb, ev.type, ev.code, ev.value);
+                if (ev.type == EV_KEY && ev.code == KEY_ENTER && ev.value == 1)
+                    keydev = i;
+                fwrite(&i, sizeof(i), 1, fdout);
+                fwrite(&ev, sizeof(ev), 1, fdout);
+                count++;
+            }
+        }
+    }
+
+    for (i=0; i < inpdev->id_pathc; i++)
+    {
+        close(fds[i]);
+    }
+
+    fclose(fdout);
+    free(fds);
+    dprintf("Recorded %d events\n", count);
+}
dprintf("%d event: type %d code %d value %d\n", + (unsigned int)rb, ev.type, ev.code, ev.value); + if (ev.type == EV_KEY && ev.code == KEY_ENTER && ev.value == 1) + keydev = i; + fwrite(&i, sizeof(i), 1, fdout); + fwrite(&ev, sizeof(ev), 1, fdout); + count++; + } + } + } + + for (i=0; i < inpdev->id_pathc; i++) + { + close(fds[i]); + } + + fclose(fdout); + free(fds); + dprintf("Recorded %d events\n", count); +} + + +void usage() +{ + printf("usage:\n revent [-h] [-v] COMMAND [OPTIONS] \n" + "\n" + " Options:\n" + " -h print this help message and quit.\n" + " -v enable verbose output.\n" + "\n" + " Commands:\n" + " record [-t SECONDS] [-d DEVICE] FILE\n" + " Record input event. stops after return on STDIN (or, optionally, \n" + " a fixed delay)\n" + "\n" + " FILE file into which events will be recorded.\n" + " -t SECONDS time, in seconds, for which to record events.\n" + " if not specifed, recording will continue until\n" + " return key is pressed.\n" + " -d DEVICE the number of the input device form which\n" + " events will be recoreded. If not specified, \n" + " all available inputs will be used.\n" + "\n" + " replay FILE\n" + " replays previously recorded events from the specified file.\n" + "\n" + " FILE file into which events will be recorded.\n" + "\n" + " dump FILE\n" + " dumps the contents of the specified event log to STDOUT in\n" + " human-readable form.\n" + "\n" + " FILE event log which will be dumped.\n" + "\n" + " info\n" + " shows info about each event char device\n" + "\n" + ); +} + +void revent_args_init(revent_args_t **rargs, int argc, char** argv) +{ + *rargs = malloc(sizeof(revent_args_t)); + revent_args_t *revent_args = *rargs; + revent_args->mode = INVALID; + revent_args->record_time = INT_MAX; + revent_args->device_number = -1; + revent_args->file = NULL; + + int opt; + while ((opt = getopt(argc, argv, "ht:d:v")) != -1) + { + switch (opt) { + case 'h': + usage(); + exit(0); + break; + case 't': + if (is_numeric(optarg)) { + revent_args->record_time = atoi(optarg); + dprintf("timeout: %d\n", revent_args->record_time); + } else { + die("-t parameter must be numeric; got %s.\n", optarg); + } + break; + case 'd': + if (is_numeric(optarg)) { + revent_args->device_number = atoi(optarg); + dprintf("device: %d\n", revent_args->device_number); + } else { + die("-d parameter must be numeric; got %s.\n", optarg); + } + break; + case 'v': + verbose = TRUE; + break; + default: + die("Unexpected option: %c", opt); + } + } + + int next_arg = optind; + if (next_arg == argc) { + usage(); + die("Must specify a command.\n"); + } + if (!strcmp(argv[next_arg], "record")) + revent_args->mode = RECORD; + else if (!strcmp(argv[next_arg], "replay")) + revent_args->mode = REPLAY; + else if (!strcmp(argv[next_arg], "dump")) + revent_args->mode = DUMP; + else if (!strcmp(argv[next_arg], "info")) + revent_args->mode = INFO; + else { + usage(); + die("Unknown command -- %s\n", argv[next_arg]); + } + next_arg++; + + if (next_arg != argc) { + revent_args->file = argv[next_arg]; + dprintf("file: %s\n", revent_args->file); + next_arg++; + if (next_arg != argc) { + die("Trailling arguments (use -h for help).\n"); + } + } + + if ((revent_args->mode != RECORD) && (revent_args->record_time != INT_MAX)) { + die("-t parameter is only valid for \"record\" command.\n"); + } + if ((revent_args->mode != RECORD) && (revent_args->device_number != -1)) { + die("-d parameter is only valid for \"record\" command.\n"); + } + if ((revent_args->mode == INFO) && (revent_args->file != NULL)) { + die("File path cannot be 
+
+int revent_args_close(revent_args_t *rargs)
+{
+    free(rargs);
+    return 0;
+}
+
+int main(int argc, char** argv)
+{
+    int i;
+    char *logfile = NULL;
+
+    revent_args_t *rargs;
+    revent_args_init(&rargs, argc, argv);
+
+    inpdev_t *inpdev;
+    inpdev_init(&inpdev, rargs->device_number);
+
+    switch(rargs->mode) {
+        case RECORD:
+            record(inpdev, rargs->record_time, rargs->file);
+            break;
+        case REPLAY:
+            replay(rargs->file);
+            break;
+        case DUMP:
+            dump(rargs->file);
+            break;
+        case INFO:
+            for (i = 0; i < inpdev->id_pathc; i++) {
+                printDevProperties(inpdev->id_pathv[i]);
+            }
+    };
+
+    inpdev_close(inpdev);
+    revent_args_close(rargs);
+    return 0;
+}
+
diff --git a/wlauto/external/terminalsize.py b/wlauto/external/terminalsize.py
new file mode 100644
index 00000000..32231020
--- /dev/null
+++ b/wlauto/external/terminalsize.py
@@ -0,0 +1,92 @@
+# Taken from
+# https://gist.github.com/jtriley/1108174
+import os
+import shlex
+import struct
+import platform
+import subprocess
+
+
+def get_terminal_size():
+    """ getTerminalSize()
+     - get width and height of console
+     - works on linux,os x,windows,cygwin(windows)
+     originally retrieved from:
+     http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python
+    """
+    current_os = platform.system()
+    tuple_xy = None
+    if current_os == 'Windows':
+        tuple_xy = _get_terminal_size_windows()
+        if tuple_xy is None:
+            tuple_xy = _get_terminal_size_tput()
+            # needed for window's python in cygwin's xterm!
+    if current_os in ['Linux', 'Darwin'] or current_os.startswith('CYGWIN'):
+        tuple_xy = _get_terminal_size_linux()
+    if tuple_xy is None:
+        print "default"
+        tuple_xy = (80, 25)      # default value
+    return tuple_xy
+
+
+def _get_terminal_size_windows():
+    try:
+        from ctypes import windll, create_string_buffer
+        # stdin handle is -10
+        # stdout handle is -11
+        # stderr handle is -12
+        h = windll.kernel32.GetStdHandle(-12)
+        csbi = create_string_buffer(22)
+        res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
+        if res:
+            (bufx, bufy, curx, cury, wattr,
+             left, top, right, bottom,
+             maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
+            sizex = right - left + 1
+            sizey = bottom - top + 1
+            return sizex, sizey
+    except:
+        pass
+
+
+def _get_terminal_size_tput():
+    # get terminal width
+    # src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
+    try:
+        # check_output (not check_call) is needed here: check_call returns the
+        # exit code rather than tput's output, so the sizes would be wrong.
+        cols = int(subprocess.check_output(shlex.split('tput cols')))
+        rows = int(subprocess.check_output(shlex.split('tput lines')))
+        return (cols, rows)
+    except:
+        pass
+
+
+def _get_terminal_size_linux():
+    def ioctl_GWINSZ(fd):
+        try:
+            import fcntl
+            import termios
+            cr = struct.unpack('hh',
+                               fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
+            return cr
+        except:
+            pass
+    cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
+    if not cr:
+        try:
+            fd = os.open(os.ctermid(), os.O_RDONLY)
+            cr = ioctl_GWINSZ(fd)
+            os.close(fd)
+        except:
+            pass
+    if not cr:
+        try:
+            cr = (os.environ['LINES'], os.environ['COLUMNS'])
+        except:
+            return None
+    return int(cr[1]), int(cr[0])
+
+
+if __name__ == "__main__":
+    sizex, sizey = get_terminal_size()
+    print 'width =', sizex, 'height =', sizey
+
diff --git a/wlauto/external/uiauto/build.sh b/wlauto/external/uiauto/build.sh
new file mode 100755
index 00000000..96b8b7f2
--- /dev/null
+++ b/wlauto/external/uiauto/build.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+#    Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+
+ant build
+
+cp bin/classes/com/arm/wlauto/uiauto/BaseUiAutomation.class ../../common
diff --git a/wlauto/external/uiauto/build.xml b/wlauto/external/uiauto/build.xml
new file mode 100644
index 00000000..478a86cc
--- /dev/null
+++ b/wlauto/external/uiauto/build.xml
@@ -0,0 +1,92 @@
+[92 lines of a stock Android Ant build script; the XML markup did not survive extraction]
diff --git a/wlauto/external/uiauto/project.properties b/wlauto/external/uiauto/project.properties
new file mode 100644
index 00000000..a3ee5ab6
--- /dev/null
+++ b/wlauto/external/uiauto/project.properties
@@ -0,0 +1,14 @@
+# This file is automatically generated by Android Tools.
+# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
+#
+# This file must be checked in Version Control Systems.
+#
+# To customize properties used by the Ant build system edit
+# "ant.properties", and override values to adapt the script to your
+# project structure.
+#
+# To enable ProGuard to shrink and obfuscate your code, uncomment this (available properties: sdk.dir, user.home):
+#proguard.config=${sdk.dir}/tools/proguard/proguard-android.txt:proguard-project.txt
+
+# Project target.
+target=android-17
diff --git a/wlauto/external/uiauto/src/com/arm/wlauto/uiauto/BaseUiAutomation.java b/wlauto/external/uiauto/src/com/arm/wlauto/uiauto/BaseUiAutomation.java
new file mode 100644
index 00000000..4d26100b
--- /dev/null
+++ b/wlauto/external/uiauto/src/com/arm/wlauto/uiauto/BaseUiAutomation.java
@@ -0,0 +1,113 @@
+/*    Copyright 2013-2015 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+
+package com.arm.wlauto.uiauto;
+
+import java.io.File;
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+import java.util.concurrent.TimeoutException;
+
+import android.app.Activity;
+import android.os.Bundle;
+
+// Import the uiautomator libraries
+import com.android.uiautomator.core.UiObject;
+import com.android.uiautomator.core.UiObjectNotFoundException;
+import com.android.uiautomator.core.UiScrollable;
+import com.android.uiautomator.core.UiSelector;
+import com.android.uiautomator.testrunner.UiAutomatorTestCase;
+
+public class BaseUiAutomation extends UiAutomatorTestCase {
+
+    public void sleep(int second) {
+        super.sleep(second * 1000);
+    }
+
+    public boolean takeScreenshot(String name) {
+        Bundle params = getParams();
+        String png_dir = params.getString("workdir");
+
+        try {
+            return getUiDevice().takeScreenshot(new File(png_dir, name + ".png"));
+        } catch(NoSuchMethodError e) {
+            return true;
+        }
+    }
+
+    public void waitText(String text) throws UiObjectNotFoundException {
+        waitText(text, 600);
+    }
+
+    public void waitText(String text, int second) throws UiObjectNotFoundException {
+        UiSelector selector = new UiSelector();
+        UiObject text_obj = new UiObject(selector.text(text)
+                                                 .className("android.widget.TextView"));
+        waitObject(text_obj, second);
+    }
+
+    public void waitObject(UiObject obj) throws UiObjectNotFoundException {
+        waitObject(obj, 600);
+    }
+
+    public void waitObject(UiObject obj, int second) throws UiObjectNotFoundException {
+        if (! obj.waitForExists(second * 1000)){
+            throw new UiObjectNotFoundException("UiObject is not found: "
+                    + obj.getSelector().toString());
+        }
+    }
+
+    public boolean waitUntilNoObject(UiObject obj, int second) {
+        return obj.waitUntilGone(second * 1000);
+    }
+
+    public void clearLogcat() throws Exception {
+        Runtime.getRuntime().exec("logcat -c");
+    }
+
+    public void waitForLogcatText(String searchText, long timeout) throws Exception {
+        long startTime = System.currentTimeMillis();
+        Process process = Runtime.getRuntime().exec("logcat");
+        BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream()));
+        String line;
+
+        long currentTime = System.currentTimeMillis();
+        boolean found = false;
+        while ((currentTime - startTime) < timeout){
+            sleep(2);  // poll every two seconds
+
+            while((line=reader.readLine())!=null) {
+                if (line.contains(searchText)) {
+                    found = true;
+                    break;
+                }
+            }
+
+            if (found) {
+                break;
+            }
+            currentTime = System.currentTimeMillis();
+        }
+
+        process.destroy();
+
+        if ((currentTime - startTime) >= timeout) {
+            // String.format must be called statically; invoking .format() on the
+            // literal would format searchText itself and drop the message.
+            throw new TimeoutException(String.format("Timed out waiting for Logcat text \"%s\"", searchText));
+        }
+    }
+}
+
diff --git a/wlauto/instrumentation/__init__.py b/wlauto/instrumentation/__init__.py
new file mode 100644
index 00000000..094b8fa6
--- /dev/null
+++ b/wlauto/instrumentation/__init__.py
@@ -0,0 +1,27 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from wlauto.core import instrumentation
+
+
+def instrument_is_installed(instrument):
+    """Returns ``True`` if the specified instrument is installed, and ``False``
+    otherwise. The instrument may be specified either as a name or a subclass (or
+    an instance of a subclass) of :class:`wlauto.core.Instrument`."""
+    return instrumentation.is_installed(instrument)
+
+
+def clear_instrumentation():
+    instrumentation.installed = []
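As a usage sketch (not part of this commit), another extension can guard a dependency on a second instrument with these helpers, much as the fps instrument later in this commit does for execution_time:

    from wlauto.instrumentation import instrument_is_installed
    from wlauto.exceptions import ConfigError

    def validate(self):
        # refuse to run unless the instrument we depend on is also installed
        if not instrument_is_installed('execution_time'):
            raise ConfigError('execution_time instrument must be installed.')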
diff --git a/wlauto/instrumentation/coreutil/__init__.py b/wlauto/instrumentation/coreutil/__init__.py
new file mode 100644
index 00000000..e63f8c3e
--- /dev/null
+++ b/wlauto/instrumentation/coreutil/__init__.py
@@ -0,0 +1,278 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import os
+import sys
+import re
+import time
+import shutil
+import logging
+import threading
+import subprocess
+import tempfile
+import csv
+
+from wlauto import Instrument, Parameter
+from wlauto.core.execution import ExecutionContext
+from wlauto.exceptions import InstrumentError, WorkerThreadError
+from wlauto.core import signal
+
+
+class CoreUtilization(Instrument):
+
+    name = 'coreutil'
+    description = """
+    Measures CPU core activity during workload execution in terms of the percentage of time a number
+    of cores were utilized above the specified threshold.
+
+    This workload generates ``coreutil.csv`` report in the workload's output directory. The report is
+    formatted as follows::
+
+        [sample report lost in extraction]

[Most of the remainder of this module did not survive extraction; only the report-writing
fragment below is recoverable.]

+            ... > round(float(self.threshold), 2):
+                count = count + 1
+            self.output[count] += 1
+        if self.cpu_util[0]:
+            scale_factor = round((float(1) / len(self.cpu_util[0])) * 100, 6)
+        else:
+            scale_factor = 0
+        for i in xrange(len(self.output)):
+            self.output[i] = self.output[i] * scale_factor
+        with open(self.outfile, 'a+') as tem:
+            writer = csv.writer(tem)
+            reader = csv.reader(tem)
+            if sum(1 for row in reader) == 0:
+                row = ['workload', 'iteration', ...
diff --git a/wlauto/instrumentation/daq/__init__.py b/wlauto/instrumentation/daq/__init__.py
[the file header and the start of this instrument's description were lost in extraction;
the description resumes here]
+        :daq_labels: List of port labels. Defaults to ``'PORT_<pnum>'``, where
+                     'pnum' is the number of the port.
+        :daq_device_id: The ID under which the DAQ is registered with the driver.
+                        Defaults to ``'Dev1'``.
+        :daq_v_range: Specifies the voltage range for the SOC voltage channel on the DAQ
+                      (please refer to :ref:`daq_setup` for details). Defaults to ``2.5``.
+        :daq_dv_range: Specifies the voltage range for the resistor voltage channel on
+                       the DAQ (please refer to :ref:`daq_setup` for details).
+                       Defaults to ``0.2``.
+        :daq_sampling_rate: DAQ sampling rate. DAQ will take this many samples each
+                            second. Please note that this may be limited by your DAQ model
+                            and the number of ports you're measuring (again, see
+                            :ref:`daq_setup`). Defaults to ``10000``.
+        :daq_channel_map: Represents mapping from logical AI channel number to physical
+                          connector on the DAQ (varies between DAQ models). The default
+                          assumes DAQ 6363 and similar with AI channels on connectors
+                          0-7 and 16-23.
+    """
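+    # Illustrative config.py snippet for this instrument; the daq_* global
+    # aliases follow the docstring above, but the values are invented for
+    # the example:
+    #
+    #     instrumentation = ['daq']
+    #     daq_resistor_values = [0.002, 0.002]   # Ohms, one per measured port
+    #     daq_labels = ['A57', 'A53']
+    #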
+ + """ + + parameters = [ + Parameter('server_host', kind=str, default='localhost', + description='The host address of the machine that runs the daq Server which the ' + 'insturment communicates with.'), + Parameter('server_port', kind=int, default=56788, + description='The port number for daq Server in which daq insturment communicates ' + 'with.'), + Parameter('device_id', kind=str, default='Dev1', + description='The ID under which the DAQ is registered with the driver.'), + Parameter('v_range', kind=float, default=2.5, + description='Specifies the voltage range for the SOC voltage channel on the DAQ ' + '(please refer to :ref:`daq_setup` for details).'), + Parameter('dv_range', kind=float, default=0.2, + description='Specifies the voltage range for the resistor voltage channel on ' + 'the DAQ (please refer to :ref:`daq_setup` for details).'), + Parameter('sampling_rate', kind=int, default=10000, + description='DAQ sampling rate. DAQ will take this many samples each ' + 'second. Please note that this maybe limitted by your DAQ model ' + 'and then number of ports you\'re measuring (again, see ' + ':ref:`daq_setup`)'), + Parameter('resistor_values', kind=list, mandatory=True, + description='The values of resistors (in Ohms) across which the voltages are measured on ' + 'each port.'), + Parameter('channel_map', kind=list_of_ints, default=(0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23), + description='Represents mapping from logical AI channel number to physical ' + 'connector on the DAQ (varies between DAQ models). The default ' + 'assumes DAQ 6363 and similar with AI channels on connectors ' + '0-7 and 16-23.'), + Parameter('labels', kind=list_of_strs, + description='List of port labels. If specified, the lenght of the list must match ' + 'the length of ``resistor_values``. 
Defaults to "PORT_", where ' + '"pnum" is the number of the port.') + ] + + def initialize(self, context): + devices = self._execute_command('list_devices') + if not devices: + raise InstrumentError('DAQ: server did not report any devices registered with the driver.') + self._results = OrderedDict() + + def setup(self, context): + self.logger.debug('Initialising session.') + self._execute_command('configure', config=self.device_config) + + def slow_start(self, context): + self.logger.debug('Starting collecting measurements.') + self._execute_command('start') + + def slow_stop(self, context): + self.logger.debug('Stopping collecting measurements.') + self._execute_command('stop') + + def update_result(self, context): # pylint: disable=R0914 + self.logger.debug('Downloading data files.') + output_directory = _d(os.path.join(context.output_directory, 'daq')) + self._execute_command('get_data', output_directory=output_directory) + for entry in os.listdir(output_directory): + context.add_iteration_artifact('DAQ_{}'.format(os.path.splitext(entry)[0]), + path=os.path.join('daq', entry), + kind='data', + description='DAQ power measurments.') + port = os.path.splitext(entry)[0] + path = os.path.join(output_directory, entry) + key = (context.spec.id, context.workload.name, context.current_iteration) + if key not in self._results: + self._results[key] = {} + with open(path) as fh: + reader = csv.reader(fh) + metrics = reader.next() + data = [map(float, d) for d in zip(*list(reader))] + n = len(data[0]) + means = [s / n for s in map(sum, data)] + for metric, value in zip(metrics, means): + metric_name = '{}_{}'.format(port, metric) + context.result.add_metric(metric_name, round(value, 3), UNITS[metric]) + self._results[key][metric_name] = round(value, 3) + + def teardown(self, context): + self.logger.debug('Terminating session.') + self._execute_command('close') + + def validate(self): + if not daq: + raise ImportError(import_error_mesg) + self._results = None + if self.labels: + if not (len(self.labels) == len(self.resistor_values)): # pylint: disable=superfluous-parens + raise ConfigError('Number of DAQ port labels does not match the number of resistor values.') + else: + self.labels = ['PORT_{}'.format(i) for i, _ in enumerate(self.resistor_values)] + self.server_config = ServerConfiguration(host=self.server_host, + port=self.server_port) + self.device_config = DeviceConfiguration(device_id=self.device_id, + v_range=self.v_range, + dv_range=self.dv_range, + sampling_rate=self.sampling_rate, + resistor_values=self.resistor_values, + channel_map=self.channel_map, + labels=self.labels) + try: + self.server_config.validate() + self.device_config.validate() + except ConfigurationError, ex: + raise ConfigError('DAQ configuration: ' + ex.message) # Re-raise as a WA error + + def before_overall_results_processing(self, context): + if self._results: + headers = ['id', 'workload', 'iteration'] + metrics = sorted(self._results.iteritems().next()[1].keys()) + headers += metrics + rows = [headers] + for key, value in self._results.iteritems(): + rows.append(list(key) + [value[m] for m in metrics]) + + outfile = os.path.join(context.output_directory, 'daq_power.csv') + with open(outfile, 'wb') as fh: + writer = csv.writer(fh) + writer.writerows(rows) + + def _execute_command(self, command, **kwargs): + # pylint: disable=E1101 + result = daq.execute_command(self.server_config, command, **kwargs) + if result.status == daq.Status.OK: + pass # all good + elif result.status == daq.Status.OKISH: + 
+            self.logger.debug(result.message)
+        elif result.status == daq.Status.ERROR:
+            raise InstrumentError('DAQ: {}'.format(result.message))
+        else:
+            raise InstrumentError('DAQ: Unexpected result: {} - {}'.format(result.status, result.message))
+        return result.data
diff --git a/wlauto/instrumentation/delay/__init__.py b/wlauto/instrumentation/delay/__init__.py
new file mode 100644
index 00000000..e942520e
--- /dev/null
+++ b/wlauto/instrumentation/delay/__init__.py
@@ -0,0 +1,181 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+#pylint: disable=W0613,E1101,E0203,W0201
+import time
+
+from wlauto import Instrument, Parameter
+from wlauto.exceptions import ConfigError, InstrumentError
+from wlauto.utils.types import boolean
+
+
+class DelayInstrument(Instrument):
+
+    name = 'delay'
+    description = """
+    This instrument introduces a delay before executing either an iteration
+    or all iterations for a spec.
+
+    The delay may be specified as either a fixed period or a temperature
+    threshold that must be reached.
+
+    Optionally, if an active cooling solution is employed to speed up temperature drop between
+    runs, it may be controlled using this instrument.
+
+    """
+
+    parameters = [
+        Parameter('temperature_file', default='/sys/devices/virtual/thermal/thermal_zone0/temp',
+                  global_alias='thermal_temp_file',
+                  description="""Full path to the sysfile on the device that contains the device's
+                  temperature."""),
+        Parameter('temperature_timeout', kind=int, default=600,
+                  global_alias='thermal_timeout',
+                  description="""
+                  The timeout after which the instrument will stop waiting even if the specified threshold
+                  temperature is not reached. If this timeout is hit, then a warning will be logged stating
+                  the actual temperature at which the timeout has ended.
+                  """),
+        Parameter('temperature_poll_period', kind=int, default=5,
+                  global_alias='thermal_sleep_time',
+                  description="""How long to sleep (in seconds) between polling current device temperature."""),
+        Parameter('temperature_between_specs', kind=int, default=None,
+                  global_alias='thermal_threshold_between_specs',
+                  description="""
+                  Temperature (in device-specific units) the device must cool down to before
+                  the next workload spec will be run.
+
+                  .. note:: This cannot be specified at the same time as ``fixed_between_specs``
+
+                  """),
+        Parameter('temperature_between_iterations', kind=int, default=None,
+                  global_alias='thermal_threshold_between_iterations',
+                  description="""
+                  Temperature (in device-specific units) the device must cool down to before
+                  the next iteration will be run.
+
+                  .. note:: This cannot be specified at the same time as ``fixed_between_iterations``
+
+                  """),
+        Parameter('temperature_before_start', kind=int, default=None,
+                  global_alias='thermal_threshold_before_start',
+                  description="""
+                  Temperature (in device-specific units) the device must cool down to just before
+                  the actual workload execution (after setup has been performed).
+
+                  .. note:: This cannot be specified at the same time as ``fixed_between_iterations``
+
+                  """),
+        Parameter('fixed_between_specs', kind=int, default=None,
+                  global_alias='fixed_delay_between_specs',
+                  description="""
+                  How long to sleep (in seconds) after all iterations for a workload spec have
+                  executed.
+
+                  .. note:: This cannot be specified at the same time as ``temperature_between_specs``
+
+                  """),
+        Parameter('fixed_between_iterations', kind=int, default=None,
+                  global_alias='fixed_delay_between_iterations',
+                  description="""
+                  How long to sleep (in seconds) after each iteration of a workload spec has
+                  executed.
+
+                  .. note:: This cannot be specified at the same time as ``temperature_between_iterations``
+
+                  """),
+        Parameter('active_cooling', kind=boolean, default=False,
+                  global_alias='thermal_active_cooling',
+                  description="""
+                  This instrument supports an active cooling solution while waiting for the device temperature
+                  to drop to the threshold. The solution involves an mbed controlling a fan. The mbed is signaled
+                  over a serial port. If this solution is present in the setup, this should be set to ``True``.
+                  """),
+    ]
+
+    def initialize(self, context):
+        if self.temperature_between_iterations == 0:
+            temp = self.device.get_sysfile_value(self.temperature_file, int)
+            self.logger.debug('Setting temperature threshold between iterations to {}'.format(temp))
+            self.temperature_between_iterations = temp
+        if self.temperature_between_specs == 0:
+            temp = self.device.get_sysfile_value(self.temperature_file, int)
+            self.logger.debug('Setting temperature threshold between workload specs to {}'.format(temp))
+            self.temperature_between_specs = temp
+
+    def slow_on_iteration_start(self, context):
+        if self.active_cooling:
+            self.device.stop_active_cooling()
+        if self.fixed_between_iterations:
+            self.logger.debug('Waiting for a fixed period after iteration...')
+            time.sleep(self.fixed_between_iterations)
+        elif self.temperature_between_iterations:
+            self.logger.debug('Waiting for temperature drop before iteration...')
+            self.wait_for_temperature(self.temperature_between_iterations)
+
+    def slow_on_spec_start(self, context):
+        if self.active_cooling:
+            self.device.stop_active_cooling()
+        if self.fixed_between_specs:
+            self.logger.debug('Waiting for a fixed period after spec execution...')
+            time.sleep(self.fixed_between_specs)
+        elif self.temperature_between_specs:
+            self.logger.debug('Waiting for temperature drop before spec execution...')
+            self.wait_for_temperature(self.temperature_between_specs)
+
+    def very_slow_start(self, context):
+        if self.active_cooling:
+            self.device.stop_active_cooling()
+        if self.temperature_before_start:
+            self.logger.debug('Waiting for temperature drop before commencing execution...')
+            self.wait_for_temperature(self.temperature_before_start)
+
+    def wait_for_temperature(self, temperature):
+        if self.active_cooling:
+            self.device.start_active_cooling()
+            self.do_wait_for_temperature(temperature)
+            self.device.stop_active_cooling()
+        else:
+            self.do_wait_for_temperature(temperature)
+
+    def do_wait_for_temperature(self, temperature):
+        reading = self.device.get_sysfile_value(self.temperature_file, int)
+        waiting_start_time = time.time()
+        while reading > temperature:
+            self.logger.debug('Device temperature: {}'.format(reading))
+            if time.time() - waiting_start_time > self.temperature_timeout:
+                self.logger.warning('Reached timeout; current temperature: {}'.format(reading))
+                break
+            time.sleep(self.temperature_poll_period)
+            reading = self.device.get_sysfile_value(self.temperature_file, int)
+
+    def validate(self):
+        if (self.temperature_between_specs is not None and
+                self.fixed_between_specs is not None):
+            raise ConfigError('Both fixed delay and thermal threshold specified for specs.')
+
+        if (self.temperature_between_iterations is not None and
+                self.fixed_between_iterations is not None):
+            raise ConfigError('Both fixed delay and thermal threshold specified for iterations.')
+
+        if not any([self.temperature_between_specs, self.fixed_between_specs, self.temperature_before_start,
+                    self.temperature_between_iterations, self.fixed_between_iterations]):
+            raise ConfigError('delay instrument is enabled, but no delay is specified.')
+
+        if self.active_cooling and not self.device.has('active_cooling'):
+            message = 'Your device does not support active cooling. Did you configure it with an appropriate module?'
+            raise InstrumentError(message)
+
diff --git a/wlauto/instrumentation/dmesg/__init__.py b/wlauto/instrumentation/dmesg/__init__.py
new file mode 100644
index 00000000..2603d8a4
--- /dev/null
+++ b/wlauto/instrumentation/dmesg/__init__.py
@@ -0,0 +1,62 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import os
+
+from wlauto import Instrument, Parameter
+from wlauto.utils.misc import ensure_file_directory_exists as _f
+
+
+class DmesgInstrument(Instrument):
+    # pylint: disable=no-member,attribute-defined-outside-init
+    """
+    Collects dmesg output before and after the run.
+ + """ + + name = 'dmesg' + + parameters = [ + Parameter('loglevel', kind=int, allowed_values=range(8), + description='Set loglevel for console output.') + ] + + loglevel_file = '/proc/sys/kernel/printk' + + def setup(self, context): + if self.loglevel: + self.old_loglevel = self.device.get_sysfile_value(self.loglevel_file) + self.device.set_sysfile_value(self.loglevel_file, self.loglevel, verify=False) + self.before_file = _f(os.path.join(context.output_directory, 'dmesg', 'before')) + self.after_file = _f(os.path.join(context.output_directory, 'dmesg', 'after')) + + def slow_start(self, context): + with open(self.before_file, 'w') as wfh: + wfh.write(self.device.execute('dmesg')) + context.add_artifact('dmesg_before', self.before_file, kind='data') + if self.device.is_rooted: + self.device.execute('dmesg -c', as_root=True) + + def slow_stop(self, context): + with open(self.after_file, 'w') as wfh: + wfh.write(self.device.execute('dmesg')) + context.add_artifact('dmesg_after', self.after_file, kind='data') + + def teardown(self, context): # pylint: disable=unused-argument + if self.loglevel: + self.device.set_sysfile_value(self.loglevel_file, self.old_loglevel, verify=False) + + diff --git a/wlauto/instrumentation/energy_probe/__init__.py b/wlauto/instrumentation/energy_probe/__init__.py new file mode 100644 index 00000000..2a5466c8 --- /dev/null +++ b/wlauto/instrumentation/energy_probe/__init__.py @@ -0,0 +1,145 @@ +# Copyright 2013-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +# pylint: disable=W0613,E1101,access-member-before-definition,attribute-defined-outside-init +import os +import subprocess +import signal +import struct +import csv +try: + import pandas +except ImportError: + pandas = None + +from wlauto import Instrument, Parameter, Executable +from wlauto.exceptions import InstrumentError, ConfigError +from wlauto.utils.types import list_of_numbers + + +class EnergyProbe(Instrument): + + name = 'energy_probe' + description = """Collects power traces using the ARM energy probe. + + This instrument requires ``caiman`` utility to be installed in the workload automation + host and be in the PATH. Caiman is part of DS-5 and should be in ``/path/to/DS-5/bin/`` . + Energy probe can simultaneously collect energy from up to 3 power rails. + + To connect the energy probe on a rail, connect the white wire to the pin that is closer to the + Voltage source and the black wire to the pin that is closer to the load (the SoC or the device + you are probing). Between the pins there should be a shunt resistor of known resistance in the + range of 5 to 20 mOhm. The resistance of the shunt resistors is a mandatory parameter + ``resistor_values``. + + .. note:: This instrument can process results a lot faster if python pandas is installed. + """ + + parameters = [ + Parameter('resistor_values', kind=list_of_numbers, default=[], + description="""The value of shunt resistors. 
This is a mandatory parameter."""),
+        Parameter('labels', kind=list, default=[],
+                  description="""Meaningful labels for each of the monitored rails."""),
+    ]
+
+    MAX_CHANNELS = 3
+
+    def __init__(self, device, **kwargs):
+        super(EnergyProbe, self).__init__(device, **kwargs)
+        self.attributes_per_sample = 3
+        self.bytes_per_sample = self.attributes_per_sample * 4
+        self.attributes = ['power', 'voltage', 'current']
+        for i, val in enumerate(self.resistor_values):
+            self.resistor_values[i] = int(1000 * float(val))
+
+    def validate(self):
+        if subprocess.call('which caiman', stdout=subprocess.PIPE, shell=True):
+            raise InstrumentError('caiman not in PATH. Cannot enable energy probe')
+        if not self.resistor_values:
+            raise ConfigError('At least one resistor value must be specified')
+        if len(self.resistor_values) > self.MAX_CHANNELS:
+            raise ConfigError('{} channels were specified when Energy Probe supports up to {}'
+                              .format(len(self.resistor_values), self.MAX_CHANNELS))
+        if pandas is None:
+            self.logger.warning("pandas package will significantly speed up this instrument")
+            self.logger.warning("to install it try: pip install pandas")
+
+    def setup(self, context):
+        if not self.labels:
+            self.labels = ["PORT_{}".format(channel) for channel, _ in enumerate(self.resistor_values)]
+        self.output_directory = os.path.join(context.output_directory, 'energy_probe')
+        rstring = ""
+        for i, rval in enumerate(self.resistor_values):
+            rstring += '-r {}:{} '.format(i, rval)
+        self.command = 'caiman -l {} {}'.format(rstring, self.output_directory)
+        os.makedirs(self.output_directory)
+
+    def start(self, context):
+        self.logger.debug(self.command)
+        self.caiman = subprocess.Popen(self.command,
+                                       stdout=subprocess.PIPE,
+                                       stderr=subprocess.PIPE,
+                                       stdin=subprocess.PIPE,
+                                       preexec_fn=os.setpgrp,
+                                       shell=True)
+
+    def stop(self, context):
+        os.killpg(self.caiman.pid, signal.SIGTERM)
+
+    def update_result(self, context):  # pylint: disable=too-many-locals
+        num_of_channels = len(self.resistor_values)
+        processed_data = [[] for _ in xrange(num_of_channels)]
+        filenames = [os.path.join(self.output_directory, '{}.csv'.format(label)) for label in self.labels]
+        struct_format = '{}I'.format(num_of_channels * self.attributes_per_sample)
+        not_a_full_row_seen = False
+        with open(os.path.join(self.output_directory, "0000000000"), "rb") as bfile:
+            while True:
+                data = bfile.read(num_of_channels * self.bytes_per_sample)
+                if data == '':
+                    break
+                try:
+                    unpacked_data = struct.unpack(struct_format, data)
+                except struct.error:
+                    if not_a_full_row_seen:
+                        self.logger.warn('possibly misaligned caiman raw data, row contained {} bytes'.format(len(data)))
+                        continue
+                    else:
+                        not_a_full_row_seen = True
+                for i in xrange(num_of_channels):
+                    index = i * self.attributes_per_sample
+                    processed_data[i].append({attr: val for attr, val in
+                                              zip(self.attributes, unpacked_data[index:index + self.attributes_per_sample])})
+        for i, path in enumerate(filenames):
+            with open(path, 'w') as f:
+                if pandas is not None:
+                    self._pandas_produce_csv(processed_data[i], f)
+                else:
+                    self._slow_produce_csv(processed_data[i], f)
+
+    # pylint: disable=R0201
+    def _pandas_produce_csv(self, data, f):
+        dframe = pandas.DataFrame(data)
+        dframe = dframe / 1000.0
+        dframe.to_csv(f)
+
+    def _slow_produce_csv(self, data, f):
+        new_data = []
+        for entry in data:
+            new_data.append({key: val / 1000.0 for key, val in entry.items()})
+        writer = csv.DictWriter(f, self.attributes)
+        writer.writeheader()
+        writer.writerows(new_data)
+
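For reference, a standalone sketch (not part of this commit) of reading the raw file parsed by update_result() above. It assumes two monitored channels and the layout the instrument expects: consecutive little-endian uint32 triplets of power, voltage and current in milli-units, in a file named "0000000000":

    import struct

    NUM_CHANNELS = 2   # assumed for this example
    ATTRS = ['power', 'voltage', 'current']
    ROW = '{}I'.format(NUM_CHANNELS * len(ATTRS))
    row_size = struct.calcsize(ROW)

    with open('0000000000', 'rb') as fh:
        while True:
            chunk = fh.read(row_size)
            if len(chunk) < row_size:
                break
            values = struct.unpack(ROW, chunk)
            for chan in xrange(NUM_CHANNELS):
                power, voltage, current = values[chan * 3:chan * 3 + 3]
                # raw values are in milli-units; scale as the instrument does
                print chan, power / 1000.0, voltage / 1000.0, current / 1000.0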
diff --git a/wlauto/instrumentation/fps/__init__.py b/wlauto/instrumentation/fps/__init__.py
new file mode 100644
index 00000000..ecdd1bb6
--- /dev/null
+++ b/wlauto/instrumentation/fps/__init__.py
@@ -0,0 +1,298 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=W0613,E1101
+from __future__ import division
+import os
+import sys
+import time
+import csv
+import shutil
+import threading
+import errno
+import tempfile
+
+from distutils.version import LooseVersion
+
+
+from wlauto import Instrument, Parameter, IterationResult
+from wlauto.instrumentation import instrument_is_installed
+from wlauto.exceptions import (InstrumentError, WorkerThreadError, ConfigError,
+                               DeviceNotRespondingError, TimeoutError)
+from wlauto.utils.types import boolean, numeric
+
+try:
+    import pandas as pd
+except ImportError:
+    pd = None
+
+
+VSYNC_INTERVAL = 16666667
+EPSYLON = 0.0001
+
+
+class FpsInstrument(Instrument):
+
+    name = 'fps'
+    description = """
+    Measures Frames Per Second (FPS) and associated metrics for a workload's main View.
+
+    .. note:: This instrument depends on pandas Python library (which is not part of standard
+              WA dependencies), so you will need to install that first, before you can use it.
+
+    The view is specified by the workload as ``view`` attribute. This defaults
+    to ``'SurfaceView'`` for game workloads, and ``None`` for non-game
+    workloads (as for them FPS measurement usually doesn't make sense).
+    Individual workloads may override this.
+
+    This instrument adds four metrics to the results:
+
+        :FPS: Frames Per Second. This is the frame rate of the workload.
+        :frames: The total number of frames rendered during the execution of
+                 the workload.
+        :janks: The number of "janks" that occurred during execution of the
+                workload. Janks are sudden shifts in frame rate. They result
+                in a "stuttery" UI. See http://jankfree.org/jank-busters-io
+        :not_at_vsync: The number of frames that did not render in a single
+                       vsync cycle.
+
+    """
+
+    parameters = [
+        Parameter('drop_threshold', kind=numeric, default=5,
+                  description='Data points below this FPS will be dropped as they '
+                              'do not constitute "real" gameplay. The assumption '
+                              'being that while actually running, the FPS in the '
+                              'game will not drop below X frames per second, '
+                              'except on loading screens, menus, etc, which '
+                              'should not contribute to FPS calculation. '),
+        Parameter('keep_raw', kind=boolean, default=False,
+                  description='If set to True, this will keep the raw dumpsys output '
+                              'in the results directory (this is mainly used for debugging). '
+                              'Note: frames.csv with collected frames data will always be '
+                              'generated regardless of this setting.'),
+        Parameter('crash_check', kind=boolean, default=True,
+                  description="""
+                  Specifies whether the instrument should check for crashed content by examining
+                  frame data. If this is set, ``execution_time`` instrument must also be installed.
+                  The check is performed by using the measured FPS and execution time to estimate the expected
+                  frame count and comparing that against the measured frame count. If the ratio of
+                  measured/expected is too low, then it is assumed that the content has crashed part way
+                  during the run. What is "too low" is determined by ``crash_threshold``.
+
+                  .. note:: This is not 100% fool-proof. If the crash occurs sufficiently close to
+                            workload's termination, it may not be detected. If this is expected, the
+                            threshold may be adjusted up to compensate.
+                  """),
+        Parameter('crash_threshold', kind=float, default=0.7,
+                  description="""
+                  Specifies the threshold used to decide whether a measured/expected frames ratio indicates
+                  a content crash. E.g. a value of ``0.75`` means that if the measured frame count is more
+                  than a quarter lower than expected, it will be treated as a content crash.
+                  """),
+    ]
+
+    clear_command = 'dumpsys SurfaceFlinger --latency-clear '
+
+    def __init__(self, device, **kwargs):
+        super(FpsInstrument, self).__init__(device, **kwargs)
+        self.collector = None
+        self.outfile = None
+        self.is_enabled = True
+
+    def validate(self):
+        if not pd or LooseVersion(pd.__version__) < LooseVersion('0.13.1'):
+            message = ('fps instrument requires pandas Python package (version 0.13.1 or higher) to be installed.\n'
+                       'You can install it with pip, e.g. "sudo pip install pandas"')
+            raise InstrumentError(message)
+        if self.crash_check and not instrument_is_installed('execution_time'):
+            raise ConfigError('execution_time instrument must be installed in order to check for content crash.')
+
+    def setup(self, context):
+        workload = context.workload
+        if hasattr(workload, 'view'):
+            self.outfile = os.path.join(context.output_directory, 'frames.csv')
+            self.collector = LatencyCollector(self.outfile, self.device, workload.view or '', self.keep_raw, self.logger)
+            self.device.execute(self.clear_command)
+        else:
+            self.logger.debug('Workload does not contain a view; disabling...')
+            self.is_enabled = False
+
+    def start(self, context):
+        if self.is_enabled:
+            self.logger.debug('Starting SurfaceFlinger collection...')
+            self.collector.start()
+
+    def stop(self, context):
+        if self.is_enabled and self.collector.is_alive():
+            self.logger.debug('Stopping SurfaceFlinger collection...')
+            self.collector.stop()
+
+    def update_result(self, context):
+        if self.is_enabled:
+            data = pd.read_csv(self.outfile)
+            if not data.empty:  # pylint: disable=maybe-no-member
+                self._update_stats(context, data)
+            else:
+                context.result.add_metric('FPS', float('nan'))
+                context.result.add_metric('frame_count', 0)
+                context.result.add_metric('janks', 0)
+                context.result.add_metric('not_at_vsync', 0)
+
+    def slow_update_result(self, context):
+        result = context.result
+        if result.has_metric('execution_time'):
+            self.logger.debug('Checking for crashed content.')
+            exec_time = result['execution_time'].value
+            fps = result['FPS'].value
+            frames = result['frame_count'].value
+            if all([exec_time, fps, frames]):
+                expected_frames = fps * exec_time
+                ratio = frames / expected_frames
+                self.logger.debug('actual/expected frames: {:.2}'.format(ratio))
+                if ratio < self.crash_threshold:
+                    self.logger.error('Content for {} appears to have crashed.'.format(context.spec.label))
+                    result.status = IterationResult.FAILED
+                    result.add_event('Content crash detected (actual/expected frames: {:.2}).'.format(ratio))
+
+    def _update_stats(self, context, data):
+        vsync_interval = self.collector.refresh_period
+        actual_present_time_deltas = 
(data.actual_present_time - data.actual_present_time.shift()).drop(0) # pylint: disable=E1103 + vsyncs_to_compose = (actual_present_time_deltas / vsync_interval).apply(lambda x: int(round(x, 0))) + # drop values lower than drop_threshold FPS as real in-game frame + # rate is unlikely to drop below that (except on loading screens + # etc, which should not be factored in frame rate calculation). + keep_filter = (1.0 / (vsyncs_to_compose * (vsync_interval / 1e9))) > self.drop_threshold + filtered_vsyncs_to_compose = vsyncs_to_compose[keep_filter] + if not filtered_vsyncs_to_compose.empty: + total_vsyncs = filtered_vsyncs_to_compose.sum() + if total_vsyncs: + frame_count = filtered_vsyncs_to_compose.size + fps = 1e9 * frame_count / (vsync_interval * total_vsyncs) + context.result.add_metric('FPS', fps) + context.result.add_metric('frame_count', frame_count) + else: + context.result.add_metric('FPS', float('nan')) + context.result.add_metric('frame_count', 0) + + vtc_deltas = filtered_vsyncs_to_compose - filtered_vsyncs_to_compose.shift() + vtc_deltas.index = range(0, vtc_deltas.size) + vtc_deltas = vtc_deltas.drop(0).abs() + janks = vtc_deltas.apply(lambda x: (x > EPSYLON) and 1 or 0).sum() + not_at_vsync = vsyncs_to_compose.apply(lambda x: (abs(x - 1.0) > EPSYLON) and 1 or 0).sum() + context.result.add_metric('janks', janks) + context.result.add_metric('not_at_vsync', not_at_vsync) + else: # no filtered_vsyncs_to_compose + context.result.add_metric('FPS', float('nan')) + context.result.add_metric('frame_count', 0) + context.result.add_metric('janks', 0) + context.result.add_metric('not_at_vsync', 0) + + +class LatencyCollector(threading.Thread): + + # Note: the size of the frames buffer for a particular surface is defined + # by NUM_FRAME_RECORDS inside android/services/surfaceflinger/FrameTracker.h. + # At the time of writing, this was hard-coded to 128. So at 60 fps + # (and there is no reason to go above that, as it matches vsync rate + # on pretty much all phones), there is just over 2 seconds' worth of + # frames in there. Hence the sleep time of 2 seconds between dumps. 
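+    # Each dump produced by the command below starts with a single number (the
+    # display refresh period in ns), followed by one line per frame containing
+    # three timestamps: desired present time, actual present time and frame
+    # ready time, all in ns; see _process_trace_line() below.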
+    #command_template = 'while (true); do dumpsys SurfaceFlinger --latency {}; sleep 2; done'
+    command_template = 'dumpsys SurfaceFlinger --latency {}'
+
+    def __init__(self, outfile, device, activity, keep_raw, logger):
+        super(LatencyCollector, self).__init__()
+        self.outfile = outfile
+        self.device = device
+        self.command = self.command_template.format(activity)
+        self.keep_raw = keep_raw
+        self.logger = logger
+        self.stop_signal = threading.Event()
+        self.frames = []
+        self.last_ready_time = 0
+        self.refresh_period = VSYNC_INTERVAL
+        self.drop_threshold = self.refresh_period * 1000
+        self.exc = None
+        self.unresponsive_count = 0
+
+    def run(self):
+        try:
+            self.logger.debug('SurfaceFlinger collection started.')
+            self.stop_signal.clear()
+            fd, temp_file = tempfile.mkstemp()
+            self.logger.debug('temp file: {}'.format(temp_file))
+            wfh = os.fdopen(fd, 'wb')
+            try:
+                while not self.stop_signal.is_set():
+                    wfh.write(self.device.execute(self.command))
+                    time.sleep(2)
+            finally:
+                wfh.close()
+            # TODO: this can happen after the run during results processing
+            with open(temp_file) as fh:
+                text = fh.read().replace('\r\n', '\n').replace('\r', '\n')
+                for line in text.split('\n'):
+                    line = line.strip()
+                    if line:
+                        self._process_trace_line(line)
+            if self.keep_raw:
+                raw_file = os.path.join(os.path.dirname(self.outfile), 'surfaceflinger.raw')
+                shutil.copy(temp_file, raw_file)
+            os.unlink(temp_file)
+        except (DeviceNotRespondingError, TimeoutError):  # pylint: disable=W0703
+            raise
+        except Exception, e:  # pylint: disable=W0703
+            self.logger.warning('Exception on collector thread: {}({})'.format(e.__class__.__name__, e))
+            self.exc = WorkerThreadError(self.name, sys.exc_info())
+        self.logger.debug('SurfaceFlinger collection stopped.')
+
+        with open(self.outfile, 'w') as wfh:
+            writer = csv.writer(wfh)
+            writer.writerow(['desired_present_time', 'actual_present_time', 'frame_ready_time'])
+            writer.writerows(self.frames)
+        self.logger.debug('Frames data written.')
+
+    def stop(self):
+        self.stop_signal.set()
+        self.join()
+        if self.unresponsive_count:
+            message = 'SurfaceFlinger was unresponsive {} times.'.format(self.unresponsive_count)
+            if self.unresponsive_count > 10:
+                self.logger.warning(message)
+            else:
+                self.logger.debug(message)
+        if self.exc:
+            raise self.exc  # pylint: disable=E0702
+        self.logger.debug('FPS collection complete.')
+
+    def _process_trace_line(self, line):
+        parts = line.split()
+        if len(parts) == 3:
+            desired_present_time, actual_present_time, frame_ready_time = map(int, parts)
+            if frame_ready_time <= self.last_ready_time:
+                return  # duplicate frame
+            if (frame_ready_time - desired_present_time) > self.drop_threshold:
+                self.logger.debug('Dropping bogus frame {}.'.format(line))
+                return  # bogus data
+            self.last_ready_time = frame_ready_time
+            self.frames.append((desired_present_time, actual_present_time, frame_ready_time))
+        elif len(parts) == 1:
+            self.refresh_period = int(parts[0])
+            self.drop_threshold = self.refresh_period * 10
+        elif 'SurfaceFlinger appears to be unresponsive, dumping anyways' in line:
+            self.unresponsive_count += 1
+        else:
+            self.logger.warning('Unexpected SurfaceFlinger dump output: {}'.format(line))
diff --git a/wlauto/instrumentation/hwmon/__init__.py b/wlauto/instrumentation/hwmon/__init__.py
new file mode 100644
index 00000000..598564f0
--- /dev/null
+++ b/wlauto/instrumentation/hwmon/__init__.py
@@ -0,0 +1,120 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file
except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +# pylint: disable=W0613,E1101 +from __future__ import division +from collections import OrderedDict + +from wlauto import Parameter, Instrument +from wlauto.exceptions import InstrumentError, ConfigError +from wlauto.utils.hwmon import discover_sensors +from wlauto.utils.types import list_of_strs + + +# sensor_kind: (report_type, units, conversion) +HWMON_SENSORS = { + 'energy': ('diff', 'Joules', lambda x: x / 10 ** 6), + 'temp': ('before/after', 'Celsius', lambda x: x / 10 ** 3), +} + +HWMON_SENSOR_PRIORITIES = ['energy', 'temp'] + + +class HwmonInstrument(Instrument): + + name = 'hwmon' + description = """ + Hardware Monitor (hwmon) is a generic Linux kernel subsystem, + providing access to hardware monitoring components like temperature or + voltage/current sensors. + + The following web page has more information: + + http://blogs.arm.com/software-enablement/925-linux-hwmon-power-management-and-arm-ds-5-streamline/ + + You can specify which sensors HwmonInstrument looks for by specifying + hwmon_sensors in your config.py, e.g. :: + + hwmon_sensors = ['energy', 'temp'] + + If this setting is not specified, it will look for all sensors it knows about. + Current valid values are:: + + :energy: Collect energy measurements and report energy consumed + during run execution (the diff of before and after readings) + in Joules. + :temp: Collect temperature measurements and report the before and + after readings in degrees Celsius. 
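+
+    As an illustration of the reporting (hypothetical numbers): an energy
+    sensor with a raw reading of 12000000 before the run and 15000000 after,
+    assuming raw values are in microjoules as implied by the instrument's
+    conversion, would be reported as a single metric of
+    (15000000 - 12000000) / 10^6 = 3.0 Joules.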
+
+    """
+
+    parameters = [
+        Parameter('sensors', kind=list_of_strs, default=['energy', 'temp'],
+                  description='The kinds of sensors the hwmon instrument will look for.')
+    ]
+
+    def __init__(self, device, **kwargs):
+        super(HwmonInstrument, self).__init__(device, **kwargs)
+
+        if self.sensors:
+            self.sensor_kinds = {}
+            for kind in self.sensors:
+                if kind in HWMON_SENSORS:
+                    self.sensor_kinds[kind] = HWMON_SENSORS[kind]
+                else:
+                    message = 'Unexpected sensor type: {}; must be in {}'.format(kind, HWMON_SENSORS.keys())
+                    raise ConfigError(message)
+        else:
+            self.sensor_kinds = HWMON_SENSORS
+
+        self.sensors = []
+
+    def setup(self, context):
+        self.sensors = []
+        self.logger.debug('Searching for HWMON sensors.')
+        discovered_sensors = discover_sensors(self.device, self.sensor_kinds.keys())
+        for sensor in sorted(discovered_sensors, key=lambda s: HWMON_SENSOR_PRIORITIES.index(s.kind)):
+            self.logger.debug('Adding {}'.format(sensor.filepath))
+            self.sensors.append(sensor)
+        for sensor in self.sensors:
+            sensor.clear_readings()
+
+    def fast_start(self, context):
+        for sensor in reversed(self.sensors):
+            sensor.take_reading()
+
+    def fast_stop(self, context):
+        for sensor in self.sensors:
+            sensor.take_reading()
+
+    def update_result(self, context):
+        for sensor in self.sensors:
+            try:
+                report_type, units, conversion = HWMON_SENSORS[sensor.kind]
+                if report_type == 'diff':
+                    before, after = sensor.readings
+                    diff = conversion(after - before)
+                    context.result.add_metric(sensor.label, diff, units)
+                elif report_type == 'before/after':
+                    before, after = sensor.readings
+                    context.result.add_metric(sensor.label + ' before', conversion(before), units)
+                    context.result.add_metric(sensor.label + ' after', conversion(after), units)
+                else:
+                    raise InstrumentError('Unexpected report_type: {}'.format(report_type))
+            except ValueError, e:
+                self.logger.error('Could not collect all {} readings for {}'.format(sensor.kind, sensor.label))
+                self.logger.error('Got: {}'.format(e))
diff --git a/wlauto/instrumentation/juno_energy/__init__.py b/wlauto/instrumentation/juno_energy/__init__.py
new file mode 100644
index 00000000..4c1a4a4c
--- /dev/null
+++ b/wlauto/instrumentation/juno_energy/__init__.py
@@ -0,0 +1,77 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=W0613,W0201
+import os
+import csv
+import time
+import threading
+import logging
+from operator import itemgetter
+
+from wlauto import Instrument, File, Parameter
+from wlauto.exceptions import InstrumentError
+
+
+class JunoEnergy(Instrument):
+
+    name = 'juno_energy'
+    description = """
+    Collects internal energy meter measurements from the Juno development board.
+
+    This instrument was created because (at the time of creation) Juno's energy
+    meter measurements aren't exposed through HWMON or a similar standardized
+    mechanism, necessitating a dedicated instrument to access them.
+
+    This instrument, and the ``readenergy`` executable it relies on, are very much tied
+    to the Juno platform and are not expected to work on other boards.
+
+    """
+
+    parameters = [
+        Parameter('period', kind=float, default=0.1,
+                  description='Specifies the time, in seconds, between polling energy counters.'),
+    ]
+
+    def on_run_init(self, context):
+        local_file = context.resolver.get(File(self, 'readenergy'))
+        self.device.killall('readenergy', as_root=True)
+        self.readenergy = self.device.install(local_file)
+
+    def setup(self, context):
+        self.host_output_file = os.path.join(context.output_directory, 'energy.csv')
+        self.device_output_file = self.device.path.join(self.device.working_directory, 'energy.csv')
+        self.command = '{} -o {}'.format(self.readenergy, self.device_output_file)
+        self.device.killall('readenergy', as_root=True)
+
+    def start(self, context):
+        self.device.kick_off(self.command)
+
+    def stop(self, context):
+        self.device.killall('readenergy', signal='TERM', as_root=True)
+
+    def update_result(self, context):
+        self.device.pull_file(self.device_output_file, self.host_output_file)
+        context.add_artifact('junoenergy', self.host_output_file, 'data')
+
+    def teardown(self, context):
+        self.device.delete_file(self.device_output_file)
+
+    def validate(self):
+        if self.device.name.lower() != 'juno':
+            message = 'juno_energy instrument is only supported on juno devices; found {}'
+            raise InstrumentError(message.format(self.device.name))
diff --git a/wlauto/instrumentation/juno_energy/readenergy b/wlauto/instrumentation/juno_energy/readenergy
new file mode 100755
index 00000000..c26991c2
Binary files /dev/null and b/wlauto/instrumentation/juno_energy/readenergy differ
diff --git a/wlauto/instrumentation/misc/__init__.py b/wlauto/instrumentation/misc/__init__.py
new file mode 100644
index 00000000..6fc55de9
--- /dev/null
+++ b/wlauto/instrumentation/misc/__init__.py
@@ -0,0 +1,365 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=W0613,no-member,attribute-defined-outside-init
+"""
+
+Some "standard" instruments to collect additional info about workload execution.
+
+.. note:: The run() method of a Workload may perform some "boilerplate" as well as
+          the actual execution of the workload (e.g. it may contain UI automation
+          needed to start the workload). This "boilerplate" execution will also
+          be measured by these instruments. As such, they are not suitable for
+          collecting precise data about specific operations.
+"""
+import os
+import re
+import logging
+import time
+import tarfile
+from itertools import izip, izip_longest
+from subprocess import CalledProcessError
+
+from wlauto import Instrument, Parameter
+from wlauto.core import signal
+from wlauto.exceptions import DeviceError
+from wlauto.utils.misc import diff_tokens, write_table, check_output, as_relative
+from wlauto.utils.misc import ensure_file_directory_exists as _f
+from wlauto.utils.misc import ensure_directory_exists as _d
+from wlauto.utils.android import ApkInfo
+from wlauto.utils.types import list_of_strings
+
+
+logger = logging.getLogger(__name__)
+
+
+class SysfsExtractor(Instrument):
+
+    name = 'sysfs_extractor'
+    description = """
+    Collects the contents of a set of directories before and after workload execution
+    and diffs the results.
+
+    """
+
+    mount_command = 'mount -t tmpfs -o size={} tmpfs {}'
+    extract_timeout = 30
+    tarname = 'sysfs.tar.gz'
+
+    parameters = [
+        Parameter('paths', kind=list_of_strings, mandatory=True,
+                  description="""A list of paths to be pulled from the device. These could be directories
+                                 as well as files.""",
+                  global_alias='sysfs_extract_dirs'),
+        Parameter('tmpfs_mount_point', default=None,
+                  description="""Mount point for the tmpfs partition used to store snapshots of paths."""),
+        Parameter('tmpfs_size', default='32m',
+                  description="""Size of the tmpfs partition."""),
+    ]
+
+    def initialize(self, context):
+        if self.device.is_rooted:
+            self.on_device_before = self.device.path.join(self.tmpfs_mount_point, 'before')
+            self.on_device_after = self.device.path.join(self.tmpfs_mount_point, 'after')
+
+            if not self.device.file_exists(self.tmpfs_mount_point):
+                self.device.execute('mkdir -p {}'.format(self.tmpfs_mount_point), as_root=True)
+                self.device.execute(self.mount_command.format(self.tmpfs_size, self.tmpfs_mount_point),
+                                    as_root=True)
+
+    def setup(self, context):
+        self.before_dirs = [
+            _d(os.path.join(context.output_directory, 'before', self._local_dir(d)))
+            for d in self.paths
+        ]
+        self.after_dirs = [
+            _d(os.path.join(context.output_directory, 'after', self._local_dir(d)))
+            for d in self.paths
+        ]
+        self.diff_dirs = [
+            _d(os.path.join(context.output_directory, 'diff', self._local_dir(d)))
+            for d in self.paths
+        ]
+
+        if self.device.is_rooted:
+            for d in self.paths:
+                before_dir = self.device.path.join(self.on_device_before,
+                                                   self.device.path.dirname(as_relative(d)))
+                after_dir = self.device.path.join(self.on_device_after,
+                                                  self.device.path.dirname(as_relative(d)))
+                if self.device.file_exists(before_dir):
+                    self.device.execute('rm -rf {}'.format(before_dir), as_root=True)
+                self.device.execute('mkdir -p {}'.format(before_dir), as_root=True)
+                if self.device.file_exists(after_dir):
+                    self.device.execute('rm -rf {}'.format(after_dir), as_root=True)
+                self.device.execute('mkdir -p {}'.format(after_dir), as_root=True)
+
+    def slow_start(self, context):
+        if self.device.is_rooted:
+            for d in self.paths:
+                dest_dir = self.device.path.join(self.on_device_before, as_relative(d))
+                if '*' in dest_dir:
+                    dest_dir = self.device.path.dirname(dest_dir)
+                self.device.execute('busybox cp -Hr {} {}'.format(d, dest_dir),
+                                    as_root=True, check_exit_code=False)
+        else:  # not rooted
+            for dev_dir, before_dir in zip(self.paths, self.before_dirs):
+                self.device.pull_file(dev_dir, before_dir)
+
+    def slow_stop(self, context):
+        if self.device.is_rooted:
+            for d in self.paths:
+                dest_dir = self.device.path.join(self.on_device_after, as_relative(d))
+                if '*' in dest_dir:
+                    dest_dir = self.device.path.dirname(dest_dir)
+                self.device.execute('busybox cp -Hr {} {}'.format(d, dest_dir),
+                                    as_root=True, check_exit_code=False)
+        else:  # not rooted
+            for dev_dir, after_dir in zip(self.paths, self.after_dirs):
+                self.device.pull_file(dev_dir, after_dir)
+
+    def update_result(self, context):
+        if self.device.is_rooted:
+            on_device_tarball = self.device.path.join(self.device.working_directory, self.tarname)
+            on_host_tarball = os.path.join(context.output_directory, self.tarname)
+            self.device.execute('busybox tar czf {} -C {} .'.format(on_device_tarball, self.tmpfs_mount_point),
+                                as_root=True)
+            self.device.execute('chmod 0777 {}'.format(on_device_tarball), as_root=True)
+            self.device.pull_file(on_device_tarball, on_host_tarball)
+            with tarfile.open(on_host_tarball, 'r:gz') as tf:
+                tf.extractall(context.output_directory)
+            self.device.delete_file(on_device_tarball)
+            os.remove(on_host_tarball)
+
+        for after_dir in self.after_dirs:
+            if not os.listdir(after_dir):
+                self.logger.error('sysfs files were not pulled from the device.')
+                return
+        for diff_dir, before_dir, after_dir in zip(self.diff_dirs, self.before_dirs, self.after_dirs):
+            _diff_sysfs_dirs(before_dir, after_dir, diff_dir)
+
+    def teardown(self, context):
+        self._one_time_setup_done = []
+
+    def finalize(self, context):
+        if self.device.is_rooted:
+            try:
+                self.device.execute('umount {}'.format(self.tmpfs_mount_point), as_root=True)
+            except (DeviceError, CalledProcessError):
+                # assume it's a directory but not a mount point
+                pass
+            self.device.execute('rm -rf {}'.format(self.tmpfs_mount_point), as_root=True)
+
+    def validate(self):
+        if not self.tmpfs_mount_point:  # pylint: disable=access-member-before-definition
+            self.tmpfs_mount_point = self.device.path.join(self.device.working_directory, 'temp-fs')
+
+    def _local_dir(self, directory):
+        return os.path.dirname(as_relative(directory).replace(self.device.path.sep, os.sep))
+
+
+class ExecutionTimeInstrument(Instrument):
+
+    name = 'execution_time'
+    description = """
+    Measure how long it took to execute the run() methods of a Workload.
+
+    """
+
+    priority = 15
+
+    def __init__(self, device, **kwargs):
+        super(ExecutionTimeInstrument, self).__init__(device, **kwargs)
+        self.start_time = None
+        self.end_time = None
+
+    def on_run_start(self, context):
+        signal.connect(self.get_start_time, signal.BEFORE_WORKLOAD_EXECUTION, priority=self.priority)
+        signal.connect(self.get_stop_time, signal.AFTER_WORKLOAD_EXECUTION, priority=self.priority)
+
+    def get_start_time(self, context):
+        self.start_time = time.time()
+
+    def get_stop_time(self, context):
+        self.end_time = time.time()
+
+    def update_result(self, context):
+        execution_time = self.end_time - self.start_time
+        context.result.add_metric('execution_time', execution_time, 'seconds')
+
+
+class ApkVersion(Instrument):
+
+    name = 'apk_version'
+    description = """
+    Extracts APK versions for workloads that have them.
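+
+    The version is reported as an ``apk_version`` metric for each iteration.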
+
+    """
+
+    def __init__(self, device, **kwargs):
+        super(ApkVersion, self).__init__(device, **kwargs)
+        self.apk_info = None
+
+    def setup(self, context):
+        if hasattr(context.workload, 'apk_file'):
+            self.apk_info = ApkInfo(context.workload.apk_file)
+        else:
+            self.apk_info = None
+
+    def update_result(self, context):
+        if self.apk_info:
+            context.result.add_metric(self.name, self.apk_info.version_name)
+
+
+class InterruptStatsInstrument(Instrument):
+
+    name = 'interrupts'
+    description = """
+    Pulls the ``/proc/interrupts`` file before and after workload execution and diffs them
+    to show what interrupts occurred during that time.
+
+    """
+
+    def __init__(self, device, **kwargs):
+        super(InterruptStatsInstrument, self).__init__(device, **kwargs)
+        self.before_file = None
+        self.after_file = None
+        self.diff_file = None
+
+    def setup(self, context):
+        self.before_file = os.path.join(context.output_directory, 'before', 'proc', 'interrupts')
+        self.after_file = os.path.join(context.output_directory, 'after', 'proc', 'interrupts')
+        self.diff_file = os.path.join(context.output_directory, 'diff', 'proc', 'interrupts')
+
+    def start(self, context):
+        with open(_f(self.before_file), 'w') as wfh:
+            wfh.write(self.device.execute('cat /proc/interrupts'))
+
+    def stop(self, context):
+        with open(_f(self.after_file), 'w') as wfh:
+            wfh.write(self.device.execute('cat /proc/interrupts'))
+
+    def update_result(self, context):
+        # If workload execution failed, the after_file may not have been created.
+        if os.path.isfile(self.after_file):
+            _diff_interrupt_files(self.before_file, self.after_file, _f(self.diff_file))
+
+
+class DynamicFrequencyInstrument(SysfsExtractor):
+
+    name = 'cpufreq'
+    description = """
+    Collects dynamic frequency (DVFS) settings before and after workload execution.
+
+    """
+
+    tarname = 'cpufreq.tar.gz'
+
+    parameters = [
+        Parameter('paths', mandatory=False, override=True),
+    ]
+
+    def setup(self, context):
+        self.paths = ['/sys/devices/system/cpu']
+        if self.device.is_rooted:
+            self.paths.append('/sys/class/devfreq/*')  # the '*' would cause problems for adb pull.
+        super(DynamicFrequencyInstrument, self).setup(context)
+
+    def validate(self):
+        # temp-fs would have been set in super's validate, if not explicitly specified.
+        if not self.tmpfs_mount_point.endswith('-cpufreq'):  # pylint: disable=access-member-before-definition
+            self.tmpfs_mount_point += '-cpufreq'
+
+
+def _diff_interrupt_files(before, after, result):  # pylint: disable=R0914
+    output_lines = []
+    with open(before) as bfh:
+        with open(after) as ofh:
+            for bline, aline in izip(bfh, ofh):
+                bchunks = bline.strip().split()
+                while True:
+                    achunks = aline.strip().split()
+                    if achunks[0] == bchunks[0]:
+                        diffchunks = ['']
+                        diffchunks.append(achunks[0])
+                        diffchunks.extend([diff_tokens(b, a) for b, a
+                                           in zip(bchunks[1:], achunks[1:])])
+                        output_lines.append(diffchunks)
+                        break
+                    else:  # new category appeared in the after file
+                        diffchunks = ['>'] + achunks
+                        output_lines.append(diffchunks)
+                        try:
+                            aline = ofh.next()
+                        except StopIteration:
+                            break
+
+    # Offset heading columns by one to allow for row labels on subsequent
+    # lines.
+    output_lines[0].insert(0, '')
+
+    # Any "columns" that do not have headings in the first row are not actually
+    # columns -- they are a single column where space-separated words got
+    # split. Merge them back together to prevent them from being
+    # column-aligned by write_table.
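+    # (Illustrative example: with a 4-cell header row, a 7-token data row is
+    # kept as its first 4 cells plus one extra cell re-joining the remaining
+    # 3 space-separated words.)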
+ table_rows = [output_lines[0]] + num_cols = len(output_lines[0]) + for row in output_lines[1:]: + table_row = row[:num_cols] + table_row.append(' '.join(row[num_cols:])) + table_rows.append(table_row) + + with open(result, 'w') as wfh: + write_table(table_rows, wfh) + + +def _diff_sysfs_dirs(before, after, result): # pylint: disable=R0914 + before_files = [] + os.path.walk(before, + lambda arg, dirname, names: arg.extend([os.path.join(dirname, f) for f in names]), + before_files + ) + before_files = filter(os.path.isfile, before_files) + files = [os.path.relpath(f, before) for f in before_files] + after_files = [os.path.join(after, f) for f in files] + diff_files = [os.path.join(result, f) for f in files] + + for bfile, afile, dfile in zip(before_files, after_files, diff_files): + if not os.path.isfile(afile): + logger.debug('sysfs_diff: {} does not exist or is not a file'.format(afile)) + continue + + with open(bfile) as bfh, open(afile) as afh: # pylint: disable=C0321 + with open(_f(dfile), 'w') as dfh: + for i, (bline, aline) in enumerate(izip_longest(bfh, afh), 1): + if aline is None: + logger.debug('Lines missing from {}'.format(afile)) + break + bchunks = re.split(r'(\W+)', bline) + achunks = re.split(r'(\W+)', aline) + if len(bchunks) != len(achunks): + logger.debug('Token length mismatch in {} on line {}'.format(bfile, i)) + dfh.write('xxx ' + bline) + continue + if ((len([c for c in bchunks if c.strip()]) == len([c for c in achunks if c.strip()]) == 2) and + (bchunks[0] == achunks[0])): + # if there are only two columns and the first column is the + # same, assume it's a "header" column and do not diff it. + dchunks = [bchunks[0]] + [diff_tokens(b, a) for b, a in zip(bchunks[1:], achunks[1:])] + else: + dchunks = [diff_tokens(b, a) for b, a in zip(bchunks, achunks)] + dfh.write(''.join(dchunks)) + diff --git a/wlauto/instrumentation/perf/LICENSE b/wlauto/instrumentation/perf/LICENSE new file mode 100644 index 00000000..99f70b0d --- /dev/null +++ b/wlauto/instrumentation/perf/LICENSE @@ -0,0 +1,9 @@ +perf binaries included here are part of the Linux kernel and are distributed +under GPL version 2; The full text of the license may be viewed here: + +http://www.gnu.org/licenses/gpl-2.0.html + +Source for these binaries is part of Linux Kernel source tree. This may be obtained +from Linaro here: + +https://git.linaro.org/arm/big.LITTLE/mp.git diff --git a/wlauto/instrumentation/perf/__init__.py b/wlauto/instrumentation/perf/__init__.py new file mode 100644 index 00000000..523ae2e0 --- /dev/null +++ b/wlauto/instrumentation/perf/__init__.py @@ -0,0 +1,176 @@ +# Copyright 2013-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+
+# pylint: disable=W0613,E1101,W0201
+import os
+import re
+import itertools
+
+
+from wlauto import Instrument, Executable, Parameter
+from wlauto.exceptions import ConfigError
+from wlauto.utils.misc import ensure_file_directory_exists as _f
+from wlauto.utils.types import list_or_string, list_of_strs
+
+PERF_COMMAND_TEMPLATE = '{} stat {} {} sleep 1000 > {} 2>&1 '
+
+DEVICE_RESULTS_FILE = '/data/local/perf_results.txt'
+HOST_RESULTS_FILE_BASENAME = 'perf.txt'
+
+PERF_COUNT_REGEX = re.compile(r'^\s*(\d+)\s*(.*?)\s*(\[\s*\d+\.\d+%\s*\])?\s*$')
+
+
+class PerfInstrument(Instrument):
+
+    name = 'perf'
+    description = """
+    perf is a Linux profiling tool based on performance counters.
+
+    Performance counters are CPU hardware registers that count hardware events
+    such as instructions executed, cache-misses suffered, or branches
+    mispredicted. They form a basis for profiling applications to trace dynamic
+    control flow and identify hotspots.
+
+    perf accepts options and events. If no option is given, the default '-a' is
+    used. The default events are migrations and cs. Both options and events can
+    be specified in the config file.
+
+    Events must be provided as a list, e.g. ::
+
+        perf_events = ['migrations', 'cs']
+
+    The list of available events can be obtained by typing the following in the
+    command line on the device ::
+
+        perf list
+
+    Options, on the other hand, should be provided as a single string, e.g. ::
+
+        perf_options = '-a -i'
+
+    Available options can be obtained by running the following in the command line ::
+
+        man perf-record
+    """
+
+    parameters = [
+        Parameter('events', kind=list_of_strs, default=['migrations', 'cs'],
+                  constraint=(lambda x: x, 'must not be empty.'),
+                  description="""Specifies the events to be counted."""),
+        Parameter('optionstring', kind=list_or_string, default='-a',
+                  description="""Specifies options to be used for the perf command. This
+                  may be a list of option strings, in which case, multiple instances of perf
+                  will be kicked off -- one for each option string. This may be used to e.g.
+                  collect different events from different big.LITTLE clusters.
+                  """),
+        Parameter('labels', kind=list_of_strs, default=None,
+                  description="""Provides labels for perf output. If specified, the number of
+                  labels must match the number of ``optionstring``\ s.
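+
+                  For example (an illustrative snippet), with two option strings
+                  targeting the two clusters of a big.LITTLE system, you might set
+                  ``labels = ['big', 'LITTLE']``, so that each reported metric is
+                  prefixed with the name of the cluster it came from.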
+ """), + ] + + def on_run_init(self, context): + if not self.device.is_installed('perf'): + binary = context.resolver.get(Executable(self, self.device.abi, 'perf')) + self.device.install(binary) + self.commands = self._build_commands() + + def setup(self, context): + self._clean_device() + + def start(self, context): + for command in self.commands: + self.device.kick_off(command) + + def stop(self, context): + self.device.killall('sleep') + + def update_result(self, context): + for label in self.labels: + device_file = self._get_device_outfile(label) + host_relpath = os.path.join('perf', os.path.basename(device_file)) + host_file = _f(os.path.join(context.output_directory, host_relpath)) + self.device.pull_file(device_file, host_file) + context.add_iteration_artifact(label, kind='raw', path=host_relpath) + with open(host_file) as fh: + in_results_section = False + for line in fh: + if 'Performance counter stats' in line: + in_results_section = True + fh.next() # skip the following blank line + if in_results_section: + if not line.strip(): # blank line + in_results_section = False + break + else: + line = line.split('#')[0] # comment + match = PERF_COUNT_REGEX.search(line) + if match: + count = int(match.group(1)) + metric = '{}_{}'.format(label, match.group(2)) + context.result.add_metric(metric, count) + + def teardown(self, context): # pylint: disable=R0201 + self._clean_device() + + def validate(self): + if isinstance(self.optionstring, list): + self.optionstrings = self.optionstring + else: + self.optionstrings = [self.optionstring] + if isinstance(self.events[0], list): # we know events are non-empty due to param constraint pylint: disable=access-member-before-definition + self.events = self.events + else: + self.events = [self.events] + if not self.labels: # pylint: disable=E0203 + self.labels = ['perf_{}'.format(i) for i in xrange(len(self.optionstrings))] + if not len(self.labels) == len(self.optionstrings): + raise ConfigError('The number of labels must match the number of optstrings provided for perf.') + + def _build_commands(self): + events = itertools.cycle(self.events) + commands = [] + for opts, label in itertools.izip(self.optionstrings, self.labels): + commands.append(self._build_perf_command(opts, events.next(), label)) + return commands + + def _clean_device(self): + for label in self.labels: + filepath = self._get_device_outfile(label) + self.device.delete_file(filepath) + + def _get_device_outfile(self, label): + return self.device.path.join(self.device.working_directory, '{}.out'.format(label)) + + def _build_perf_command(self, options, events, label): + event_string = ' '.join(['-e {}'.format(e) for e in events]) + command = PERF_COMMAND_TEMPLATE.format('perf', + options or '', + event_string, + self._get_device_outfile(label)) + return command + + +class CCIPerfEvent(object): + + def __init__(self, name, config): + self.name = name + self.config = config + + def __str__(self): + return 'CCI/config={config},name={name}/'.format(**self.__dict__) + diff --git a/wlauto/instrumentation/perf/bin/arm64/perf b/wlauto/instrumentation/perf/bin/arm64/perf new file mode 100755 index 00000000..5ec37c76 Binary files /dev/null and b/wlauto/instrumentation/perf/bin/arm64/perf differ diff --git a/wlauto/instrumentation/perf/bin/armeabi/perf b/wlauto/instrumentation/perf/bin/armeabi/perf new file mode 100755 index 00000000..5a52db56 Binary files /dev/null and b/wlauto/instrumentation/perf/bin/armeabi/perf differ diff --git a/wlauto/instrumentation/pmu_logger/__init__.py 
b/wlauto/instrumentation/pmu_logger/__init__.py
new file mode 100644
index 00000000..1a9a0adb
--- /dev/null
+++ b/wlauto/instrumentation/pmu_logger/__init__.py
@@ -0,0 +1,148 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=W0613,E1101,W0201
+import os
+import re
+import csv
+
+from wlauto import Instrument, settings, Parameter
+from wlauto.instrumentation import instrument_is_installed
+from wlauto.exceptions import ConfigError
+from wlauto.utils.types import boolean
+
+
+NUMBER_OF_CCI_PMU_COUNTERS = 4
+DEFAULT_EVENTS = ['0x63', '0x6A', '0x83', '0x8A']
+DEFAULT_PERIOD = 10  # in jiffies
+
+CPL_BASE = '/sys/kernel/debug/cci_pmu_logger/'
+CPL_CONTROL_FILE = CPL_BASE + 'control'
+CPL_PERIOD_FILE = CPL_BASE + 'period_jiffies'
+
+DRIVER = 'pmu_logger.ko'
+
+REGEX = re.compile(r'(\d+(?:\.\d+)?):\s+bprint:.*Cycles:\s*(\S+)\s*Counter_0:\s*(\S+)\s*Counter_1:\s*(\S+)\s*Counter_2:\s*(\S+)\s*Counter_3:\s*(\S+)')
+
+
+class CciPmuLogger(Instrument):
+
+    name = "cci_pmu_logger"
+    description = """
+    This instrument allows collecting CCI counter data.
+
+    It relies on the pmu_logger.ko kernel driver, the source for which is
+    included with Workload Automation (see inside the ``wlauto/external`` directory).
+    You will need to build this against your specific kernel. Once compiled, it needs
+    to be placed in the dependencies directory (usually ``~/.workload_automation/dependencies``).
+
+    .. note:: When compiling pmu_logger.ko for a new hardware platform, you may need to
+              modify CCI_BASE inside pmu_logger.c to contain the base address of where
+              CCI is mapped in memory on your device.
+
+    This instrument relies on the ``trace-cmd`` instrument to also be enabled. You should enable
+    at least the ``'bprint'`` trace event.
+
+    """
+
+    parameters = [
+        Parameter('events', kind=list, default=DEFAULT_EVENTS,
+                  description="""
+                  A list of strings, each representing an event to be counted. The length
+                  of the list cannot exceed the number of PMU counters available (4 in CCI-400).
+                  If this is not specified, shareable read transactions and snoop hits on both
+                  clusters will be counted by default. E.g. ``['0x63', '0x83']``.
+                  """),
+        Parameter('event_labels', kind=list, default=[],
+                  description="""
+                  A list of labels to be used when reporting PMU counts. If specified,
+                  this must be of the same length as ``cci_pmu_events``. If not specified,
+                  events will be labeled ``event_<event>`` (e.g. ``event_0x63``).
+                  """),
+        Parameter('period', kind=int, default=10,
+                  description='The period (in jiffies) between counter reads.'),
+        Parameter('install_module', kind=boolean, default=True,
+                  description="""
+                  Specifies whether pmu_logger has been compiled as a .ko module that needs
+                  to be installed by the instrument. (.ko binary must be in {}). If this is set
+                  to ``False``, it will be assumed that pmu_logger has been compiled into the kernel,
+                  or that it has been installed prior to the invocation of WA.
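+                  For example, if you have built the module against your target's
+                  kernel yourself, copy the resulting pmu_logger.ko into the
+                  dependencies directory named above and leave this parameter at
+                  its default of ``True``.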
+                  """.format(settings.dependencies_directory)),
+    ]
+
+    def on_run_init(self, context):
+        if self.install_module:
+            self.device_driver_file = self.device.path.join(self.device.working_directory, DRIVER)
+            host_driver_file = os.path.join(settings.dependencies_directory, DRIVER)
+            self.device.push_file(host_driver_file, self.device_driver_file)
+
+    def setup(self, context):
+        if self.install_module:
+            self.device.execute('insmod {}'.format(self.device_driver_file), check_exit_code=False)
+        self.device.set_sysfile_value(CPL_PERIOD_FILE, self.period)
+        for i, event in enumerate(self.events):
+            counter = CPL_BASE + 'counter{}'.format(i)
+            self.device.set_sysfile_value(counter, event, verify=False)
+
+    def start(self, context):
+        self.device.set_sysfile_value(CPL_CONTROL_FILE, 1, verify=False)
+
+    def stop(self, context):
+        self.device.set_sysfile_value(CPL_CONTROL_FILE, 1, verify=False)
+
+    # Result processing is done inside teardown because we need to make sure
+    # that trace-cmd has processed its results and generated the trace.txt.
+    def teardown(self, context):
+        trace_file = os.path.join(context.output_directory, 'trace.txt')
+        rows = [['timestamp', 'cycles'] + self.event_labels]
+        with open(trace_file) as fh:
+            for line in fh:
+                match = REGEX.search(line)
+                if match:
+                    rows.append([
+                        float(match.group(1)),
+                        int(match.group(2), 16),
+                        int(match.group(3), 16),
+                        int(match.group(4), 16),
+                        int(match.group(5), 16),
+                        int(match.group(6), 16),
+                    ])
+        output_file = os.path.join(context.output_directory, 'cci_counters.txt')
+        with open(output_file, 'wb') as wfh:
+            writer = csv.writer(wfh)
+            writer.writerows(rows)
+        context.add_iteration_artifact('cci_counters', path='cci_counters.txt', kind='data',
+                                       description='CCI PMU counter data.')
+
+        # summary metrics
+        sums = map(sum, zip(*(r[1:] for r in rows[1:])))
+        labels = ['cycles'] + self.event_labels
+        for label, value in zip(labels, sums):
+            context.result.add_metric('cci ' + label, value, lower_is_better=True)
+
+        # actual teardown
+        if self.install_module:
+            self.device.execute('rmmod pmu_logger', check_exit_code=False)
+
+    def validate(self):
+        if not instrument_is_installed('trace-cmd'):
+            raise ConfigError('To use cci_pmu_logger, the trace-cmd instrument must also be enabled.')
+        if not self.event_labels:  # pylint: disable=E0203
+            self.event_labels = ['event_{}'.format(e) for e in self.events]
+        elif not len(self.events) == len(self.event_labels):
+            raise ConfigError('cci_pmu_events and cci_pmu_event_labels must be of the same length.')
+        if len(self.events) > NUMBER_OF_CCI_PMU_COUNTERS:
+            raise ConfigError('The number of cci_pmu_events must be at most {}.'.format(NUMBER_OF_CCI_PMU_COUNTERS))
diff --git a/wlauto/instrumentation/streamline/__init__.py b/wlauto/instrumentation/streamline/__init__.py
new file mode 100644
index 00000000..841c44b0
--- /dev/null
+++ b/wlauto/instrumentation/streamline/__init__.py
@@ -0,0 +1,298 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=W0613,E1101
+import os
+import signal
+import shutil
+import subprocess
+import logging
+import re
+
+from wlauto import settings, Instrument, Parameter, ResourceGetter, GetterPriority, File
+from wlauto.exceptions import InstrumentError, DeviceError, ResourceError
+from wlauto.utils.misc import ensure_file_directory_exists as _f
+from wlauto.utils.types import boolean
+from wlauto.utils.log import StreamLogger, LogWriter, LineLogWriter
+
+
+SESSION_TEXT_TEMPLATE = (''
+                         ''
+                         '')
+
+VERSION_REGEX = re.compile(r'\(DS-5 v(.*?)\)')
+
+
+class StreamlineResourceGetter(ResourceGetter):
+
+    name = 'streamline_resource'
+    resource_type = 'file'
+    priority = GetterPriority.environment + 1  # run before standard environment resolvers.
+
+    dependencies_directory = os.path.join(settings.dependencies_directory, 'streamline')
+    old_dependencies_directory = os.path.join(settings.environment_root, 'streamline')  # backwards compatibility
+
+    def get(self, resource, **kwargs):
+        if resource.owner.name != 'streamline':
+            return None
+        test_path = _f(os.path.join(self.dependencies_directory, resource.path))
+        if os.path.isfile(test_path):
+            return test_path
+        test_path = _f(os.path.join(self.old_dependencies_directory, resource.path))
+        if os.path.isfile(test_path):
+            return test_path
+
+
+class StreamlineInstrument(Instrument):
+
+    name = 'streamline'
+    description = """
+    Collect Streamline traces from the device.
+
+    .. note:: This instrument supports the Streamline that comes with DS-5 v5.17 and later;
+              earlier versions of Streamline may not work correctly (or at all).
+
+    This instrument allows collecting Streamline traces (such as PMU counter values) from
+    the device. It assumes you have DS-5 (which Streamline is part of) installed on your
+    system, and that the streamline command is somewhere in PATH.
+
+    Streamline works by connecting to the gator service on the device. gator comes in two
+    parts: a driver (gator.ko) and a daemon (gatord). The driver needs to be compiled
+    against your kernel, and both the driver and the daemon need to be compatible with your
+    version of Streamline. The best way to ensure compatibility is to build them from the
+    source that came with your DS-5. The gator source can be found in ::
+
+        /usr/local/DS-5/arm/gator
+
+    (the exact path may vary depending on where you have installed DS-5). Please refer to
+    the README that accompanies the source for instructions on how to build it.
+
+    Once you have built the driver and the daemon, place the binaries into your
+    ~/.workload_automation/streamline/ directory (if you haven't tried running WA with
+    this instrument before, the streamline/ subdirectory might not exist, in which
+    case you will need to create it).
+
+    In order to specify which events should be captured, you need to provide a
+    configuration.xml for the gator. The easiest way to obtain this file is to export it
+    from the event configuration dialog in the DS-5 Streamline GUI. The file should be
+    called "configuration.xml" and it should be placed in the same directory as the gator
+    binaries.
+
+    With that done, you can enable Streamline traces by adding the following entry to the
+    instrumentation list in your ~/.workload_automation/config.py
+
+    ::
+
+        instrumentation = [
+            # ...
+            'streamline',
+            # ...
+        ]
+
+    You can also specify the following (optional) configuration in the same config file:
+
+    """
+    supported_platforms = ['android']
+
+    parameters = [
+        Parameter('port', default='8080',
+                  description='Specifies the port on which streamline will connect to gator.'),
+        Parameter('configxml', default=None,
+                  description='streamline configuration XML file to be used. This must be '
+                              'an absolute path, though it may contain the user home symbol (~).'),
+        Parameter('report', kind=boolean, default=False, global_alias='streamline_report_csv',
+                  description='Specifies whether a report should be generated from streamline data.'),
+        Parameter('report_options', kind=str, default='-format csv',
+                  description='A string with options that will be added to the streamline -report command.'),
+    ]
+
+    daemon = 'gatord'
+    driver = 'gator.ko'
+    configuration_file_name = 'configuration.xml'
+
+    def __init__(self, device, **kwargs):
+        super(StreamlineInstrument, self).__init__(device, **kwargs)
+        self.streamline = None
+        self.session_file = None
+        self.capture_file = None
+        self.analysis_file = None
+        self.report_file = None
+        self.configuration_file = None
+        self.on_device_config = None
+        self.daemon_process = None
+        self.enabled = False
+        self.resource_getter = None
+
+        self.host_daemon_file = None
+        self.host_driver_file = None
+        self.device_driver_file = None
+
+        self._check_has_valid_display()
+
+    def on_run_start(self, context):
+        if subprocess.call('which caiman', stdout=subprocess.PIPE, shell=True):
+            raise InstrumentError('caiman not in PATH. Cannot enable Streamline tracing.')
+        p = subprocess.Popen('caiman --version 2>&1', stdout=subprocess.PIPE, shell=True)
+        out, _ = p.communicate()
+        match = VERSION_REGEX.search(out)
+        if not match:
+            raise InstrumentError('Could not determine caiman version; cannot enable Streamline tracing.')
+        version_tuple = tuple(map(int, match.group(1).split('.')))
+        if version_tuple < (5, 17):
+            raise InstrumentError('Need DS-5 v5.17 or greater; found v{}'.format(match.group(1)))
+        self.enabled = True
+        self.resource_getter = StreamlineResourceGetter(context.resolver)
+        self.resource_getter.register()
+
+    def on_run_end(self, context):
+        self.enabled = False
+        self.resource_getter.unregister()
+
+    def on_run_init(self, context):
+        try:
+            self.host_daemon_file = context.resolver.get(File(self, self.daemon))
+            self.logger.debug('Using daemon from {}.'.format(self.host_daemon_file))
+            self.device.killall(self.daemon)  # in case a version is already running
+            self.device.install(self.host_daemon_file)
+        except ResourceError:
+            self.logger.debug('Using on-device daemon.')
+
+        try:
+            self.host_driver_file = context.resolver.get(File(self, self.driver))
+            self.logger.debug('Using driver from {}.'.format(self.host_driver_file))
+            self.device_driver_file = self.device.install(self.host_driver_file)
+        except ResourceError:
+            self.logger.debug('Using on-device driver.')
+
+        try:
+            self.configuration_file = (os.path.expanduser(self.configxml or '') or
+                                       context.resolver.get(File(self, self.configuration_file_name)))
+            self.logger.debug('Using {}'.format(self.configuration_file))
+            self.on_device_config = self.device.path.join(self.device.working_directory, 'configuration.xml')
+            shutil.copy(self.configuration_file, settings.meta_directory)
+        except ResourceError:
+            self.logger.debug('No configuration file was specified.')
+
+        caiman_path = subprocess.check_output('which caiman', shell=True).strip()  # pylint: disable=E1103
+        self.session_file = os.path.join(context.host_working_directory, 'streamline_session.xml')
+        with open(self.session_file, 'w') as wfh:
+            wfh.write(SESSION_TEXT_TEMPLATE.format(self.port, caiman_path))
+
+    def setup(self, context):
+        # Note: the config file needs to be copied on each iteration's setup
+        # because gator appears to "consume" it on invocation...
+ if self.configuration_file: + self.device.push_file(self.configuration_file, self.on_device_config) + self._initialize_daemon() + self.capture_file = _f(os.path.join(context.output_directory, 'streamline', 'capture.apc')) + self.report_file = _f(os.path.join(context.output_directory, 'streamline', 'streamline.csv')) + + def start(self, context): + if self.enabled: + command = ['streamline', '-capture', self.session_file, '-output', self.capture_file] + self.streamline = subprocess.Popen(command, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE, + preexec_fn=os.setpgrp) + outlogger = StreamLogger('streamline', self.streamline.stdout, klass=LineLogWriter) + errlogger = StreamLogger('streamline', self.streamline.stderr, klass=LineLogWriter) + outlogger.start() + errlogger.start() + + def stop(self, context): + if self.enabled: + os.killpg(self.streamline.pid, signal.SIGTERM) + + def update_result(self, context): + if self.enabled: + self._kill_daemon() + if self.report: + self.logger.debug('Creating report...') + command = ['streamline', '-report', self.capture_file, '-output', self.report_file] + command += self.report_options.split() + _run_streamline_command(command) + context.add_artifact('streamlinecsv', self.report_file, 'data') + + def teardown(self, context): + self.device.delete_file(self.on_device_config) + + def _check_has_valid_display(self): # pylint: disable=R0201 + reason = None + if os.name == 'posix' and not os.getenv('DISPLAY'): + reason = 'DISPLAY is not set.' + else: + p = subprocess.Popen('xhost', stdout=subprocess.PIPE, stderr=subprocess.PIPE) + _, error = p.communicate() + if p.returncode: + reason = 'Invalid DISPLAY; xhost returned: "{}".'.format(error.strip()) # pylint: disable=E1103 + if reason: + raise InstrumentError('{}\nstreamline binary requires a valid display server to be running.'.format(reason)) + + def _initialize_daemon(self): + if self.device_driver_file: + try: + self.device.execute('insmod {}'.format(self.device_driver_file)) + except DeviceError, e: + if 'File exists' not in e.message: + raise + self.logger.debug('Driver was already installed.') + self._start_daemon() + port_spec = 'tcp:{}'.format(self.port) + self.device.forward_port(port_spec, port_spec) + + def _start_daemon(self): + self.logger.debug('Starting gatord') + self.device.killall('gatord', as_root=True) + if self.configuration_file: + command = '{} -c {}'.format(self.daemon, self.on_device_config) + else: + + command = '{}'.format(self.daemon) + + self.daemon_process = self.device.execute(command, as_root=True, background=True) + outlogger = StreamLogger('gatord', self.daemon_process.stdout) + errlogger = StreamLogger('gatord', self.daemon_process.stderr, logging.ERROR) + outlogger.start() + errlogger.start() + if self.daemon_process.poll() is not None: + # If adb returned, something went wrong. 
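+                # (The background execute above is assumed to return a
+                # Popen-like handle for the adb invocation; poll() is None
+                # for as long as that process is still running.)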
+            raise InstrumentError('Could not start gatord.')
+
+    def _kill_daemon(self):
+        self.logger.debug('Killing daemon process.')
+        self.daemon_process.kill()
+
+
+def _run_streamline_command(command):
+    streamline = subprocess.Popen(command,
+                                  stdout=subprocess.PIPE,
+                                  stderr=subprocess.PIPE,
+                                  stdin=subprocess.PIPE)
+    output, error = streamline.communicate()
+    LogWriter('streamline').write(output).close()
+    LogWriter('streamline').write(error).close()
diff --git a/wlauto/instrumentation/trace_cmd/LICENSE b/wlauto/instrumentation/trace_cmd/LICENSE
new file mode 100644
index 00000000..9d46c1a5
--- /dev/null
+++ b/wlauto/instrumentation/trace_cmd/LICENSE
@@ -0,0 +1,39 @@
+Included trace-cmd binaries are Free Software distributed under GPLv2:
+
+/*
+ * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+The full text of the license may be viewed here:
+
+http://www.gnu.org/licenses/gpl-2.0.html
+
+Source code for trace-cmd may be obtained here:
+
+git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/trace-cmd.git
+
+Binaries included here contain modifications by ARM that, at the time of writing,
+have not yet made it into the above repository. The patches for these modifications
+are available here:
+
+http://article.gmane.org/gmane.linux.kernel/1869111
+http://article.gmane.org/gmane.linux.kernel/1869112
+
+
+
diff --git a/wlauto/instrumentation/trace_cmd/__init__.py b/wlauto/instrumentation/trace_cmd/__init__.py
new file mode 100644
index 00000000..fdd4f76b
--- /dev/null
+++ b/wlauto/instrumentation/trace_cmd/__init__.py
@@ -0,0 +1,322 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=W0613,E1101
+from __future__ import division
+import os
+import time
+import subprocess
+from collections import defaultdict
+
+from wlauto import Instrument, Parameter, Executable
+from wlauto.exceptions import InstrumentError, ConfigError
+from wlauto.core import signal
+from wlauto.utils.types import boolean
+
+OUTPUT_TRACE_FILE = 'trace.dat'
+OUTPUT_TEXT_FILE = '{}.txt'.format(os.path.splitext(OUTPUT_TRACE_FILE)[0])
+TIMEOUT = 180
+
+
+class TraceCmdInstrument(Instrument):
+
+    name = 'trace-cmd'
+    description = """
+    trace-cmd is an instrument which interacts with the Ftrace Linux kernel internal
+    tracer.
+
+    From the trace-cmd man page:
+
+    trace-cmd command interacts with the Ftrace tracer that is built inside the
+    Linux kernel. It interfaces with the Ftrace specific files found in the
+    debugfs file system under the tracing directory.
+
+    trace-cmd reads a list of events it will trace, which can be specified in
+    the config file as follows ::
+
+        trace_events = ['irq*', 'power*']
+
+    If no event is specified in the config file, trace-cmd traces the following events:
+
+        - sched*
+        - irq*
+        - power*
+        - cpufreq_interactive*
+
+    The list of available events can be obtained by rooting and running the following
+    command line on the device ::
+
+        trace-cmd list
+
+    You may also specify the ``trace_buffer_size`` setting, which must be an integer
+    that will be used to set the ftrace buffer size. It will be interpreted as KB ::
+
+        trace_buffer_size = 8000
+
+    The maximum buffer size varies from device to device, and trying to set a buffer
+    size beyond it will fail. If you plan on collecting a lot of trace over
+    long periods of time, the buffer size will not be enough and you will only get trace for
+    the last portion of your run. To deal with this you can set the ``trace_mode`` setting to
+    ``'record'`` (the default is ``'start'``) ::
+
+        trace_mode = 'record'
+
+    This will cause trace-cmd to trace into file(s) on disk, rather than the buffer, and so the
+    limit for the max size of the trace is set by the storage available on device. Bear in mind
+    that ``'record'`` mode *is* more intrusive than the default, so if you do not plan on
+    generating a lot of trace, it is best to use the default ``'start'`` mode.
+
+    .. note:: Mode names correspond to the underlying trace-cmd executable's commands used to
+              implement them. You can find out more about what is happening in each case from
+              trace-cmd documentation: https://lwn.net/Articles/341902/.
+
+    This instrument comes with an Android trace-cmd binary that will be copied and used on the
+    device, however post-processing will be done on-host and you must have trace-cmd installed and
+    in your path. On Ubuntu systems, this may be done with ::
+
+        sudo apt-get install trace-cmd
+
+    """
+
+    parameters = [
+        Parameter('events', kind=list, default=['sched*', 'irq*', 'power*', 'cpufreq_interactive*'],
+                  global_alias='trace_events',
+                  description="""
+                  Specifies the list of events to be traced. Each event in the list will be passed to
+                  trace-cmd with the -e parameter and must be in the format accepted by trace-cmd.
+                  """),
+        Parameter('mode', default='start', allowed_values=['start', 'record'],
+                  global_alias='trace_mode',
+                  description="""
+                  Trace can be collected using either the 'start' or 'record' trace-cmd
+                  commands. In 'start' mode, trace will be collected into the ftrace buffer;
+                  in 'record' mode, trace will be written into a file on the device's file
+                  system.
+                  'start' mode is (in theory) less intrusive than 'record' mode, however
+                  it is limited by the size of the ftrace buffer (which is configurable --
+                  see ``buffer_size`` -- but only up to a point) and may overflow
+                  for long-running workloads, which will result in dropped events.
+                  """),
+        Parameter('buffer_size', kind=int, default=None,
+                  global_alias='trace_buffer_size',
+                  description="""
+                  Attempt to set the ftrace buffer size to the specified value (in KB). The default
+                  buffer size may need to be increased for long-running workloads, or if a large
+                  number of events have been enabled. Note: there is a maximum size to which the
+                  buffer can be set, and that varies from device to device. Attempting to set the
+                  buffer size higher than this will fail. In that case, this instrument will set
+                  the size to the highest possible value by going down from the specified size in
+                  ``buffer_size_step`` intervals.
+                  """),
+        Parameter('buffer_size_step', kind=int, default=1000,
+                  global_alias='trace_buffer_size_step',
+                  description="""
+                  Defines the decremental step used if the specified ``buffer_size`` could not be set.
+                  This will be subtracted from the buffer size until the set succeeds or the size is
+                  reduced to 1MB.
+                  """),
+        Parameter('buffer_size_file', default='/d/tracing/buffer_size_kb',
+                  description="""
+                  Path to the debugfs file that may be used to set the ftrace buffer size. This should
+                  not need to be modified for the vast majority of devices.
+                  """),
+        Parameter('report', kind=boolean, default=True,
+                  description="""
+                  Specifies whether host-side reporting should be performed once the binary trace has been
+                  pulled from the device.
+
+                  .. note:: This requires the latest version of trace-cmd to be installed on the host (the
+                            one in your distribution's repos may be too old).
+
+                  """),
+        Parameter('no_install', kind=boolean, default=False,
+                  description="""
+                  Do not install the bundled trace-cmd and use the one on the device instead. If there is
+                  not already a trace-cmd on the device, an error is raised.
+
+                  """),
+    ]
+
+    def __init__(self, device, **kwargs):
+        super(TraceCmdInstrument, self).__init__(device, **kwargs)
+        self.trace_cmd = None
+        self.event_string = _build_trace_events(self.events)
+        self.output_file = self.device.path.join(self.device.working_directory, OUTPUT_TRACE_FILE)
+        self.temp_trace_file = self.device.path.join(self.device.working_directory, OUTPUT_TRACE_FILE)
+
+    def on_run_init(self, context):
+        if not self.device.is_rooted:
+            raise InstrumentError('trace-cmd instrument cannot be used on an unrooted device.')
+        if not self.no_install:
+            host_file = context.resolver.get(Executable(self, self.device.abi, 'trace-cmd'))
+            self.trace_cmd = self.device.install_executable(host_file)
+        else:
+            if not self.device.is_installed('trace-cmd'):
+                raise ConfigError('No trace-cmd found on device and no_install=True is specified.')
+            self.trace_cmd = 'trace-cmd'
+        # Register ourselves as absolute last event before and
+        # first after so we can mark the trace at the right time
+        signal.connect(self.insert_start_mark, signal.BEFORE_WORKLOAD_EXECUTION, priority=11)
+        signal.connect(self.insert_end_mark, signal.AFTER_WORKLOAD_EXECUTION, priority=11)
+
+    def setup(self, context):
+        if self.mode == 'start':
+            if self.buffer_size:
+                self._set_buffer_size()
+            self.device.execute('{} reset'.format(self.trace_cmd), as_root=True, timeout=180)
+        elif self.mode == 'record':
+            pass
+        else:
+            raise ValueError('Bad mode: {}'.format(self.mode))  # should never get here
+
+    def start(self, context):
+        self.start_time = time.time()  # pylint: disable=attribute-defined-outside-init
+        if self.mode == 'start':
+            self.device.execute('{} start {}'.format(self.trace_cmd, self.event_string), as_root=True)
+        elif self.mode == 'record':
+            self.device.kick_off('{} record -o {} {}'.format(self.trace_cmd, self.output_file, self.event_string))
+        else:
+            raise ValueError('Bad mode: {}'.format(self.mode))  # should never get here
+
+    def stop(self, context):
+        self.stop_time = time.time()  # pylint: disable=attribute-defined-outside-init
+        if self.mode == 'start':
+            self.device.execute('{} stop'.format(self.trace_cmd), timeout=60, as_root=True)
+        elif self.mode == 'record':
+            # There will be a trace-cmd worker process per CPU core plus a main
+            # control trace-cmd process. Interrupting the control process will
+            # trigger the generation of the single binary trace file.
+            trace_cmds = self.device.ps(name=self.trace_cmd)
+            if not trace_cmds:
+                raise InstrumentError('Could not find running trace-cmd on device.')
+            # The workers will have their PPID set to the PID of control.
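+            # Build a parent PID -> child PIDs map; the control process is the
+            # one that is both a lone child (of the shell that kicked it off)
+            # and itself a parent (of the per-CPU worker processes).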
+            parent_map = defaultdict(list)
+            for entry in trace_cmds:
+                parent_map[entry.ppid].append(entry.pid)
+            controls = [v[0] for _, v in parent_map.iteritems()
+                        if len(v) == 1 and v[0] in parent_map]
+            if len(controls) > 1:
+                self.logger.warning('More than one trace-cmd instance found; stopping all of them.')
+            for c in controls:
+                self.device.kill(c, signal='INT', as_root=True)
+        else:
+            raise ValueError('Bad mode: {}'.format(self.mode))  # should never get here
+
+    def update_result(self, context):  # NOQA pylint: disable=R0912
+        if self.mode == 'start':
+            self.device.execute('{} extract -o {}'.format(self.trace_cmd, self.output_file),
+                                timeout=TIMEOUT, as_root=True)
+        elif self.mode == 'record':
+            self.logger.debug('Waiting for trace.dat to be generated.')
+            while self.device.ps(name=self.trace_cmd):
+                time.sleep(2)
+        else:
+            raise ValueError('Bad mode: {}'.format(self.mode))  # should never get here
+
+        # The size of trace.dat will depend on how long trace-cmd was running.
+        # Therefore the timeout for the pull command must also be adjusted
+        # accordingly.
+        pull_timeout = (self.stop_time - self.start_time)
+        self.device.pull_file(self.output_file, context.output_directory, timeout=pull_timeout)
+        context.add_iteration_artifact('bintrace', OUTPUT_TRACE_FILE, kind='data',
+                                       description='trace-cmd generated ftrace dump.')
+
+        local_trace_file = os.path.join(context.output_directory, OUTPUT_TRACE_FILE)
+        local_txt_trace_file = os.path.join(context.output_directory, OUTPUT_TEXT_FILE)
+
+        if self.report:
+            # To get the output of trace.dat, trace-cmd must be installed.
+            # This is done host-side because the generated file is very large.
+            if not os.path.isfile(local_trace_file):
+                self.logger.warning('Not generating trace.txt, as trace.dat does not exist.')
+            try:
+                command = 'trace-cmd report {} > {}'.format(local_trace_file, local_txt_trace_file)
+                self.logger.debug(command)
+                process = subprocess.Popen(command, stderr=subprocess.PIPE, shell=True)
+                _, error = process.communicate()
+                if process.returncode:
+                    raise InstrumentError('trace-cmd returned non-zero exit code {}'.format(process.returncode))
+                if error:
+                    # logged at debug level, as trace-cmd always outputs some
+                    # errors that seem benign.
+                    self.logger.debug(error)
+                if os.path.isfile(local_txt_trace_file):
+                    context.add_iteration_artifact('txttrace', OUTPUT_TEXT_FILE, kind='export',
+                                                   description='trace-cmd generated ftrace dump.')
+                    self.logger.debug('Verifying traces.')
+                    with open(local_txt_trace_file) as fh:
+                        for line in fh:
+                            if 'EVENTS DROPPED' in line:
+                                self.logger.warning('Dropped events detected.')
+                                break
+                        else:
+                            self.logger.debug('Trace verified.')
+                else:
+                    self.logger.warning('Could not generate trace.txt.')
+            except OSError:
+                raise InstrumentError('Could not find trace-cmd. 
+
+    def teardown(self, context):
+        self.device.delete_file(self.device.path.join(self.device.working_directory, OUTPUT_TRACE_FILE))
+
+    def on_run_end(self, context):
+        pass
+
+    def validate(self):
+        if self.report and os.system('which trace-cmd > /dev/null'):
+            raise InstrumentError('trace-cmd is not in PATH; is it installed?')
+        if self.buffer_size:
+            if self.mode == 'record':
+                self.logger.debug('trace_buffer_size specified with record mode; it will be ignored.')
+            else:
+                try:
+                    int(self.buffer_size)
+                except ValueError:
+                    raise ConfigError('trace_buffer_size must be an int.')
+
+    def insert_start_mark(self, context):
+        # The trace marker appears in ftrace as an ftrace/print event with
+        # TRACE_MARKER_START in the info field.
+        self.device.set_sysfile_value("/sys/kernel/debug/tracing/trace_marker", "TRACE_MARKER_START", verify=False)
+
+    def insert_end_mark(self, context):
+        # The trace marker appears in ftrace as an ftrace/print event with
+        # TRACE_MARKER_STOP in the info field.
+        self.device.set_sysfile_value("/sys/kernel/debug/tracing/trace_marker", "TRACE_MARKER_STOP", verify=False)
+
+    def _set_buffer_size(self):
+        target_buffer_size = self.buffer_size
+        attempt_buffer_size = target_buffer_size
+        buffer_size = 0
+        floor = 1000 if target_buffer_size > 1000 else target_buffer_size
+        while attempt_buffer_size >= floor:
+            self.device.set_sysfile_value(self.buffer_size_file, attempt_buffer_size, verify=False)
+            buffer_size = self.device.get_sysfile_value(self.buffer_size_file, kind=int)
+            if buffer_size == attempt_buffer_size:
+                break
+            else:
+                attempt_buffer_size -= self.buffer_size_step
+        if buffer_size == target_buffer_size:
+            return
+        while attempt_buffer_size < target_buffer_size:
+            attempt_buffer_size += self.buffer_size_step
+            self.device.set_sysfile_value(self.buffer_size_file, attempt_buffer_size, verify=False)
+            buffer_size = self.device.get_sysfile_value(self.buffer_size_file, kind=int)
+            if attempt_buffer_size != buffer_size:
+                self.logger.warning('Failed to set trace buffer size to {}, value set was {}'.format(target_buffer_size, buffer_size))
+                break
+
+
+def _build_trace_events(events):
+    event_string = ' '.join(['-e {}'.format(e) for e in events])
+    return event_string
+
diff --git a/wlauto/instrumentation/trace_cmd/bin/arm64/trace-cmd b/wlauto/instrumentation/trace_cmd/bin/arm64/trace-cmd
new file mode 100755
index 00000000..0d025d0d
Binary files /dev/null and b/wlauto/instrumentation/trace_cmd/bin/arm64/trace-cmd differ
diff --git a/wlauto/instrumentation/trace_cmd/bin/armeabi/trace-cmd b/wlauto/instrumentation/trace_cmd/bin/armeabi/trace-cmd
new file mode 100755
index 00000000..a4456627
Binary files /dev/null and b/wlauto/instrumentation/trace_cmd/bin/armeabi/trace-cmd differ
diff --git a/wlauto/modules/__init__.py b/wlauto/modules/__init__.py
new file mode 100644
index 00000000..16224d6f
--- /dev/null
+++ b/wlauto/modules/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
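For illustration, the `_build_trace_events` helper at the end of the trace-cmd instrument above simply turns the configured event list into repeated ``-e`` switches on the trace-cmd command line (example values only):

    >>> _build_trace_events(['sched_switch', 'cpu_idle', 'power*'])
    '-e sched_switch -e cpu_idle -e power*'
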
diff --git a/wlauto/modules/active_cooling.py b/wlauto/modules/active_cooling.py
new file mode 100644
index 00000000..7f9fbb03
--- /dev/null
+++ b/wlauto/modules/active_cooling.py
@@ -0,0 +1,64 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from wlauto import Module, Parameter
+from wlauto.utils.serial_port import open_serial_connection
+
+
+class MbedFanActiveCooling(Module):
+
+    name = 'mbed-fan'
+
+    capabilities = ['active_cooling']
+
+    parameters = [
+        Parameter('port', default='/dev/ttyACM0',
+                  description="""The serial port for the active cooling solution (see above)."""),
+        Parameter('baud', kind=int, default=115200,
+                  description="""Baud rate for the serial port (see above)."""),
+        Parameter('fan_pin', kind=int, default=0,
+                  description="""Which controller pin on the mbed the fan for the active cooling solution is
+                  connected to (controller pin 0 is physical pin 22 on the mbed)."""),
+    ]
+
+    timeout = 30
+
+    def start_active_cooling(self):
+        with open_serial_connection(timeout=self.timeout,
+                                    port=self.port,
+                                    baudrate=self.baud) as target:
+            target.sendline('motor_{}_1'.format(self.fan_pin))
+
+    def stop_active_cooling(self):
+        with open_serial_connection(timeout=self.timeout,
+                                    port=self.port,
+                                    baudrate=self.baud) as target:
+            target.sendline('motor_{}_0'.format(self.fan_pin))
+
+
+class OdroidXU3ActiveCooling(Module):
+
+    name = 'odroidxu3-fan'
+
+    capabilities = ['active_cooling']
+
+    def start_active_cooling(self):
+        self.owner.set_sysfile_value('/sys/devices/odroid_fan.15/fan_mode', 0, verify=False)
+        self.owner.set_sysfile_value('/sys/devices/odroid_fan.15/pwm_duty', 255, verify=False)
+
+    def stop_active_cooling(self):
+        self.owner.set_sysfile_value('/sys/devices/odroid_fan.15/fan_mode', 0, verify=False)
+        self.owner.set_sysfile_value('/sys/devices/odroid_fan.15/pwm_duty', 1, verify=False)
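For bench-testing the mbed fan rig outside WA, the same serial protocol used by the module above can be driven directly (a sketch, assuming pyserial is available and the default port/baud parameters above apply):

    from serial import Serial

    conn = Serial('/dev/ttyACM0', 115200, timeout=30)
    conn.write('motor_0_1\n')   # fan on controller pin 0: on
    conn.write('motor_0_0\n')   # fan on controller pin 0: off
    conn.close()
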
diff --git a/wlauto/modules/flashing.py b/wlauto/modules/flashing.py
new file mode 100644
index 00000000..38cf95ce
--- /dev/null
+++ b/wlauto/modules/flashing.py
@@ -0,0 +1,253 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=attribute-defined-outside-init
+import os
+import time
+import tarfile
+import tempfile
+import shutil
+
+from wlauto import Module, Parameter
+from wlauto.exceptions import ConfigError, DeviceError
+from wlauto.utils.android import fastboot_flash_partition, fastboot_command
+from wlauto.utils.serial_port import open_serial_connection
+from wlauto.utils.uefi import UefiMenu
+from wlauto.utils.types import boolean
+from wlauto.utils.misc import merge_dicts
+
+
+class Flasher(Module):
+    """
+    Implements a mechanism for flashing a device. The images to be flashed can be
+    specified either as a tarball "image bundle" (in which case instructions for
+    flashing are provided as flasher-specific metadata, also in the bundle), or as
+    individual image files, in which case instructions for flashing are specified
+    as part of the flashing config.
+
+    .. note:: It is important that, when resolving configuration, concrete flasher
+              implementations prioritise settings specified in the config over those
+              in the bundle (if they happen to clash).
+
+    """
+
+    capabilities = ['flash']
+
+    def flash(self, image_bundle=None, images=None):
+        """
+        Flashes the specified device using the specified config. As a post condition,
+        the device must be ready to run workloads upon returning from this method (e.g.
+        it must be fully-booted into the OS).
+
+        """
+        raise NotImplementedError()
+
+
+class FastbootFlasher(Flasher):
+
+    name = 'fastboot'
+    description = """
+    Enables automated flashing of images using the fastboot utility.
+
+    To use this flasher, a set of image files to be flashed is required. In addition,
+    a mapping between partitions and image files is required. There are two ways
+    to specify those requirements:
+
+    - Image mapping: In this mode, a mapping between partitions and images is given in the agenda.
+    - Image bundle: In this mode, a tarball is specified, which must contain all the image files
+      as well as a partition file, named ``partitions.txt``, which contains the mapping between
+      partitions and images.
+
+    The format of ``partitions.txt`` defines one mapping per line as such: ::
+
+        kernel zImage-dtb
+        ramdisk ramdisk_image
+
+    """
+
+    delay = 0.5
+    serial_timeout = 30
+    partitions_file_name = 'partitions.txt'
+
+    def flash(self, image_bundle=None, images=None):
+        self.prelude_done = False
+        to_flash = {}
+        if image_bundle:  # pylint: disable=access-member-before-definition
+            image_bundle = expand_path(image_bundle)
+            to_flash = self._bundle_to_images(image_bundle)
+        to_flash = merge_dicts(to_flash, images or {}, should_normalize=False)
+        for partition, image_path in to_flash.iteritems():
+            self.logger.debug('flashing {}'.format(partition))
+            self._flash_image(self.owner, partition, expand_path(image_path))
+        fastboot_command('reboot')
+
+    def _validate_image_bundle(self, image_bundle):
+        if not tarfile.is_tarfile(image_bundle):
+            raise ConfigError('File {} is not a tarfile'.format(image_bundle))
+        with tarfile.open(image_bundle) as tar:
+            files = [tf.name for tf in tar.getmembers()]
+            if not any(pf in files for pf in (self.partitions_file_name, '{}/{}'.format(files[0], self.partitions_file_name))):
+                raise ConfigError('Image bundle does not contain the required partition file (see documentation).')
+
+    def _bundle_to_images(self, image_bundle):
+        """
+        Extracts the bundle to a temporary location and creates a mapping between
+        the contents of the bundle and the images to be flashed.
+ """ + self._validate_image_bundle(image_bundle) + extract_dir = tempfile.mkdtemp() + with tarfile.open(image_bundle) as tar: + tar.extractall(path=extract_dir) + files = [tf.name for tf in tar.getmembers()] + if self.partitions_file_name not in files: + extract_dir = os.path.join(extract_dir, files[0]) + partition_file = os.path.join(extract_dir, self.partitions_file_name) + return get_mapping(extract_dir, partition_file) + + def _flash_image(self, device, partition, image_path): + if not self.prelude_done: + self._fastboot_prelude(device) + fastboot_flash_partition(partition, image_path) + time.sleep(self.delay) + + def _fastboot_prelude(self, device): + with open_serial_connection(port=device.port, + baudrate=device.baudrate, + timeout=self.serial_timeout, + init_dtr=0, + get_conn=False) as target: + device.reset() + time.sleep(self.delay) + target.sendline(' ') + time.sleep(self.delay) + target.sendline('fast') + time.sleep(self.delay) + self.prelude_done = True + + +class VersatileExpressFlasher(Flasher): + + name = 'vexpress' + + parameters = [ + Parameter('image_name', default='Image', + description='The name of the kernel image to boot.'), + Parameter('image_args', default=None, + description='Kernel arguments with which the image will be booted.'), + Parameter('fdt_support', kind=boolean, default=True, + description='Specifies whether the image has device tree support.'), + Parameter('initrd', default=None, + description='If the kernel image uses an INITRD, this can be used to specify it.'), + Parameter('fdt_path', default=None, + description='If specified, this will be set as the FDT path.'), + ] + + def flash(self, image_bundle=None, images=None): + device = self.owner + if not hasattr(device, 'port') or not hasattr(device, 'microsd_mount_point'): + msg = 'Device {} does not appear to support VExpress flashing.' 
+            raise ConfigError(msg.format(device.name))
+        with open_serial_connection(port=device.port,
+                                    baudrate=device.baudrate,
+                                    timeout=device.timeout,
+                                    init_dtr=0) as target:
+            target.sendline('usb_on')  # this will cause the MicroSD to be mounted on the host
+            device.wait_for_microsd_mount_point(target)
+            self.deploy_images(device, image_bundle, images)
+
+        self.logger.debug('Resetting the device.')
+        device.hard_reset(target)
+
+        with open_serial_connection(port=device.port,
+                                    baudrate=device.baudrate,
+                                    timeout=device.timeout,
+                                    init_dtr=0) as target:
+            menu = UefiMenu(target)
+            menu.open(timeout=120)
+            if menu.has_option(device.uefi_entry):
+                self.logger.debug('Deleting existing device entry.')
+                menu.delete_entry(device.uefi_entry)
+            self.create_uefi_entry(device, menu)
+            menu.select(device.uefi_entry)
+            target.expect(device.android_prompt, timeout=device.timeout)
+
+    def create_uefi_entry(self, device, menu):
+        menu.create_entry(device.uefi_entry,
+                          self.image_name,
+                          self.image_args,
+                          self.fdt_support,
+                          self.initrd,
+                          self.fdt_path)
+
+    def deploy_images(self, device, image_bundle=None, images=None):
+        try:
+            if image_bundle:
+                self.deploy_image_bundle(device, image_bundle)
+            if images:
+                self.overlay_images(device, images)
+            os.system('sync')
+        except (IOError, OSError) as e:
+            msg = 'Could not deploy images to {}; got: {}'
+            raise DeviceError(msg.format(device.microsd_mount_point, e))
+
+    def deploy_image_bundle(self, device, bundle):
+        self.logger.debug('Validating {}'.format(bundle))
+        validate_image_bundle(bundle)
+        self.logger.debug('Extracting {} into {}...'.format(bundle, device.microsd_mount_point))
+        with tarfile.open(bundle) as tar:
+            tar.extractall(device.microsd_mount_point)
+
+    def overlay_images(self, device, images):
+        for dest, src in images.iteritems():
+            dest = os.path.join(device.microsd_mount_point, dest)
+            self.logger.debug('Copying {} to {}'.format(src, dest))
+            shutil.copy(src, dest)
+
+
+# utility functions
+
+def get_mapping(base_dir, partition_file):
+    mapping = {}
+    with open(partition_file) as pf:
+        for line in pf:
+            pair = line.split()
+            if len(pair) != 2:
+                raise ConfigError('partitions.txt is not properly formatted.')
+            image_path = os.path.join(base_dir, pair[1])
+            if not os.path.isfile(expand_path(image_path)):
+                raise ConfigError('file {} was not found in the bundle or was misplaced.'.format(pair[1]))
+            mapping[pair[0]] = image_path
+    return mapping
+
+
+def expand_path(original_path):
+    path = os.path.abspath(os.path.expanduser(original_path))
+    if not os.path.exists(path):
+        raise ConfigError('{} does not exist.'.format(path))
+    return path
+
+
+def validate_image_bundle(bundle):
+    if not tarfile.is_tarfile(bundle):
+        raise ConfigError('Image bundle {} does not appear to be a valid TAR file.'.format(bundle))
+    with tarfile.open(bundle) as tar:
+        try:
+            tar.getmember('config.txt')
+        except KeyError:
+            try:
+                tar.getmember('./config.txt')
+            except KeyError:
+                msg = 'Tarball {} does not appear to be a valid image bundle (did not see config.txt).'
+                raise ConfigError(msg.format(bundle))
+
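To make the partitions-file handling concrete: `get_mapping` above turns an extracted bundle into the partition-to-path dict that `FastbootFlasher.flash` feeds to `fastboot_flash_partition`. With a bundle extracted to /tmp/bundle (hypothetical path) whose partitions.txt contains the two lines from the description docstring, one would get:

    mapping = get_mapping('/tmp/bundle', '/tmp/bundle/partitions.txt')
    # => {'kernel': '/tmp/bundle/zImage-dtb', 'ramdisk': '/tmp/bundle/ramdisk_image'}
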
diff --git a/wlauto/modules/reset.py b/wlauto/modules/reset.py
new file mode 100644
index 00000000..31003f33
--- /dev/null
+++ b/wlauto/modules/reset.py
@@ -0,0 +1,52 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import time
+
+from wlauto import Module, Parameter
+from wlauto.exceptions import DeviceError
+from wlauto.utils.netio import KshellConnection
+
+
+class NetioSwitchReset(Module):
+
+    # pylint: disable=E1101
+    name = 'netio_switch'
+    capabilities = ['reset_power']
+
+    parameters = [
+        Parameter('host', default='ippowerbar',
+                  description='IP address or DNS name of the Netio power switch.'),
+        Parameter('port', kind=int, default=1234,
+                  description='Port on which KSHELL is listening.'),
+        Parameter('username', default='admin',
+                  description='User name for the administrator on the Netio.'),
+        Parameter('password', default='admin',
+                  description='Password for the administrator on the Netio.'),
+        Parameter('psu', kind=int, default=1,
+                  description='The device port number on the Netio, i.e. which '
+                              'PSU port the device is connected to.'),
+    ]
+
+    def hard_reset(self):
+        try:
+            conn = KshellConnection(host=self.host, port=self.port)
+            conn.login(self.username, self.password)
+            conn.disable_port(self.psu)
+            time.sleep(2)
+            conn.enable_port(self.psu)
+            conn.close()
+        except Exception as e:
+            raise DeviceError('Could not reset power: {}'.format(e))
diff --git a/wlauto/resource_getters/__init__.py b/wlauto/resource_getters/__init__.py
new file mode 100644
index 00000000..cd5d64d6
--- /dev/null
+++ b/wlauto/resource_getters/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
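Before the standard getters in the module below, it may help to see the shape of the extension point they all implement. A minimal sketch (a hypothetical getter, not part of WA, using only the ResourceGetter API exercised throughout standard.py):

    import os

    from wlauto import ResourceGetter, GetterPriority

    class TeamShareApkGetter(ResourceGetter):  # hypothetical example

        name = 'team_share_apk'

        def register(self):
            # Offer to resolve 'apk' resources, after local caches are tried.
            self.resolver.register(self, 'apk', GetterPriority.remote)

        def get(self, resource, **kwargs):
            # Return a path on a hit, or None to let the next getter try.
            path = '/mnt/team-share/{}.apk'.format(resource.owner.name)
            return path if os.path.isfile(path) else None
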
diff --git a/wlauto/resource_getters/standard.py b/wlauto/resource_getters/standard.py
new file mode 100644
index 00000000..4de6d753
--- /dev/null
+++ b/wlauto/resource_getters/standard.py
@@ -0,0 +1,350 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+"""
+This module contains the standard set of resource getters used by Workload Automation.
+
+"""
+import os
+import sys
+import glob
+import shutil
+import inspect
+
+from wlauto import ResourceGetter, GetterPriority, Parameter, NO_ONE, settings, __file__ as __base_filepath
+from wlauto.exceptions import ResourceError
+from wlauto.utils.misc import ensure_directory_exists as _d
+from wlauto.utils.types import boolean
+
+
+class PackageFileGetter(ResourceGetter):
+
+    name = 'package_file'
+    description = """
+    Looks for exactly one file with the specified extension in the owner's directory. If a version
+    is specified on invocation of get, it will filter the discovered files based on that version.
+    Versions are treated as case-insensitive.
+    """
+
+    extension = None
+
+    def register(self):
+        self.resolver.register(self, self.extension, GetterPriority.package)
+
+    def get(self, resource, **kwargs):
+        resource_dir = os.path.dirname(sys.modules[resource.owner.__module__].__file__)
+        version = kwargs.get('version')
+        return get_from_location_by_extension(resource, resource_dir, self.extension, version)
+
+
+class EnvironmentFileGetter(ResourceGetter):
+
+    name = 'environment_file'
+    description = """
+    Looks for exactly one file with the specified extension in the owner's dependencies directory.
+    If a version is specified on invocation of get, it will filter the discovered files based on
+    that version. Versions are treated as case-insensitive.
+    """
+
+    extension = None
+
+    def register(self):
+        self.resolver.register(self, self.extension, GetterPriority.environment)
+
+    def get(self, resource, **kwargs):
+        resource_dir = resource.owner.dependencies_directory
+        version = kwargs.get('version')
+        return get_from_location_by_extension(resource, resource_dir, self.extension, version)
+
+
+class ReventGetter(ResourceGetter):
+    """Implements logic for identifying revent files."""
+
+    def get_base_location(self, resource):
+        raise NotImplementedError()
+
+    def register(self):
+        self.resolver.register(self, 'revent', GetterPriority.package)
+
+    def get(self, resource, **kwargs):
+        filename = '.'.join([resource.owner.device.name, resource.stage, 'revent']).lower()
+        location = _d(os.path.join(self.get_base_location(resource), 'revent_files'))
+        for candidate in os.listdir(location):
+            if candidate.lower() == filename:
+                return os.path.join(location, candidate)
+
+
+class PackageApkGetter(PackageFileGetter):
+    name = 'package_apk'
+    extension = 'apk'
+
+
+class PackageJarGetter(PackageFileGetter):
+    name = 'package_jar'
+    extension = 'jar'
+
+
+class PackageReventGetter(ReventGetter):
+
+    name = 'package_revent'
+
+    def get_base_location(self, resource):
+        return _get_owner_path(resource)
+
+
+class EnvironmentApkGetter(EnvironmentFileGetter):
+    name = 'environment_apk'
+    extension = 'apk'
+
+
+class EnvironmentJarGetter(EnvironmentFileGetter):
+    name = 'environment_jar'
+    extension = 'jar'
+
+
+class EnvironmentReventGetter(ReventGetter):
+
+    name = 'environment_revent'
+
+    def get_base_location(self, resource):
+        return resource.owner.dependencies_directory
+
+
+class ExecutableGetter(ResourceGetter):
+
+    name = 'exe_getter'
+    resource_type = 'executable'
+    priority = GetterPriority.environment
+
+    def get(self, resource, **kwargs):
+        if settings.binaries_repository:
+            path = os.path.join(settings.binaries_repository, resource.platform, resource.filename)
+            if os.path.isfile(path):
+                return path
+
+
+class PackageExecutableGetter(ExecutableGetter):
+
+    name = 'package_exe_getter'
+    priority = GetterPriority.package
+
+    def get(self, resource, **kwargs):
+        path = os.path.join(_get_owner_path(resource), 'bin', resource.platform, resource.filename)
+        if os.path.isfile(path):
+            return path
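+
+    # For example, the trace-cmd binaries added elsewhere in this commit live
+    # under wlauto/instrumentation/trace_cmd/bin/<abi>/trace-cmd, which is
+    # exactly the <owner directory>/bin/<platform>/<filename> layout probed above.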
+
+
+class EnvironmentExecutableGetter(ExecutableGetter):
+
+    name = 'env_exe_getter'
+
+    def get(self, resource, **kwargs):
+        path = os.path.join(settings.environment_root, 'bin', resource.platform, resource.filename)
+        if os.path.isfile(path):
+            return path
+
+
+class DependencyFileGetter(ResourceGetter):
+
+    name = 'filer'
+    description = """
+    Gets resources from the specified mount point. Copies them to the local
+    dependencies directory, and returns the path to the local copy.
+
+    """
+    resource_type = 'file'
+    relative_path = ''  # May be overridden by subclasses.
+
+    default_mount_point = '/'
+    priority = GetterPriority.remote
+
+    parameters = [
+        Parameter('mount_point', default='/', global_alias='filer_mount_point',
+                  description='Local mount point for the remote filer.'),
+    ]
+
+    def __init__(self, resolver, **kwargs):
+        super(DependencyFileGetter, self).__init__(resolver, **kwargs)
+        self.mount_point = settings.filer_mount_point or self.default_mount_point
+
+    def get(self, resource, **kwargs):
+        force = kwargs.get('force')
+        remote_path = os.path.join(self.mount_point, self.relative_path, resource.path)
+        local_path = os.path.join(resource.owner.dependencies_directory, os.path.basename(resource.path))
+
+        if not os.path.isfile(local_path) or force:
+            if not os.path.isfile(remote_path):
+                return None
+            self.logger.debug('Copying {} to {}'.format(remote_path, local_path))
+            shutil.copy(remote_path, local_path)
+
+        return local_path
+
+
+class PackageCommonDependencyGetter(ResourceGetter):
+
+    name = 'packaged_common_dependency'
+    resource_type = 'file'
+    priority = GetterPriority.package - 1  # check after owner-specific locations
+
+    def get(self, resource, **kwargs):
+        path = os.path.join(settings.package_directory, 'common', resource.path)
+        if os.path.exists(path):
+            return path
+
+
+class EnvironmentCommonDependencyGetter(ResourceGetter):
+
+    name = 'environment_common_dependency'
+    resource_type = 'file'
+    priority = GetterPriority.environment - 1  # check after owner-specific locations
+
+    def get(self, resource, **kwargs):
+        path = os.path.join(settings.dependencies_directory,
+                            os.path.basename(resource.path))
+        if os.path.exists(path):
+            return path
+
+
+class PackageDependencyGetter(ResourceGetter):
+
+    name = 'packaged_dependency'
+    resource_type = 'file'
+    priority = GetterPriority.package
+
+    def get(self, resource, **kwargs):
+        owner_path = inspect.getfile(resource.owner.__class__)
+        path = os.path.join(os.path.dirname(owner_path), resource.path)
+        if os.path.exists(path):
+            return path
+
+
+class EnvironmentDependencyGetter(ResourceGetter):
+
+    name = 'environment_dependency'
+    resource_type = 'file'
+    priority = GetterPriority.environment
+
+    def get(self, resource, **kwargs):
+        path = os.path.join(resource.owner.dependencies_directory, os.path.basename(resource.path))
+        if os.path.exists(path):
+            return path
+
+
+class ExtensionAssetGetter(DependencyFileGetter):
+
+    name = 'extension_asset'
+    resource_type = 'extension_asset'
+    relative_path = 'workload_automation/assets'
+
+
+class RemoteFilerGetter(ResourceGetter):
+
+    name = 'filer_assets'
+    description = """
+    Finds resources on a (locally mounted) remote filer and caches them locally.
+
+    This assumes that the filer is mounted on the local machine (e.g. as a samba share).
+
+    """
+    priority = GetterPriority.remote
+    resource_type = ['apk', 'file', 'jar', 'revent']
+
+    parameters = [
+        Parameter('remote_path', global_alias='remote_assets_path', default='',
+                  description="""Path, on the local system, where the assets are located."""),
+        Parameter('always_fetch', kind=boolean, default=False, global_alias='always_fetch_remote_assets',
+                  description="""If ``True``, will always attempt to fetch assets from the remote, even if
+                  a local cached copy is available."""),
+    ]
+
+    def get(self, resource, **kwargs):
+        version = kwargs.get('version')
+        if resource.owner:
+            remote_path = os.path.join(self.remote_path, resource.owner.name)
+            local_path = os.path.join(settings.environment_root, resource.owner.dependencies_directory)
+            return self.try_get_resource(resource, version, remote_path, local_path)
+        else:
+            result = None
+            for entry in os.listdir(self.remote_path):
+                remote_path = os.path.join(self.remote_path, entry)
+                local_path = os.path.join(settings.environment_root, settings.dependencies_directory, entry)
+                result = self.try_get_resource(resource, version, remote_path, local_path)
+                if result:
+                    break
+            return result
+
+    def try_get_resource(self, resource, version, remote_path, local_path):
+        if not self.always_fetch:
+            result = self.get_from(resource, version, local_path)
+            if result:
+                return result
+        if remote_path:
+            # Didn't find it cached locally; now check the remote.
+            result = self.get_from(resource, version, remote_path)
+            if not result:
+                return None
+        else:  # remote path is not set
+            return None
+        # Found it remotely; cache it locally, then return it.
+        local_full_path = os.path.join(_d(local_path), os.path.basename(result))
+        self.logger.debug('cp {} {}'.format(result, local_full_path))
+        shutil.copy(result, local_full_path)
+        return local_full_path
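+
+    # Resolution order illustrated (hypothetical paths): with remote_path set
+    # to /mnt/wa-assets and always_fetch=False, a 'file' resource for an
+    # extension named 'bbench' is first looked up in the local cache
+    # (<environment_root>/<owner dependencies directory>) and, only on a miss,
+    # under /mnt/wa-assets/bbench; a remote hit is then copied back into the
+    # local cache so that subsequent runs resolve locally.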
+
+    def get_from(self, resource, version, location):  # pylint: disable=no-self-use
+        if resource.name in ['apk', 'jar']:
+            return get_from_location_by_extension(resource, location, resource.name, version)
+        elif resource.name == 'file':
+            filepath = os.path.join(location, resource.path)
+            if os.path.exists(filepath):
+                return filepath
+        elif resource.name == 'revent':
+            filename = '.'.join([resource.owner.device.name, resource.stage, 'revent']).lower()
+            alternate_location = os.path.join(location, 'revent_files')
+            # There tends to be some confusion as to where revent files should
+            # be placed. This looks both in the extension's directory, and in
+            # the 'revent_files' subdirectory under it, if it exists.
+            if os.path.isdir(alternate_location):
+                for candidate in os.listdir(alternate_location):
+                    if candidate.lower() == filename.lower():
+                        return os.path.join(alternate_location, candidate)
+            for candidate in os.listdir(location):
+                if candidate.lower() == filename.lower():
+                    return os.path.join(location, candidate)
+        else:
+            raise ValueError('Unexpected resource type: {}'.format(resource.name))
+
+
+# Utility functions
+
+def get_from_location_by_extension(resource, location, extension, version=None):
+    found_files = glob.glob(os.path.join(location, '*.{}'.format(extension)))
+    if version:
+        found_files = [ff for ff in found_files if version.lower() in os.path.basename(ff).lower()]
+    if len(found_files) == 1:
+        return found_files[0]
+    elif not found_files:
+        return None
+    else:
+        raise ResourceError('More than one .{} found in {} for {}.'.format(extension,
+                                                                           location,
+                                                                           resource.owner.name))
+
+
+def _get_owner_path(resource):
+    if resource.owner is NO_ONE:
+        return os.path.join(os.path.dirname(__base_filepath), 'common')
+    else:
+        return os.path.dirname(sys.modules[resource.owner.__module__].__file__)
diff --git a/wlauto/result_processors/__init__.py b/wlauto/result_processors/__init__.py
new file mode 100644
index 00000000..cd5d64d6
--- /dev/null
+++ b/wlauto/result_processors/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
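The dvfs result processor defined next reduces the (timestamp, state) samples it parses out of trace.txt into per-state residency percentages. Stripped of the cluster, IKS and idle-state handling, the accounting it performs for one core amounts to something like the following (an illustrative sketch, not the processor's actual code):

    def residency_percentages(samples, end_time):
        # samples: chronologically sorted (timestamp, state) pairs for one core.
        time_in_state = {}
        for (ts, state), (next_ts, _) in zip(samples, samples[1:] + [(end_time, None)]):
            time_in_state[state] = time_in_state.get(state, 0.0) + (next_ts - ts)
        total = sum(time_in_state.values())
        return dict((s, 100.0 * t / total) for s, t in time_in_state.items())
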
diff --git a/wlauto/result_processors/dvfs.py b/wlauto/result_processors/dvfs.py
new file mode 100644
index 00000000..b5a865e0
--- /dev/null
+++ b/wlauto/result_processors/dvfs.py
@@ -0,0 +1,375 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+import csv
+import re
+
+from wlauto import ResultProcessor, settings, instrumentation
+from wlauto.exceptions import ConfigError, ResultProcessorError
+
+
+class DVFS(ResultProcessor):
+
+    name = 'dvfs'
+    description = """
+    Reports DVFS state residency data from ftrace power events.
+
+    This generates a ``dvfs.csv`` in the top-level results directory that,
+    for each workload iteration, reports the percentage of time each CPU core
+    spent in each of the DVFS frequency states (P-states), as well as the
+    percentage of time spent idle, during the execution of the workload.
+
+    .. note:: The ``trace-cmd`` instrument *MUST* be enabled in the instrumentation,
+              and at least ``'power*'`` events must be enabled.
+
+    """
+
+    def __init__(self, **kwargs):
+        super(DVFS, self).__init__(**kwargs)
+        self.device = None
+        self.infile = None
+        self.outfile = None
+        self.current_cluster = None
+        self.currentstates_of_clusters = []
+        self.current_frequency_of_clusters = []
+        self.timestamp = []
+        self.state_time_map = {}  # holds the state at each timestamp
+        self.cpuid_time_map = {}  # holds the cpuid at each timestamp
+        self.cpu_freq_time_spent = {}
+        self.cpuids_of_clusters = []
+        self.power_state = [0, 1, 2, 3]
+        self.UNKNOWNSTATE = 4294967295
+        self.multiply_factor = None
+        self.corename_of_clusters = []
+        self.numberofcores_in_cluster = []
+        self.minimum_frequency_cluster = []
+        self.idlestate_description = {}
+
+    def validate(self):
+        if not instrumentation.instrument_is_installed('trace-cmd'):
+            raise ConfigError('"dvfs" only works if the "trace-cmd" instrument is enabled.')
+
+    def initialize(self, context):  # pylint: disable=R0912
+        self.device = context.device
+        if not self.device.core_names:
+            message = 'Device does not specify its core types (core_names/core_clusters not set in device_config).'
+            raise ResultProcessorError(message)
+        number_of_clusters = max(self.device.core_clusters) + 1
+        # In IKS devices, the actual number of cores is double
+        # what we get from device.number_of_cores.
+        if self.device.scheduler == 'iks':
+            self.multiply_factor = 2
+        elif self.device.scheduler == 'unknown':
+            # The device doesn't specify its scheduler type. It could be IKS, in
+            # which case reported values would be wrong, so error out.
+            message = ('The device does not specify its scheduler type. If you are '
+                       'using a generic device interface, please make sure to set the '
+                       '"scheduler" parameter in the device config.')
+            raise ResultProcessorError(message)
+        else:
+            self.multiply_factor = 1
+        # Separate out the cores in each cluster; this is a list with one entry
+        # per cluster, each entry being the list of cores in that cluster.
+        listof_cores_clusters = []
+        for cluster in range(number_of_clusters):
+            listof_cores_clusters.append([core for core in self.device.core_clusters if core == cluster])
+        # Extract the minimum frequency of each cluster, and the idle power
+        # states with their descriptive names.
+        total_cores = 0
+        current_cores = 0
+        for cluster, cores_list in enumerate(listof_cores_clusters):
+            self.corename_of_clusters.append(self.device.core_names[total_cores])
+            if self.device.scheduler != 'iks':
+                self.idlestate_description.update(self.device.get_cpuidle_states(total_cores))
+            else:
+                self.idlestate_description.update(self.device.get_cpuidle_states())
+            total_cores += len(cores_list)
+            self.numberofcores_in_cluster.append(len(cores_list))
+            for i in range(current_cores, total_cores):
+                if i in self.device.active_cpus:
+                    self.minimum_frequency_cluster.append(int(self.device.get_cpu_min_frequency("cpu{}".format(i))))
+                    break
+            current_cores = total_cores
+        length_frequency_cluster = len(self.minimum_frequency_cluster)
+        if length_frequency_cluster != number_of_clusters:
+            diff = number_of_clusters - length_frequency_cluster
+            offline_value = -1
+            for i in range(diff):
+                if self.device.scheduler != 'iks':
+                    self.minimum_frequency_cluster.append(offline_value)
+                else:
+                    self.minimum_frequency_cluster.append(self.device.iks_switch_frequency)
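+
+    # For example (an illustrative big.LITTLE configuration): core_names of
+    # ['a7', 'a7', 'a15', 'a15'] with core_clusters of [0, 0, 1, 1] yields
+    # number_of_clusters == 2, numberofcores_in_cluster == [2, 2], and one
+    # minimum frequency recorded per cluster (with -1 standing in, on non-IKS
+    # devices, for a cluster whose cores are all offline).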
+
+    def process_iteration_result(self, result, context):
+        """
+        Parse trace.txt for each iteration, calculate the DVFS residency for
+        each state/frequency, dump the results to csv, and flush the data for
+        the next iteration.
+
+        """
+        self.infile = os.path.join(context.output_directory, 'trace.txt')
+        if os.path.isfile(self.infile):
+            self.logger.debug('Running result_processor "dvfs"')
+            self.outfile = os.path.join(settings.output_directory, 'dvfs.csv')
+            self.flush_parse_initialize()
+            self.calculate()
+            self.percentage()
+            self.generate_csv(context)
+            self.logger.debug('Completed result_processor "dvfs"')
+        else:
+            self.logger.debug('trace.txt not found.')
+
+    def flush_parse_initialize(self):
+        """
+        Store the state and cpu_id for each timestamp from trace.txt, and flush
+        all the values for the next iteration.
+
+        """
+        self.current_cluster = 0
+        self.current_frequency_of_clusters = []
+        self.timestamp = []
+        self.currentstates_of_clusters = []
+        self.state_time_map = {}
+        self.cpuid_time_map = {}
+        self.cpu_freq_time_spent = {}
+        self.cpuids_of_clusters = []
+        self.parse()  # Parse the trace.txt generated by the trace-cmd instrument.
+        # Initialize the states of each core of each cluster, and the frequency
+        # of each cluster to its minimum frequency.
+        # A cpu_id is assigned to each core of each cluster. For IKS devices,
+        # cpu_ids repeat across clusters; for others, they keep incrementing.
+        count = 0
+        for cluster, cores_number in enumerate(self.numberofcores_in_cluster):
+            self.currentstates_of_clusters.append([-1 for dummy in range(cores_number)])
+            self.current_frequency_of_clusters.append(self.minimum_frequency_cluster[cluster])
+            if self.device.scheduler == 'iks':
+                self.cpuids_of_clusters.append([j for j in range(cores_number)])
+            else:
+                self.cpuids_of_clusters.append(range(count, count + cores_number))
+                count += cores_number
+
+        # Initialize the time spent in each state/frequency for each core.
+        for i in range(self.device.number_of_cores * self.multiply_factor):
+            self.cpu_freq_time_spent["cpu{}".format(i)] = {}
+            for j in self.unique_freq():
+                self.cpu_freq_time_spent["cpu{}".format(i)][j] = 0
+            # The -1 state is added to track time spent offline.
+            offline_value = -1
+            self.cpu_freq_time_spent["cpu{}".format(i)][offline_value] = 0
+            if 0 not in self.unique_freq():
+                self.cpu_freq_time_spent["cpu{}".format(i)][0] = 0
+
+    def update_cluster_freq(self, state, cpu_id):
+        """Update the current cluster and its frequency."""
+        # For IKS devices, a cluster change is only possible when the frequency
+        # changes; for others, the cluster is determined by cpu_id.
+        if self.device.scheduler != 'iks':
+            self.current_cluster = self.get_cluster(cpu_id, state)
+        if self.get_state_name(state) == "freqstate":
+            self.current_cluster = self.get_cluster(cpu_id, state)
+            self.current_frequency_of_clusters[self.current_cluster] = state
+
+    def get_cluster(self, cpu_id, state):
+        # For IKS: if the current state is at or above the switch frequency,
+        # the event belongs to cluster 1, otherwise to cluster 0.
+        # For others: look up which cluster the current cpu_id belongs to.
+        if self.device.scheduler == 'iks':
+            return 1 if state >= self.device.iks_switch_frequency else 0
+        else:
+            for cluster, cpuids_list in enumerate(self.cpuids_of_clusters):
+                if cpu_id in cpuids_list:
+                    return cluster
+
+    def get_cluster_freq(self):
+        return self.current_frequency_of_clusters[self.current_cluster]
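+
+    # Worked example of the IKS rule in get_cluster above (hypothetical
+    # numbers): with iks_switch_frequency = 800000, a frequency event with
+    # state=1000000 is attributed to cluster 1, while state=500000 maps to
+    # cluster 0, regardless of the reported cpu_id.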
+
+    def update_state(self, state, cpu_id):  # pylint: disable=R0912
+        """
+        Update the state of each core in every cluster.
+        This is done for each timestamp.
+
+        """
+        POWERDOWN = 2
+        offline_value = -1
+        # If the state is an unknown state, set the state of the current cpu_id
+        # to the current frequency of its cluster.
+        # If the state is a power state, record that power state directly.
+        if self.get_state_name(state) in ["unknownstate", "powerstate"]:
+            for i in range(len(self.cpuids_of_clusters[self.current_cluster])):
+                if cpu_id == self.cpuids_of_clusters[self.current_cluster][i]:
+                    if self.get_state_name(state) == "unknownstate":
+                        self.currentstates_of_clusters[self.current_cluster][i] = self.current_frequency_of_clusters[self.current_cluster]
+                    elif self.get_state_name(state) == "powerstate":
+                        self.currentstates_of_clusters[self.current_cluster][i] = state
+        # If the state is a frequency state, update with the current state.
+        # For IKS: if all cores are powered down and the current state is a
+        # frequency state, update all the cores in the current cluster to the
+        # current state, and mark the cores of the other clusters as powered down.
+        if self.get_state_name(state) == "freqstate":
+            for i, j in enumerate(self.currentstates_of_clusters[self.current_cluster]):
+                if j != offline_value:
+                    self.currentstates_of_clusters[self.current_cluster][i] = state
+                if cpu_id == self.cpuids_of_clusters[self.current_cluster][i]:
+                    self.currentstates_of_clusters[self.current_cluster][i] = state
+            if self.device.scheduler == 'iks':
+                check = False  # remains False if every core in the cluster is powered down
+                for i in range(len(self.currentstates_of_clusters[self.current_cluster])):
+                    if self.currentstates_of_clusters[self.current_cluster][i] != POWERDOWN:
+                        check = True
+                        break
+                if not check:
+                    for i in range(len(self.currentstates_of_clusters[self.current_cluster])):
+                        self.currentstates_of_clusters[self.current_cluster][i] = self.current_frequency_of_clusters[self.current_cluster]
+                    for cluster, state_list in enumerate(self.currentstates_of_clusters):
+                        if cluster != self.current_cluster:
+                            for j in range(len(state_list)):
+                                self.currentstates_of_clusters[cluster][j] = POWERDOWN
+
+    def unique_freq(self):
+        """Return the unique frequencies/states seen in the trace."""
+        unique_freq = []
+        for i in self.timestamp:
+            if self.state_time_map[i] not in unique_freq and self.state_time_map[i] != self.UNKNOWNSTATE:
+                unique_freq.append(self.state_time_map[i])
+        for i in self.minimum_frequency_cluster:
+            if i not in unique_freq:
+                unique_freq.append(i)
+        return unique_freq
+
+    def parse(self):
+        """
+        Parse trace.txt, storing the timestamp, state and cpu_id of each entry ::
+
+            |timestamp|                                  |state|          |cpu_id|
+            <idle>-0  [001]  294.554380: cpu_idle:       state=4294967295 cpu_id=1
+            <idle>-0  [001]  294.554454: power_start:    type=1 state=0   cpu_id=1
+            <idle>-0  [001]  294.554458: cpu_idle:       state=0          cpu_id=1
+            <idle>-0  [001]  294.554464: power_end:                       cpu_id=1
+            <idle>-0  [001]  294.554471: cpu_idle:       state=4294967295 cpu_id=1
+            <idle>-0  [001]  294.554590: power_start:    type=1 state=0   cpu_id=1
+            <idle>-0  [001]  294.554593: cpu_idle:       state=0          cpu_id=1
+            <idle>-0  [001]  294.554636: power_end:                       cpu_id=1
+            <idle>-0  [001]  294.554639: cpu_idle:       state=4294967295 cpu_id=1
+            <idle>-0  [001]  294.554669: power_start:    type=1 state=0   cpu_id=1
+
+        """
+        pattern = re.compile(r'\s+(?P