mirror of https://github.com/ARM-software/workload-automation.git
synced 2025-04-15 23:30:47 +01:00

Compare commits: 367 commits
Commit SHA1s in this range:

```
2d14c82f92 8598d1ba3c 523fb3f659 0732fa9cf0 b03f28d1d5 f125fd340d
75cfb56b38 b734e90de1 5670e571e1 45f09a66be 9638a084f9 4da8b0691f
412a785068 6fc5340f2f da667b58ac 4e9d402c24 e0bf7668b8 4839ab354f
b6ecc18763 7315041e90 adbb647fa7 366f59ebf7 0eb17bf8f0 f166ac742e
6fe4bce68d 28b78a93f1 77ebefba08 41f7984243 23fcb2c120 e38b51b242
ea08a4f9e6 5b56210d5f 0179202c90 617306fdda 8d4fe9556b 775b24f7a3
13f9c64513 6cd1c60715 05eab42f27 b113a8b351 d67d9bd2a4 11374aae3f
839242d636 b9b02f83fc 6aa1caad94 bf72a576e6 951eec991c 0b64b51259
f4ebca39a1 88b085c11b 36a909dda2 3228a3187c 5e0c59babb dc2fc99e98
46ff6e1f62 8b3f58e726 fe7a88e43e 61bb162350 d1e960e9b0 29a5a7fd43
37346fe1b1 40a118c8cd c4535320fa 08b87291f8 a3eacb877c 48152224a8
095d6bc100 8b94ed972d 276f146c1e 3b9fcd8001 88fb1de62b 7dc337b7d0
b0f9072830 b109acac05 9c7bae3440 7b5ffafbda be02ad649c 5a121983fc
69795628ed 7a332dfd5b 4bad433670 0b558e408c c023b9859c 284cc60b00
06b508107b cb1107df8f 789e150b0a 43cb80d854 31d306c23a 591c85edec
72298ff9ac f08770884a a5e5920aca 5558d43ddd c8ea525a00 c4c0230958
b65a371b9d 7f0a6da86b 75a70ad181 84b5ea8a56 4b54e17020 da4d10d4e7
8882feed84 7f82480a26 e4be2b73ef 22750b15c7 e3703f0e1e 4ddd610149
c5e3a421b1 0e2a150170 69378b0873 c543c49423 dd07d2ec43 94590e88ee
c2725ffaa2 751bbb19fe ae1bc2c031 91b791665a 62c4f3837c 3c5bece01e
cb51ef4d47 8e56a4c831 76032c1d05 4c20fe814a 92e253d838 18439e3b31
5cfe452a35 f1aff6b5a8 5dd3abe564 e3ab798f6e ed925938dc ed4eb8af5d
a1bdb7de45 fbe9460995 aa4df95a69 fbb84eca72 fbd6f4e90c 1c08360263
ff220dfb44 7489b487e1 ba5a65aad7 7bea3a69bb 971289698b 66e220d444
ae8a7bdfb5 b0355194bc 7817308bf7 ab9e29bdae 9edb6b20f0 879a491691
7086fa6b48 716e59daf5 08fcc7d30f 684121e2e7 0c1229df8c 615cbbc94d
1425a6f6c9 4557da2f80 7cf5fbd8af 3f5a31de96 7c6ebfb49c 8640f4f69a
460965363f d4057367d8 ef6cffd85a 37f4d33015 8c7320a1be 6d72a242ce
0c2613c608 b8301640f7 c473cfa8fe 1f0da5facf 39121caf66 83da20ce9f
f664a00bdc 443358f513 586d95a4f0 58f3ea35ec 7fe334b467 3967071a5e
cd6f4541ca 7e6eb089ab 491dcd5b5b 7a085e586a 0f47002e4e 6ff5abdffe
82d09612cb ecbfe32b9d 2d32d81acb b9d593e578 1f8be77331 66f0edec5b
e2489ea3a0 16be8a70f5 dce07e5095 711bff6a60 2a8454db6a 9b19f33186
53faf159e8 84a9526dd3 a3cf2e5650 607cff4c54 d56f0fbe20 0f9c20dc69
310bad3966 a8abf24db0 dad0a28b5e 2cd4bf7e31 5049e3663b c9ddee761a
3be00b296d 9a931f42ee 06ba8409c1 2da9370920 ef9b4c8919 31f4c0fd5f
62ca7c0c36 d0f099700a 5f00a94121 0f2de5f951 51ffd60c06 0a4164349b
fe50d75858 b93a8cbbd6 79dec810f3 44cead2f76 c6d23ab01f 6f9856cf2e
0f9331dafe 659e60414f 796f62d924 f60032a59d 977ce4995d a66251dd60
d3adfa1af9 39a294ddbe 164095e664 24a4a032db 05857ec2bc fd8a7e442c
dfb4737e51 06518ad40a 009fd831b8 88284750e7 8b337768a3 38aa9d12bd
769c883a3a 90db655959 817d98ed72 d67668621c 1531ddcdef 322f9be2d3
494424c8ea ee54a68b65 cc1cc6f77f da0ceab027 683eec2377 07e47de807
5906bca6b3 9556c3a004 1f4bae92bf dcbc00addd 4ee75be7ab 796dfb1de6
f3e7b14b28 e9839d52c4 7ebbb05934 13166f66d1 ab5d12be72 298bc3a7f3
09d6f4dea1 d7c95fa844 0efd20cf59 e41aa3c967 3bef4fc92d 0166180f30
a9f3ee9752 35ce87068c 6beac11ee2 2f231b5ce5 75878e2f27 023cb88ab1
d27443deb5 1a15f5c761 d3af4e7515 73b0b0d709 bb18a1a51c 062be6d544
c1e095be51 eeebd010b9 e387e3d9b7 6042fa374a 050329a5ee d9e7aa9af0
125cd3bb41 75ea78ea4f 12bb21045e 4bb1f4988f 0ff6b4842a 98b787e326
e915436661 68e1806c07 f19ebb79ee c950f5ec8f 6aaa28781b d87025ad3a
ac5819da8e 31e08a6477 47769cf28d d8601880ac 0efc9b9ccd 501d3048a5
c4daccd800 db944629f3 564738a2ad c092128e94 463840d2b7 43633ab362
a6f0ab31e4 72fd5b5139 766bb4da1a a5f0521353 3435c36b98 bd252a6471
f46851a3b4 8910234448 1108c5701e f5d1a9e94a 959106d61b 0aea3abcaf
24ccc024f8 42ab811032 832ed797e1 31b44e447e 179b2e2264 22437359b6
2347c8c007 52a0a79012 60693e1b65 8ddf16dfea 9aec4850c2 bdaa26d772
d7aedae69c 45af8c69b8 e398083f6e 4ce41407e9 aa0564e8f3 83f826d6fe
1599b59770 8cd9862e32 b4ea2798dd 76e6f14212 ce59318e66 5652057adb
e9f5577237 ec3d928b3b ee8bab365b e3406bdb74 55d983ecaf f8908e8194
dd44d6fa16 753786a45c 8647ceafd8 2c2118ad23 0ec8427d05 cf5c3a2723
8ddc1c1eba
```
**.github/ISSUE_TEMPLATE/bug_report.md** (vendored, new file, +16 lines):

```markdown
---
name: Bug report
about: Create a report to help resolve an issue.
title: ''
labels: bug
assignees: ''

---

**Describe the issue**
A clear and concise description of what the bug is.

**Run Log**
Please attach your `run.log` detailing the issue.

**Other comments (optional)**
```
**.github/ISSUE_TEMPLATE/feature_request.md** (vendored, new file, +17 lines):

```markdown
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is.

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Additional context**
Add any other context about the feature request here.
```
**.github/ISSUE_TEMPLATE/question---support-.md** (vendored, new file, +10 lines):

```markdown
---
name: 'Question / Support '
about: Ask a question or request support
title: ''
labels: question
assignees: ''

---

**
```
**.github/ISSUE_TEMPLATE/question.md** (vendored, new file, +11 lines):

```markdown
---
name: Question
about: Ask a question
title: ''
labels: question
assignees: ''

---

**Describe your query**
What would you like to know / what are you trying to achieve?
```
**.github/workflows/main.yml** (vendored, new file, +92 lines):

```yaml
name: WA Test Suite

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]
    types: [opened, synchronize, reopened, ready_for_review]
  schedule:
    - cron: 0 2 * * *
  # Allows running this workflow manually from the Actions tab
  workflow_dispatch:

jobs:
  Run-Linters-and-Tests:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python 3.8.18
        uses: actions/setup-python@v2
        with:
          python-version: 3.8.18
      - name: git-bash
        uses: pkg-src/github-action-git-bash@v1.1
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          cd /tmp && git clone https://github.com/ARM-software/devlib.git && cd devlib && pip install .
          cd $GITHUB_WORKSPACE && pip install .[test]
          python -m pip install pylint==2.6.2 pep8 flake8 mock nose
      - name: Run pylint
        run: |
          cd $GITHUB_WORKSPACE && ./dev_scripts/pylint wa/
      - name: Run PEP8
        run: |
          cd $GITHUB_WORKSPACE && ./dev_scripts/pep8 wa
      - name: Run nose tests
        run: |
          nosetests

  Execute-Test-Workload-and-Process:
    runs-on: ubuntu-22.04
    strategy:
      matrix:
        python-version: [3.7.17, 3.8.18, 3.9.21, 3.10.16, 3.13.2]
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: git-bash
        uses: pkg-src/github-action-git-bash@v1.1
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          cd /tmp && git clone https://github.com/ARM-software/devlib.git && cd devlib && pip install .
          cd $GITHUB_WORKSPACE && pip install .
      - name: Run test workload
        run: |
          cd /tmp && wa run $GITHUB_WORKSPACE/tests/ci/idle_agenda.yaml -v -d idle_workload
      - name: Test Process Command
        run: |
          cd /tmp && wa process -f -p csv idle_workload

  Test-WA-Commands:
    runs-on: ubuntu-22.04
    strategy:
      matrix:
        python-version: [3.7.17, 3.8.18, 3.9.21, 3.10.16, 3.13.2]
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: git-bash
        uses: pkg-src/github-action-git-bash@v1.1
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          cd /tmp && git clone https://github.com/ARM-software/devlib.git && cd devlib && pip install .
          cd $GITHUB_WORKSPACE && pip install .
      - name: Test Show Command
        run: |
          wa show dhrystone && wa show generic_android && wa show trace-cmd && wa show csv
      - name: Test List Command
        run: |
          wa list all
      - name: Test Create Command
        run: |
          wa create agenda dhrystone generic_android csv trace_cmd && wa create package test && wa create workload test
```
**.readthedocs.yml** (new file, +28 lines):

```yaml
# .readthedocs.yml
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details

# Required
version: 2

# Build documentation in the docs/ directory with Sphinx
sphinx:
  builder: html
  configuration: doc/source/conf.py

# Build the docs in additional formats such as PDF and ePub
formats: all

# Configure the build environment
build:
  os: ubuntu-22.04
  tools:
    python: "3.11"

# Ensure doc dependencies are installed before building
python:
  install:
    - requirements: doc/requirements.txt
    - method: pip
      path: .
```
**.travis.yml** (deleted, -54 lines):

```yaml
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

language: python

python:
  - "3.6"
  - "2.7"

install:
  - pip install nose
  - pip install nose2
  - pip install flake8
  - pip install pylint==1.9.2
  - git clone -v https://github.com/ARM-software/devlib.git /tmp/devlib && cd /tmp/devlib && python setup.py install
  - cd $TRAVIS_BUILD_DIR && python setup.py install

env:
  global:
    - PYLINT="cd $TRAVIS_BUILD_DIR && ./dev_scripts/pylint wa"
    - PEP8="cd $TRAVIS_BUILD_DIR && ./dev_scripts/pep8 wa"
    - NOSETESTS="nose2 -s $TRAVIS_BUILD_DIR/tests"
    - WORKLOAD="cd /tmp && wa run $TRAVIS_BUILD_DIR/tests/travis/idle_agenda.yaml -v -d idle_workload"
    - PROCESS_CMD="$WORKLOAD && wa process -f -p csv idle_workload"
    - SHOW_CMD="wa show dhrystone && wa show generic_android && wa show trace-cmd && wa show csv"
    - LIST_CMD="wa list all"
    - CREATE_CMD="wa create agenda dhrystone generic_android csv trace_cmd && wa create package test && wa create workload test"
  matrix:
    - TEST=$PYLINT
    - TEST=$PEP8
    - TEST=$NOSETESTS
    - TEST=$WORKLOAD
    - TEST="$PROCESS_CMD && $SHOW_CMD && $LIST_CMD && $CREATE_CMD"

script:
  - echo $TEST && eval $TEST

matrix:
  exclude:
    - python: "2.7"
      env: TEST=$PYLINT
    - python: "2.7"
      env: TEST=$PEP8
```
```diff
@@ -18,7 +18,7 @@ workloads, instruments or output processing.
 Requirements
 ============

-- Python 2.7 or Python 3
+- Python 3.5+
 - Linux (should work on other Unixes, but untested)
 - Latest Android SDK (ANDROID_HOME must be set) for Android devices, or
 - SSH for Linux devices

@@ -30,7 +30,11 @@ Installation
 To install::

     git clone git@github.com:ARM-software/workload-automation.git workload-automation
-    sudo -H python setup [install|develop]
+    sudo -H pip install ./workload-automation
+
+Note: A `requirements.txt` is included however this is designed to be used as a
+reference for known working versions rather than as part of a standard
+installation.

 Please refer to the `installation section <http://workload-automation.readthedocs.io/en/latest/user_information.html#install>`_
 in the documentation for more details.
```
```diff
@@ -6,7 +6,7 @@ DEFAULT_DIRS=(

 EXCLUDE=wa/tests,wa/framework/target/descriptor.py
 EXCLUDE_COMMA=
-IGNORE=E501,E265,E266,W391,E401,E402,E731,W504,W605,F401
+IGNORE=E501,E265,E266,W391,E401,E402,E731,W503,W605,F401

 if ! hash flake8 2>/dev/null; then
     echo "flake8 not found in PATH"
```
```diff
@@ -36,6 +36,9 @@ pylint_version=$(python -c 'from pylint.__pkginfo__ import version; print(version)'
 if [ "x$pylint_version" == "x" ]; then
     pylint_version=$(python3 -c 'from pylint.__pkginfo__ import version; print(version)' 2>/dev/null)
 fi
+if [ "x$pylint_version" == "x" ]; then
+    pylint_version=$(python3 -c 'from pylint import version; print(version)' 2>/dev/null)
+fi
 if [ "x$pylint_version" == "x" ]; then
     echo "ERROR: no pylint version found; is it installed?"
     exit 1
```
```diff
@@ -32,17 +32,11 @@ def transform(mod):
     if b'pylint:' in text[0]:
         msg = 'pylint directive found on the first line of {}; please move to below copyright header'
         raise RuntimeError(msg.format(mod.name))
-    if sys.version_info[0] == 3:
-        char = chr(text[0][0])
-    else:
-        char = text[0][0]
+    char = chr(text[0][0])
     if text[0].strip() and char != '#':
         msg = 'first line of {} is not a comment; is the copyright header missing?'
         raise RuntimeError(msg.format(mod.name))
-    if sys.version_info[0] == 3:
-        text[0] = '# pylint: disable={}'.format(','.join(errors)).encode('utf-8')
-    else:
-        text[0] = '# pylint: disable={}'.format(','.join(errors))
+    text[0] = '# pylint: disable={}'.format(','.join(errors)).encode('utf-8')
     mod.file_bytes = b'\n'.join(text)

     # This is what *should* happen, but doesn't work.
```
```diff
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-# Copyright 2015-2015 ARM Limited
+# Copyright 2015-2019 ARM Limited
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

@@ -26,10 +26,11 @@ OUTPUT_TEMPLATE_FILE = os.path.join(os.path.dirname(__file__), 'source', 'instr

 def generate_instrument_method_map(outfile):
-    signal_table = format_simple_table([(k, v) for k, v in SIGNAL_MAP.iteritems()],
+    signal_table = format_simple_table([(k, v) for k, v in SIGNAL_MAP.items()],
                                        headers=['method name', 'signal'], align='<<')
-    priority_table = format_simple_table(zip(CallbackPriority.names, CallbackPriority.values),
-                                         headers=['decorator', 'priority'], align='<>')
+    decorator_names = map(lambda x: x.replace('high', 'fast').replace('low', 'slow'), CallbackPriority.names)
+    priority_table = format_simple_table(zip(decorator_names, CallbackPriority.names, CallbackPriority.values),
+                                         headers=['decorator', 'CallbackPriority name', 'CallbackPriority value'], align='<>')
     with open(OUTPUT_TEMPLATE_FILE) as fh:
         template = string.Template(fh.read())
     with open(outfile, 'w') as wfh:

@@ -37,4 +38,4 @@ def generate_instrument_method_map(outfile):

 if __name__ == '__main__':
-    generate_instrumentation_method_map(sys.argv[1])
+    generate_instrument_method_map(sys.argv[1])
```
```diff
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-# Copyright 2014-2015 ARM Limited
+# Copyright 2014-2019 ARM Limited
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

@@ -25,7 +25,12 @@ from wa.utils.doc import (strip_inlined_text, get_rst_from_plugin,
                           get_params_rst, underline, line_break)
 from wa.utils.misc import capitalize

-GENERATE_FOR_PACKAGES = ['wa.workloads', 'wa.instruments', 'wa.output_processors']
+GENERATE_FOR_PACKAGES = [
+    'wa.workloads',
+    'wa.instruments',
+    'wa.output_processors',
+]


 def insert_contents_table(title='', depth=1):
     """

@@ -41,6 +46,7 @@

 def generate_plugin_documentation(source_dir, outdir, ignore_paths):
+    # pylint: disable=unused-argument
     pluginloader.clear()
     pluginloader.update(packages=GENERATE_FOR_PACKAGES)
     if not os.path.exists(outdir):

@@ -57,7 +63,7 @@
         exts = pluginloader.list_plugins(ext_type)
         sorted_exts = iter(sorted(exts, key=lambda x: x.name))
         try:
-            wfh.write(get_rst_from_plugin(sorted_exts.next()))
+            wfh.write(get_rst_from_plugin(next(sorted_exts)))
         except StopIteration:
             return
         for ext in sorted_exts:

@@ -73,9 +79,11 @@ def generate_target_documentation(outdir):
                        'juno_linux',
                        'juno_android']

-    intro = '\nThis is a list of commonly used targets and their device '\
-            'parameters, to see a complete for a complete reference please use the '\
-            'WA :ref:`list command <list-command>`.\n\n\n'
+    intro = (
+        '\nThis is a list of commonly used targets and their device '
+        'parameters, to see a complete for a complete reference please use the'
+        ' WA :ref:`list command <list-command>`.\n\n\n'
+    )

     pluginloader.clear()
     pluginloader.update(packages=['wa.framework.target.descriptor'])

@@ -112,7 +120,8 @@ def generate_config_documentation(config, outdir):
     if not os.path.exists(outdir):
         os.mkdir(outdir)

-    outfile = os.path.join(outdir, '{}.rst'.format('_'.join(config.name.split())))
+    config_name = '_'.join(config.name.split())
+    outfile = os.path.join(outdir, '{}.rst'.format(config_name))
     with open(outfile, 'w') as wfh:
         wfh.write(get_params_rst(config.config_points))
```
```diff
@@ -1,4 +1,7 @@
 nose
 numpy
 pandas
-sphinx_rtd_theme>=0.3.1
+sphinx_rtd_theme==1.0.0
+sphinx==4.2
+docutils<0.18
+devlib @ git+https://github.com/ARM-software/devlib@master
```
```diff
@@ -284,6 +284,13 @@ methods
    :return: A list of `str` labels of workloads that were part of this run.


+.. method:: RunOutput.add_classifier(name, value, overwrite=False)
+
+   Add a classifier to the run as a whole. If a classifier with the specified
+   ``name`` already exists, a ``ValueError`` will be raised, unless
+   `overwrite=True` is specified.
```
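As a quick illustration of the classifier API described above, here is a minimal sketch. It is not part of the original docs; it assumes ``RunOutput`` can be constructed from an existing WA output directory and that it lives in ``wa.framework.output``, as in current WA3 sources. Verify both against your installed version.

```python
# Minimal sketch (assumptions noted above); ./wa_output is an existing WA run.
from wa.framework.output import RunOutput

ro = RunOutput('./wa_output')
ro.add_classifier('board', 'juno-r2')                  # tag the run as a whole
ro.add_classifier('board', 'juno-r0', overwrite=True)  # replace instead of raising ValueError
```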
:class:`RunDatabaseOutput`
---------------------------
```diff
@@ -315,9 +322,12 @@ methods

 .. method:: RunDatabaseOutput.get_artifact_path(name)

-   Returns a `StringIO` object containing the contents of the artifact
-   specified by ``name``. This will only look at the run artifacts; this will
-   not search the artifacts of the individual jobs.
+   If the artifact is a file this method returns a `StringIO` object containing
+   the contents of the artifact specified by ``name``. If the artifact is a
+   directory, the method returns a path to a locally extracted version of the
+   directory which is left to the user to remove after use. This will only look
+   at the run artifacts; this will not search the artifacts of the individual
+   jobs.

   :param name: The name of the artifact whose path to retrieve.
   :return: A `StringIO` object with the contents of the artifact
```
```diff
@@ -399,7 +409,7 @@ artifacts, metadata, and configuration. It has the following attributes:
 methods
 ~~~~~~~

-.. method:: RunOutput.get_artifact(name)
+.. method:: JobOutput.get_artifact(name)

    Return the :class:`Artifact` specified by ``name`` associated with this job.

@@ -407,7 +417,7 @@ methods
    :return: The :class:`Artifact` with that name
    :raises HostError: If the artifact with the specified name does not exist.

-.. method:: RunOutput.get_artifact_path(name)
+.. method:: JobOutput.get_artifact_path(name)

    Return the path to the file backing the artifact specified by ``name``,
    associated with this job.
```
```diff
@@ -416,13 +426,20 @@ methods
    :return: The path to the artifact
    :raises HostError: If the artifact with the specified name does not exist.

-.. method:: RunOutput.get_metric(name)
+.. method:: JobOutput.get_metric(name)

    Return the :class:`Metric` associated with this job with the specified
    `name`.

    :return: The :class:`Metric` object for the metric with the specified name.

+.. method:: JobOutput.add_classifier(name, value, overwrite=False)
+
+   Add a classifier to the job. The classifier will be propagated to all
+   existing artifacts and metrics, as well as those added afterwards. If a
+   classifier with the specified ``name`` already exists, a ``ValueError`` will
+   be raised, unless `overwrite=True` is specified.
```
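Tying the ``JobOutput`` accessors above together, a short hedged sketch: it assumes a ``RunOutput`` instance exposes its jobs through a ``jobs`` attribute (as in current WA3 sources), and the metric name is invented; substitute one your workload actually produces.

```python
# Illustrative only: walk every job in a run and print one metric per job.
from wa.framework.output import RunOutput

ro = RunOutput('./wa_output')
for job in ro.jobs:                              # JobOutput instances
    metric = job.get_metric('execution_time')   # hypothetical metric name
    if metric is not None:
        print(job.id, metric.value, metric.units)
```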
:class:`JobDatabaseOutput`
---------------------------
```diff
@@ -452,8 +469,11 @@ methods

 .. method:: JobDatabaseOutput.get_artifact_path(name)

-   Returns a ``StringIO`` object containing the contents of the artifact
-   specified by ``name`` associated with this job.
+   If the artifact is a file this method returns a `StringIO` object containing
+   the contents of the artifact specified by ``name`` associated with this job.
+   If the artifact is a directory, the method returns a path to a locally
+   extracted version of the directory which is left to the user to remove after
+   use.

   :param name: The name of the artifact whose path to retrieve.
   :return: A `StringIO` object with the contents of the artifact
```
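Because the database-backed classes return a ``StringIO`` for file artifacts, reading one looks like the sketch below. Constructing the output object itself is elided here, since its connection parameters are deployment-specific, and the artifact name is assumed for illustration.

```python
# Hypothetical: 'dbo' is an already-constructed JobDatabaseOutput instance,
# and 'runlog' is assumed to name a file-backed artifact of this job.
buf = dbo.get_artifact_path('runlog')   # StringIO for file artifacts
print(buf.read())
```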
```diff
@@ -602,6 +622,12 @@ The available attributes of the class are as follows:
    The name of the target class that was used to interact with the device
    during the run, e.g. ``"AndroidTarget"``, ``"LinuxTarget"`` etc.

+``modules``
+   A list of names of modules that have been loaded by the target. Modules
+   provide additional functionality, such as access to ``cpufreq``, and which
+   modules are installed may impact how much of the ``TargetInfo`` has been
+   populated.
+
 ``cpus``
    A list of :class:`CpuInfo` objects describing the capabilities of each CPU.
```

```diff
@@ -183,6 +183,11 @@ methods.
    workload, if a specific apk version is not specified then any available
    supported version may be chosen.

+``activity``
+   This attribute can be optionally set to override the default activity that
+   will be extracted from the selected APK file which will be used when
+   launching the APK.
+
 ``view``
    This is the "view" associated with the application. This is used by
    instruments like ``fps`` to monitor the current framerate being generated by
    the application.
```
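To make the new ``activity`` attribute concrete, here is a hedged sketch of a workload that overrides it. The class, package, and activity names are invented for illustration; only the ``ApkWorkload`` base class and the attributes shown are taken from the docs above.

```python
# Hypothetical ApkWorkload subclass overriding the launch activity.
from wa import ApkWorkload

class MyApp(ApkWorkload):

    name = 'myapp'
    package_names = ['com.example.myapp']   # APK(s) this workload can drive
    activity = '.MainActivity'              # used instead of the activity parsed from the APK
```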
The changelog gains release notes for versions 3.3.1 through 3.1.3 (hunk `@@ -2,6 +2,296 @@`, all lines added):

What's New in Workload Automation
=================================

*************
Version 3.3.1
*************

.. warning:: This is the last release supporting Python 3.5 and Python 3.6.
             Subsequent releases will support Python 3.7+.

New Features:
=============

Commands:
---------

Instruments:
------------
- ``perf``: Add support for ``report-sample``.

Workloads:
----------
- ``PCMark``: Add support for PCMark 3.0.
- ``Antutu``: Add support for 9.1.6.
- ``Geekbench``: Add support for Geekbench 5.
- ``gfxbench``: Support the non-corporate version.

Fixes/Improvements
==================

Framework:
----------
- Fix installation on systems without git installed.
- Avoid querying online cpus if hotplug is disabled.

Dockerfile:
-----------
- Update base image to Ubuntu 20.04.

Instruments:
------------
- ``perf``: Fix parsing csv when using interval-only-values.
- ``perf``: Improve error reporting of an invalid agenda.

Output Processors:
------------------
- ``postgres``: Fixed SQL command when creating a new event.

Workloads:
----------
- ``speedometer``: Fix adb reverse when rebooting a device.
- ``googleplaybook``: Support newer apk version.
- ``googlephotos``: Support newer apk version.
- ``gmail``: Support newer apk version.

Other:
------
- Upgrade Android Gradle to 7.2 and Gradle plugin to 4.2.

***********
Version 3.3
***********

New Features:
=============

Commands:
---------
- Add ``report`` command to provide a summary of a run.

Instruments:
------------
- Add ``proc_stat`` instrument to monitor CPU load using data from ``/proc/stat``.

Framework:
----------
- Add support for simulating atomic writes to prevent race conditions when running concurrent instances of WA.
- Add support for file transfer over SSH connections via SFTP, falling back to the SCP implementation.
- Support detection of logcat buffer overflow and present a warning if this occurs.
- Allow skipping all remaining jobs if a job has exhausted all of its retries.
- Add a polling mechanism for file transfers rather than relying on timeouts.
- Add a `run_completed` reboot policy to enable rebooting a target after a run has been completed.

Android Devices:
----------------
- Enable configuration of whether to keep the screen on while the device is plugged in.

Output Processors:
------------------
- Enable the use of cascading deletion in Postgres databases to clean up after deletion of a run entry.

Fixes/Improvements
==================

Framework:
----------
- Improvements to the ``process`` command to correctly handle skipped and in-process jobs.
- Add support for deprecated parameters, allowing a warning to be raised when providing
  a parameter that will no longer have an effect.
- Switch implementation of SSH connections to use Paramiko for greater stability.
- By default use sftp for file transfers with SSH connections; allow falling back to scp
  by setting ``use_scp``.
- Fix callbacks not being disconnected correctly when requested.
- ``ApkInfo`` objects are now cached to reduce re-parsing of APK files.
- Speed up discovery of wa output directories.
- Fix merge handling of parameters from multiple files.

Dockerfile:
-----------
- Install additional instruments for use in the docker environment.
- Fix environment variables not being defined in non-interactive environments.

Instruments:
------------
- ``trace_cmd``: additional fixes for Python 3 support.

Output Processors:
------------------
- ``postgres``: Fixed SQL command when creating a new event.

Workloads:
----------
- ``aitutu``: Improve reliability of results extraction.
- ``androbench``: Enable dismissing of additional popups on some devices.
- ``antutu``: Now supports major version 8 in addition to version 7.X.
- ``exoplayer``: Add support for Android 10.
- ``googlephotos``: Support newer apk version.
- ``gfxbench``: Allow user configuration of which tests should be run.
- ``gfxbench``: Improved score detection for a wider range of devices.
- ``gfxbench``: Moved results extraction out of the run stage.
- ``jankbench``: Support newer versions of Pandas for processing.
- ``pcmark``: Add support for handling additional popups and installation flows.
- ``pcmark``: No longer clear and re-download test data before each execution.
- ``speedometer``: Enable the workload to run offline and drop the requirement for
  UiAutomator. To support this, root access is now required to run the workload.
- ``youtube``: Update to support later versions of the apk.

Other:
------
- ``cpustates``: Improved name handling for unknown idle states.

***********
Version 3.2
***********

.. warning:: This release only supports Python 3.5+. Python 2 support has now
             been dropped.

Fixes/Improvements
==================

Framework:
----------
- ``TargetInfo`` now tracks installed modules and will ensure the cache is
  also updated on module change.
- Migrated the build scripts for uiauto-based workloads to Python 3.
- Uiauto applications now target SDK version 28 to prevent PlayProtect
  blocking the installation of the automation apks on some devices.
- The workload metadata now includes the apk package name if applicable.

Instruments:
------------
- ``energy_instruments`` will now have their ``teardown`` method called
  correctly.
- ``energy_instruments``: Added a ``keep_raw`` parameter to control whether
  raw files generated during execution should be deleted upon teardown.
- Update relevant instruments to make use of the new devlib collector
  interface; for more information please see the
  `devlib documentation <https://devlib.readthedocs.io/en/latest/collectors.html>`_.

Output Processors:
------------------
- ``postgres``: If initialisation fails then the output processor will no
  longer attempt to reconnect at a later point during the run.
- ``postgres``: Will now ensure that the connection to the database is
  re-established if it is dropped, e.g. due to a long-running workload.
- ``postgres``: Change the type of the ``hostid`` field to ``Bigint`` to
  allow a larger range of ids.
- ``postgres``: Bump schema version to 1.5.
- ``perf``: Added support for the ``simpleperf`` profiling tool for android
  devices.
- ``perf``: Added support for the perf ``record`` command.
- ``cpustates``: Improve handling of situations where cpufreq and/or cpuinfo
  data is unavailable.

Workloads:
----------
- ``adobereader``: Now supports apk version 19.7.1.10709.
- ``antutu``: Supports dismissing of the popup asking to create a shortcut on
  the homescreen.
- ``gmail``: Now supports apk version 2019.05.26.252424914.
- ``googlemaps``: Now supports apk version 10.19.1.
- ``googlephotos``: Now supports apk version 4.28.0.
- ``geekbench``: Added support for versions 4.3.4, 4.4.0 and 4.4.2.
- ``geekbench-corporate``: Added support for versions 5.0.1 and 5.0.3.
- ``pcmark``: Now locks device orientation to portrait to increase
  compatibility.
- ``pcmark``: Supports dismissing new Android 10 permission warnings.

Other:
------
- Improve documentation to help debugging module installation errors.

*************
Version 3.1.4
*************

.. warning:: This is the last release that supports Python 2. Subsequent versions
             will support Python 3.5+ only.

New Features:
=============

Framework:
----------
- ``ApkWorkload``: Allow specifying a maximum and minimum version of an APK
  instead of requiring a specific version.
- ``TestPackageHandler``: Added to support running android applications that
  are invoked via ``am instrument``.
- Directories can now be added as ``Artifacts``.

Workloads:
----------
- ``aitutu``: Executes the Aitutu Image Speed/Accuracy and Object
  Speed/Accuracy tests.
- ``uibench``: Run a configurable activity of the UIBench workload suite.
- ``uibenchjanktests``: Run an automated and instrumented version of the
  UIBench JankTests.
- ``motionmark``: Run a browser graphical benchmark.

Other:
------
- Added ``requirements.txt`` as a reference for known working package versions.

Fixes/Improvements
==================

Framework:
----------
- ``JobOutput``: Added an ``augmentation`` attribute to allow listing of
  enabled augmentations for individual jobs.
- Better error handling for misconfiguration in job selection.
- All ``Workload`` classes now have an ``uninstall`` parameter to control whether
  any binaries installed to the target should be uninstalled again once the
  run has completed.
- The ``cleanup_assets`` parameter is now more consistently utilized across
  workloads.
- ``ApkWorkload``: Added an ``activity`` attribute to allow for overriding the
  automatically detected version from the APK.
- ``ApkWorkload``: Added support for providing an implicit activity path.
- Fixed retrieving job-level artifacts from a database backend.

Output Processors:
------------------
- ``SysfsExtractor``: Ensure that the extracted directories are added as
  ``Artifacts``.
- ``InterruptStatsInstrument``: Ensure that the output files are added as
  ``Artifacts``.
- ``Postgres``: Fix missing ``system_id`` field from ``TargetInfo``.
- ``Postgres``: Support uploading directory ``Artifacts``.
- ``Postgres``: Bump the schema version to v1.3.

Workloads:
----------
- ``geekbench``: Improved apk version handling.
- ``geekbench``: Now supports apk version 4.3.2.

Other:
------
- ``Dockerfile``: Now installs all optional extras for use with WA.
- Fixed support for YAML anchors.
- Fixed building of documentation with Python 3.
- Changed the shorthand for installing all of WA's extras to `all`, as per
  the documentation.
- Upgraded the Dockerfile to use Ubuntu 18.10 and Python 3.
- Restricted maximum versions of ``numpy`` and ``pandas`` for Python 2.7.

*************
Version 3.1.3
*************

Fixes/Improvements
==================

Other:
------
- Security update for PyYAML to attempt prevention of arbitrary code execution
  during parsing.

*************
Version 3.1.2
*************
```diff
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2018 ARM Limited
+# Copyright 2023 ARM Limited
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at

@@ -68,7 +68,7 @@ master_doc = 'index'

 # General information about the project.
 project = u'wa'
-copyright = u'2018, ARM Limited'
+copyright = u'2023, ARM Limited'
 author = u'ARM Limited'

 # The version info for the project you're documenting, acts as replacement for
```
(One file diff suppressed because one or more lines are too long. An image file also changed: 63 KiB before, 74 KiB after.)
```diff
@@ -47,6 +47,10 @@ submitting a pull request:
 - If significant additions have been made to the framework, unit
   tests should be added to cover the new functionality.

+- If modifications have been made to the UI Automation source of a workload, the
+  corresponding APK should be rebuilt and submitted as part of the same pull
+  request. This can be done via the ``build.sh`` script in the relevant
+  ``uiauto`` subdirectory.
+
 - If modifications have been made to documentation (this includes description
   attributes for Parameters and Extensions), documentation should be built to
   make sure there are no errors or warnings during the build process, and a visual inspection
```
```diff
@@ -37,8 +37,8 @@ This section contains reference information common to plugins of all types.
 The Context
 ~~~~~~~~~~~

-.. note:: For clarification on the meaning of "workload specification" ("spec"), "job"
-          and "workload" and the distiction between them, please see the :ref:`glossary <glossary>`.
+.. note:: For clarification on the meaning of "workload specification" "spec", "job"
+          and "workload" and the distinction between them, please see the :ref:`glossary <glossary>`.

 The majority of methods in plugins accept a context argument. This is an
 instance of :class:`wa.framework.execution.ExecutionContext`. It contains

@@ -119,7 +119,7 @@ context.output_directory
    This is the output directory for the current iteration. This will be an
    iteration-specific subdirectory under the main results location. If
    there is no current iteration (e.g. when processing overall run results)
-   this will point to the same location as ``root_output_directory``.
+   this will point to the same location as ``run_output_directory``.


 Additionally, the global ``wa.settings`` object exposes one other location:

@@ -158,7 +158,7 @@ irrespective of the host's path notation. For example:
 .. note:: Output processors, unlike workloads and instruments, do not have their
           own target attribute as they are designed to be able to be run offline.

-.. _plugin-parmeters:
+.. _plugin-parameters:

 Parameters
 ~~~~~~~~~~
```
```diff
@@ -5,10 +5,12 @@ Convention for Naming revent Files for Revent Workloads
 -------------------------------------------------------------------------------

 There is a convention for naming revent files which you should follow if you
-want to record your own revent files. Each revent file must start with the
-device name(case sensitive) then followed by a dot '.' then the stage name
-then '.revent'. All your custom revent files should reside at
-``'~/.workload_automation/dependencies/WORKLOAD NAME/'``. These are the current
+want to record your own revent files. Each revent file must be called (case sensitive)
+``<device name>.<stage>.revent``,
+where ``<device name>`` is the name of your device (as defined by the model
+name of your device which can be retrieved with
+``adb shell getprop ro.product.model`` or by the ``name`` attribute of your
+customized device class), and ``<stage>`` is one of the following currently
+supported stages:

 :setup: This stage is where the application is loaded (if present). It is

@@ -26,10 +28,12 @@
 Only the run stage is mandatory, the remaining stages will be replayed if a
 recording is present otherwise no actions will be performed for that particular
 stage.

-For instance, to add a custom revent files for a device named "mydevice" and
-a workload name "myworkload", you need to add the revent files to the directory
-``/home/$WA_USER_HOME/dependencies/myworkload/revent_files`` creating it if
-necessary. ::
+All your custom revent files should reside at
+``'$WA_USER_DIRECTORY/dependencies/WORKLOAD NAME/'``. So
+typically, to add custom revent files for a device named "mydevice" and a
+workload named "myworkload", you would need to add the revent files to the
+directory ``~/.workload_automation/dependencies/myworkload/revent_files``,
+creating the directory structure if necessary. ::

     mydevice.setup.revent
     mydevice.run.revent

@@ -332,6 +336,6 @@ recordings in scripts. Here is an example:
     from wa.utils.revent import ReventRecording

     with ReventRecording('/path/to/recording.revent') as recording:
-        print "Recording: {}".format(recording.filepath)
-        print "There are {} input events".format(recording.num_events)
-        print "Over a total of {} seconds".format(recording.duration)
+        print("Recording: {}".format(recording.filepath))
+        print("There are {} input events".format(recording.num_events))
+        print("Over a total of {} seconds".format(recording.duration))
```
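Beyond the attributes printed in the example above, ``ReventRecording`` also exposes the parsed input events themselves. The sketch below assumes an ``events`` generator attribute, which matches current ``wa.utils.revent`` sources but should be checked against your version:

```python
from wa.utils.revent import ReventRecording

with ReventRecording('/path/to/recording.revent') as recording:
    for event in recording.events:   # assumed attribute: yields parsed input events
        print(event)
```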
```diff
@@ -58,22 +58,28 @@ will automatically generate a workload in the your ``WA_CONFIG_DIR/plugins``. If
 you wish to specify a custom location this can be provided with ``-p
 <path>``

+A typical invocation of the :ref:`create <create-command>` command would be in
+the form::
+
+    wa create workload -k <workload_kind> <workload_name>
+
+
 .. _adding-a-basic-workload-example:

 Adding a Basic Workload
 -----------------------

-To add a basic workload you can simply use the command::
+To add a ``basic`` workload template for our example workload we can simply use the
+command::

-    wa create workload basic
+    wa create workload -k basic ziptest

-This will generate a very basic workload with dummy methods for the workload
-interface and it is left to the developer to add any required functionality to
-the workload.
+This will generate a very basic workload with dummy methods for each method in
+the workload interface and it is left to the developer to add any required functionality.

-Not all the methods are required to be implemented, this example shows how a
-subset might be used to implement a simple workload that times how long it takes
-to compress a file of a particular size on the device.
+Not all the methods from the interface are required to be implemented, this
+example shows how a subset might be used to implement a simple workload that
+times how long it takes to compress a file of a particular size on the device.


 .. note:: This is intended as an example of how to implement the Workload
```
```diff
@@ -87,14 +93,15 @@ in this example we are implementing a very simple workload and do not
 require any additional feature so shall inherit directly from the base
 :class:`Workload` class. We then need to provide a ``name`` for our workload
 which is what will be used to identify your workload for example in an
-agenda or via the show command.
+agenda or via the show command, if you used the `create` command this will
+already be populated for you.

 .. code-block:: python

     import os
     from wa import Workload, Parameter

-    class ZipTestWorkload(Workload):
+    class ZipTest(Workload):

         name = 'ziptest'

@@ -113,7 +120,7 @@ separated by a new line.
     '''

 In order to allow for additional configuration of the workload from a user a
-list of :ref:`parameters <plugin-parmeters>` can be supplied. These can be
+list of :ref:`parameters <plugin-parameters>` can be supplied. These can be
 configured in a variety of different ways. For example here we are ensuring that
 the value of the parameter is an integer and larger than 0 using the ``kind``
 and ``constraint`` options, also if no value is provided we are providing a

@@ -176,7 +183,7 @@ allow it to decide whether to keep the file or not.
         # Pull the results file to the host
         self.host_outfile = os.path.join(context.output_directory, 'timing_results')
         self.target.pull(self.target_outfile, self.host_outfile)
-        context.add_artifact('ziptest-results', host_output_file, kind='raw')
+        context.add_artifact('ziptest-results', self.host_outfile, kind='raw')

 In the ``update_output`` method we can do any generation of metrics that we wish to
 for our workload. In this case we are going to simply convert the times reported

@@ -252,7 +259,7 @@ The full implementation of this workload would look something like:
         # Pull the results file to the host
         self.host_outfile = os.path.join(context.output_directory, 'timing_results')
         self.target.pull(self.target_outfile, self.host_outfile)
-        context.add_artifact('ziptest-results', host_output_file, kind='raw')
+        context.add_artifact('ziptest-results', self.host_outfile, kind='raw')

     def update_output(self, context):
         super(ZipTestWorkload, self).update_output(context)
```
||||
def update_output(self, context):
|
||||
super(ZipTestWorkload, self).update_output(context)
|
||||
@ -485,9 +492,10 @@ Adding an Instrument
|
||||
====================
|
||||
This is an example of how we would create a instrument which will trace device
|
||||
errors using a custom "trace" binary file. For more detailed information please see the
|
||||
:ref:`Instrument Reference <instrument-reference>`. The first thing to do is to subclass
|
||||
:class:`Instrument`, overwrite the variable name with what we want our instrument
|
||||
to be called and locate our binary for our instrument.
|
||||
:ref:`Instrument Reference <instrument-reference>`. The first thing to do is to create
|
||||
a new file under ``$WA_USER_DIRECTORY/plugins/`` and subclass
|
||||
:class:`Instrument`. Make sure to overwrite the variable name with what we want our instrument
|
||||
to be called and then locate our binary for the instrument.
|
||||
|
||||
::
|
||||
|
||||
@ -495,8 +503,8 @@ to be called and locate our binary for our instrument.
|
||||
|
||||
name = 'trace-errors'
|
||||
|
||||
def __init__(self, target):
|
||||
super(TraceErrorsInstrument, self).__init__(target)
|
||||
def __init__(self, target, **kwargs):
|
||||
super(TraceErrorsInstrument, self).__init__(target, **kwargs)
|
||||
self.binary_name = 'trace'
|
||||
self.binary_file = os.path.join(os.path.dirname(__file__), self.binary_name)
|
||||
self.trace_on_target = None
|
||||
@ -533,21 +541,20 @@ again decorated the method. ::
|
||||
Once we have generated our result data we need to retrieve it from the device
|
||||
for further processing or adding directly to WA's output for that job. For
|
||||
example for trace data we will want to pull it to the device and add it as a
|
||||
:ref:`artifact <artifact>` to WA's :ref:`context <context>` as shown below::
|
||||
|
||||
def extract_results(self, context):
|
||||
# pull the trace file from the target
|
||||
self.result = os.path.join(self.target.working_directory, 'trace.txt')
|
||||
self.target.pull(self.result, context.working_directory)
|
||||
context.add_artifact('error_trace', self.result, kind='export')
|
||||
|
||||
Once we have retrieved the data we can now do any further processing and add any
|
||||
relevant :ref:`Metrics <metrics>` to the :ref:`context <context>`. For this we
|
||||
will use the the ``add_metric`` method to add the results to the final output
|
||||
for that workload. The method can be passed 4 params, which are the metric
|
||||
`key`, `value`, `unit` and `lower_is_better`. ::
|
||||
:ref:`artifact <artifact>` to WA's :ref:`context <context>`. Once we have
|
||||
retrieved the data, we can now do any further processing and add any relevant
|
||||
:ref:`Metrics <metrics>` to the :ref:`context <context>`. For this we will use
|
||||
the the ``add_metric`` method to add the results to the final output for that
|
||||
workload. The method can be passed 4 params, which are the metric `key`,
|
||||
`value`, `unit` and `lower_is_better`. ::
|
||||
|
||||
def update_output(self, context):
|
||||
# pull the trace file from the target
|
||||
self.result = os.path.join(self.target.working_directory, 'trace.txt')
|
||||
self.outfile = os.path.join(context.output_directory, 'trace.txt')
|
||||
self.target.pull(self.result, self.outfile)
|
||||
context.add_artifact('error_trace', self.outfile, kind='export')
|
||||
|
||||
# parse the file if needs to be parsed, or add result directly to
|
||||
# context.
|
||||
|
||||
@ -567,12 +574,14 @@ At the very end of the run we would want to uninstall the binary we deployed ear
|
||||
|
||||
So the full example would look something like::
|
||||
|
||||
from wa import Instrument
|
||||
|
||||
class TraceErrorsInstrument(Instrument):
|
||||
|
||||
name = 'trace-errors'
|
||||
|
||||
def __init__(self, target):
|
||||
super(TraceErrorsInstrument, self).__init__(target)
|
||||
def __init__(self, target, **kwargs):
|
||||
super(TraceErrorsInstrument, self).__init__(target, **kwargs)
|
||||
self.binary_name = 'trace'
|
||||
self.binary_file = os.path.join(os.path.dirname(__file__), self.binary_name)
|
||||
self.trace_on_target = None
|
||||
@ -588,12 +597,12 @@ So the full example would look something like::
|
||||
def stop(self, context):
|
||||
self.target.execute('{} stop'.format(self.trace_on_target))
|
||||
|
||||
def extract_results(self, context):
|
||||
self.result = os.path.join(self.target.working_directory, 'trace.txt')
|
||||
self.target.pull(self.result, context.working_directory)
|
||||
context.add_artifact('error_trace', self.result, kind='export')
|
||||
|
||||
def update_output(self, context):
|
||||
self.result = os.path.join(self.target.working_directory, 'trace.txt')
|
||||
self.outfile = os.path.join(context.output_directory, 'trace.txt')
|
||||
self.target.pull(self.result, self.outfile)
|
||||
context.add_artifact('error_trace', self.outfile, kind='export')
|
||||
|
||||
metric = # ..
|
||||
context.add_metric('number_of_errors', metric, lower_is_better=True
|
||||
|
||||
```diff
@@ -609,8 +618,9 @@ Adding an Output Processor
 ==========================

 This is an example of how we would create an output processor which will format
-the run metrics as a column-aligned table. The first thing to do is to subclass
-:class:`OutputProcessor` and overwrite the variable name with what we want our
+the run metrics as a column-aligned table. The first thing to do is to create
+a new file under ``$WA_USER_DIRECTORY/plugins/`` and subclass
+:class:`OutputProcessor`. Make sure to overwrite the variable name with what we want our
 processor to be called and provide a short description.

 Next we need to implement any relevant methods, (please see
```
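For a concrete (if simplistic) feel for the shape of such a processor, here is a hedged sketch. It is not the implementation the original text goes on to describe; it assumes the ``export_run_output(run_output, target_info)`` hook and the ``jobs``/``metrics``/``basepath`` attributes used elsewhere in these docs.

```python
import os

from wa import OutputProcessor


class Table(OutputProcessor):

    name = 'table'
    description = 'Format the run metrics as a column-aligned table.'

    def export_run_output(self, run_output, target_info):
        # Collect (name, value, units) rows from every job in the run.
        rows = [(m.name, str(m.value), m.units or '')
                for job in run_output.jobs for m in job.metrics]
        if not rows:
            return
        widths = [max(len(row[i]) for row in rows) for i in range(3)]
        outfile = os.path.join(run_output.basepath, 'table.txt')
        with open(outfile, 'w') as wfh:
            for row in rows:
                line = '  '.join(col.ljust(w) for col, w in zip(row, widths))
                wfh.write(line.rstrip() + '\n')
```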
```diff
@@ -114,3 +114,27 @@ parameter and will be picked up by the ``fps`` augmentation.
 mismatch of your WA and devlib versions. Please update both to their latest
 versions and delete your ``$USER_HOME/.workload_automation/cache/targets.json``
 (or equivalent) file.

+**Q:** I get an error which looks similar to ``UnicodeDecodeError('ascii' codec can't decode byte...``
+------------------------------------------------------------------------------------------------------
+**A:** If you receive this error or a similar warning about your environment,
+please ensure that you configure your environment to use a locale which supports
+UTF-8. Otherwise this can cause issues when attempting to parse files containing
+non-ASCII characters.
```
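A quick way to check what your Python environment will default to (illustrative only; which locale you then configure is up to you):

```python
# If either of these prints something other than a UTF-8 encoding, configure
# a UTF-8 locale (e.g. set LANG/LC_ALL in your shell) before running WA.
import locale
import sys

print(locale.getpreferredencoding())   # want e.g. 'UTF-8'
print(sys.getfilesystemencoding())
```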
```diff
+**Q:** I get the error ``Module "X" failed to install on target``
+------------------------------------------------------------------------------------------------------
+**A:** By default a set of devlib modules will be automatically loaded onto the
+target designed to add additional functionality. If the functionality provided
+by the module is not required then the module can be safely disabled by setting
+``load_default_modules`` to ``False`` in the ``device_config`` entry of the
+:ref:`agenda <config-agenda-entry>` and then re-enabling any specific modules
+that are still required. An example agenda snippet is shown below:
+
+.. code-block:: none
+
+    config:
+        device: generic_android
+        device_config:
+            load_default_modules: False
+            modules: ['list', 'of', 'modules', 'to', 'enable']
```
```diff
@@ -13,10 +13,11 @@ these signals are dispatched during execution please see the
 $signal_names

 The methods above may be decorated with one of the listed decorators to set the
-priority of the Instrument method relative to other callbacks registered for the
-signal (within the same priority level, callbacks are invoked in the order they
-were registered). The table below shows the mapping of the decorator to the
-corresponding priority:
+priority (a value in the ``wa.framework.signal.CallbackPriority`` enum) of the
+Instrument method relative to other callbacks registered for the signal (within
+the same priority level, callbacks are invoked in the order they were
+registered). The table below shows the mapping of the decorator to the
+corresponding priority name and level:

 $priority_prefixes
```
|
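To make this concrete, the sketch below shows an instrument with methods
decorated for different priorities. The decorator names and their import
location are assumptions based on the WA3 plugin API; check the generated
table above for the exact names available in your version:

.. code-block:: python

    from wa import Instrument
    from wa.framework.instrument import fast, slow  # assumed import location


    class ExampleInstrument(Instrument):

        name = 'priority-example'
        description = 'Illustrates relative callback priorities.'

        @fast
        def start(self, context):
            # Invoked close to the workload execution, after "normal"
            # priority callbacks registered for the same signal.
            pass

        @slow
        def stop(self, context):
            # Invoked further away from the workload execution on the
            # way out, letting faster callbacks complete first.
            pass
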
@@ -16,7 +16,7 @@ Configuration
Default configuration file change
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Instead of the standard ``config.py`` file located at
``$WA_USER_HOME/config.py`` WA now uses a ``config.yaml`` file (at the same
``$WA_USER_DIRECTORY/config.py`` WA now uses a ``config.yaml`` file (at the same
location) which is written in the YAML format instead of python. Additionally
upon first invocation WA3 will automatically try and detect whether a WA2 config
file is present and convert it to use the new WA3 format. During this process

@@ -690,7 +690,7 @@ Workload-specific augmentation
It is possible to enable or disable (but not configure) augmentations at
workload or section level, as well as in the global config, in which case, the
augmentations would only be enabled/disabled for that workload/section. If the
same augmentation is enabled at one level and disabled at another, as will all
same augmentation is enabled at one level and disabled at another, as with all
WA configuration, the more specific settings will take precedence over the less
specific ones (i.e. workloads override sections that, in turn, override global
config).

@@ -17,6 +17,8 @@ further configuration will be required.
Android
-------

.. _android-general-device-setup:

General Device Setup
^^^^^^^^^^^^^^^^^^^^

@@ -44,12 +46,15 @@ common parameters you might want to change are outlined below.
Android builds. If this is not the case for your device, you will need to
specify an alternative working directory (e.g. under ``/data/local``).

:load_default_modules: A number of "default" modules (e.g. for cpufreq
   subsystem) are loaded automatically, unless explicitly disabled. If you
   encounter an issue with one of the modules then this setting can be set to
   ``False`` and any specific modules that you require can be requested via the
   ``modules`` entry.

:modules: A list of additional modules to be installed for the target. Devlib
   implements functionality for particular subsystems as modules. A number of
   "default" modules (e.g. for cpufreq subsystem) are loaded automatically,
   unless explicitly disabled. If additional modules need to be loaded, they
   may be specified using this parameter.
   implements functionality for particular subsystems as modules. If additional
   modules need to be loaded, they may be specified using this parameter.

Please see the `devlib documentation <http://devlib.readthedocs.io/en/latest/modules.html>`_
for information on the available modules.
@@ -83,6 +88,7 @@ or a more specific config could be:
        device_config:
            device: 0123456789ABCDEF
            working_directory: '/sdcard/wa-working'
            load_default_modules: True
            modules: ['hotplug', 'cpufreq']
            core_names : ['a7', 'a7', 'a7', 'a15', 'a15']
            # ...

@@ -14,9 +14,9 @@ Using revent with workloads
^^^^^^^^^^^^^^^^^^^^^^^^^^^

Some workloads (pretty much all games) rely on recorded revents for their
execution. ReventWorkloads will require between 1 and 4 revent files be be ran.
There is one mandatory recording ``run`` for performing the actual execution of
the workload and the remaining are optional. ``setup`` can be used to perform
execution. ReventWorkloads require between 1 and 4 revent files to be run.
There is one mandatory recording, ``run``, for performing the actual execution of
the workload and the remaining stages are optional. ``setup`` can be used to perform
the initial setup (navigating menus, selecting game modes, etc).
``extract_results`` can be used to perform any actions after the main stage of
the workload for example to navigate a results or summary screen of the app. And
@@ -26,17 +26,21 @@ exiting the app.
Because revents are very device-specific\ [*]_, these files would need to
be recorded for each device.

The files must be called ``<device name>.(setup|run|extract_results|teardown).revent``
, where ``<device name>`` is the name of your device (as defined by the ``name``
attribute of your device's class). WA will look for these files in two
places: ``<install dir>/wa/workloads/<workload name>/revent_files``
and ``~/.workload_automation/dependencies/<workload name>``. The first
location is primarily intended for revent files that come with WA (and if
The files must be called ``<device name>.(setup|run|extract_results|teardown).revent``,
where ``<device name>`` is the name of your device (as defined by the model
name of your device which can be retrieved with
``adb shell getprop ro.product.model`` or by the ``name`` attribute of your
customized device class).

WA will look for these files in two places:
``<installdir>/wa/workloads/<workload name>/revent_files`` and
``$WA_USER_DIRECTORY/dependencies/<workload name>``. The
first location is primarily intended for revent files that come with WA (and if
you did a system-wide install, you'll need sudo to add files there), so it's
probably easier to use the second location for the files you record. Also,
if revent files for a workload exist in both locations, the files under
``~/.workload_automation/dependencies`` will be used in favour of those
installed with WA.
probably easier to use the second location for the files you record. Also, if
revent files for a workload exist in both locations, the files under
``$WA_USER_DIRECTORY/dependencies`` will be used in favour
of those installed with WA.

.. [*] It's not just about screen resolution -- the event codes may be different
   even if devices use the same screen.
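
As a rough sketch of how these recordings end up being consumed, a minimal
revent-based workload can be defined in a few lines. The class and package
names here are hypothetical; ``ApkReventWorkload`` itself is part of the WA
framework:

.. code-block:: python

    from wa import ApkReventWorkload


    class MyGame(ApkReventWorkload):

        name = 'mygame'
        description = 'Replays recorded revent files against a game APK.'
        # Hypothetical package -- replace with the app you recorded against.
        package_names = ['com.example.mygame']

With a workload like this in place, WA replays ``<device name>.setup.revent``,
``<device name>.run.revent``, and so on during the corresponding phases of each
job.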
@@ -12,8 +12,9 @@ Installation
.. module:: wa

This page describes the 3 methods of installing Workload Automation 3. The first
option is to use :ref:`pip` which
will install the latest release of WA, the latest development version from :ref:`github <github>` or via a :ref:`dockerfile`.
option is to use :ref:`pip` which will install the latest release of WA, the
latest development version from :ref:`github <github>` or via a
:ref:`dockerfile`.


Prerequisites
@@ -22,11 +23,11 @@ Prerequisites
Operating System
----------------

WA runs on a native Linux install. It was tested with Ubuntu 14.04,
but any recent Linux distribution should work. It should run on either
32-bit or 64-bit OS, provided the correct version of Android (see below)
was installed. Officially, **other environments are not supported**. WA
has been known to run on Linux Virtual machines and in Cygwin environments,
WA runs on a native Linux install. It has been tested on recent Ubuntu releases,
but other recent Linux distributions should work as well. It should run on
either 32-bit or 64-bit OS, provided the correct version of dependencies (see
below) are installed. Officially, **other environments are not supported**.
WA has been known to run on Linux Virtual machines and in Cygwin environments,
though additional configuration may be required in both cases (known issues
include making sure USB/serial connections are passed to the VM, and wrong
python/pip binaries being picked up in Cygwin). WA *should* work on other
|
||||
Android SDK
|
||||
-----------
|
||||
|
||||
You need to have the Android SDK with at least one platform installed.
|
||||
To interact with Android devices you will need to have the Android SDK
|
||||
with at least one platform installed.
|
||||
To install it, download the ADT Bundle from here_. Extract it
|
||||
and add ``<path_to_android_sdk>/sdk/platform-tools`` and ``<path_to_android_sdk>/sdk/tools``
|
||||
to your ``PATH``. To test that you've installed it properly, run ``adb
|
||||
@ -72,7 +74,11 @@ the install location of the SDK (i.e. ``<path_to_android_sdk>/sdk``).
|
||||
Python
|
||||
------
|
||||
|
||||
Workload Automation 3 currently supports both Python 2.7 and Python 3.
|
||||
Workload Automation 3 currently supports Python 3.5+
|
||||
|
||||
.. note:: If your system's default python version is still Python 2, please
|
||||
replace the commands listed here with their Python3 equivalent
|
||||
(e.g. python3, pip3 etc.)
|
||||
|
||||
.. _pip:
|
||||
|
||||
@@ -94,11 +100,11 @@ similar distributions, this may be done with APT::

    sudo -H pip install --upgrade pip
    sudo -H pip install --upgrade setuptools

If you do run into this issue after already installing some packages,
you can resolve it by running ::

    sudo chmod -R a+r /usr/local/lib/python2.7/dist-packagessudo
    find /usr/local/lib/python2.7/dist-packages -type d -exec chmod a+x {} \;
    sudo chmod -R a+r /usr/local/lib/python3.X/dist-packages
    sudo find /usr/local/lib/python3.X/dist-packages -type d -exec chmod a+x {} \;

(The paths above will work for Ubuntu; they may need to be adjusted
for other distros).
@@ -171,9 +177,11 @@ install them upfront (e.g. if you're planning to use WA in an environment that
may not always have Internet access).

* nose
* PyDAQmx
* pymongo
* jinja2
* mock
* daqpower
* sphinx
* sphinx_rtd_theme
* psycopg2-binary

@@ -184,12 +192,12 @@ Installing

Installing the latest released version from PyPI (Python Package Index)::

    sudo -H pip install wa
    sudo -H pip install wlauto

This will install WA along with its mandatory dependencies. If you would like to
install all optional dependencies at the same time, do the following instead::

    sudo -H pip install wa[all]
    sudo -H pip install wlauto[all]


Alternatively, you can also install the latest development version from GitHub
@@ -205,6 +213,13 @@ Alternatively, you can also install the latest development version from GitHub
install the latest version of
`devlib <https://github.com/ARM-software/devlib>`_.

.. note:: Please note that while a `requirements.txt` is included, this is
          designed to be a reference of known working packages rather than to
          be used as part of a standard installation. The version restrictions
          in place as part of `setup.py` should automatically ensure the correct
          packages are installed, however if encountering issues please try
          updating/downgrading to the package versions listed within.


If the above succeeds, try ::

@@ -20,7 +20,7 @@ Install
.. note:: This is a quick summary. For more detailed instructions, please see
          the :ref:`installation` section.

Make sure you have Python 2.7 or Python 3 and a recent Android SDK with API
Make sure you have Python 3.5+ and a recent Android SDK with API
level 18 or above installed on your system. A complete install of the Android
SDK is required, as WA uses a number of its utilities, not just adb. For the
SDK, make sure that either ``ANDROID_HOME`` environment variable is set, or that
@@ -125,7 +125,7 @@ There are multiple options for configuring your device depending on your
particular use case.

You can either add your configuration to the default configuration file
``config.yaml``, under the ``$WA_USER_HOME/`` directory or you can specify it in
``config.yaml``, under the ``$WA_USER_DIRECTORY/`` directory or you can specify it in
the ``config`` section of your agenda directly.

Alternatively if you are using multiple devices, you may want to create separate
@@ -318,7 +318,7 @@ like this:

    config:
        augmentations:
            - ~execution_time
            - json
            - targz
        iterations: 2
    workloads:
        - memcpy
@@ -332,7 +332,7 @@ This agenda:

- Specifies two workloads: memcpy and dhrystone.
- Specifies that dhrystone should run in one thread and execute five million loops.
- Specifies that each of the two workloads should be run twice.
- Enables json output processor, in addition to the output processors enabled in
- Enables the targz output processor, in addition to the output processors enabled in
  the config.yaml.
- Disables execution_time instrument, if it is enabled in the config.yaml

@@ -352,13 +352,13 @@ in-depth information please see the :ref:`Create Command <create-command>` docum

In order to populate the agenda with relevant information you can supply all of
the plugins you wish to use as arguments to the command, for example if we want
to create an agenda file for running ``dhystrone`` on a 'generic android' device and we
to create an agenda file for running ``dhrystone`` on a `generic_android` device and we
want to enable the ``execution_time`` and ``trace-cmd`` instruments and display the
metrics using the ``csv`` output processor. We would use the following command::

    wa create agenda generic_android dhrystone execution_time trace-cmd csv -o my_agenda.yaml

This will produce a `my_agenda.yaml` file containing all the relevant
This will produce a ``my_agenda.yaml`` file containing all the relevant
configuration for the specified plugins along with their default values as shown
below:

@@ -373,6 +373,7 @@ below:
        device: generic_android
        device_config:
            adb_server: null
            adb_port: null
            big_core: null
            core_clusters: null
            core_names: null
@@ -399,6 +400,7 @@ below:
            no_install: false
            report: true
            report_on_target: false
            mode: write-to-memory
        csv:
            extra_columns: null
            use_all_classifiers: false
@@ -483,14 +485,14 @@ that parses the contents of the output directory:

    >>> ro = RunOutput('./wa_output')
    >>> for job in ro.jobs:
    ...     if job.status != 'OK':
    ...         print 'Job "{}" did not complete successfully: {}'.format(job, job.status)
    ...         print('Job "{}" did not complete successfully: {}'.format(job, job.status))
    ...         continue
    ...     print 'Job "{}":'.format(job)
    ...     print('Job "{}":'.format(job))
    ...     for metric in job.metrics:
    ...         if metric.units:
    ...             print '\t{}: {} {}'.format(metric.name, metric.value, metric.units)
    ...             print('\t{}: {} {}'.format(metric.name, metric.value, metric.units))
    ...         else:
    ...             print '\t{}: {}'.format(metric.name, metric.value)
    ...             print('\t{}: {}'.format(metric.name, metric.value))
    ...
    Job "wk1-dhrystone-1":
        thread 0 score: 20833333
@@ -30,7 +30,7 @@ An example agenda can be seen here:

        device: generic_android
        device_config:
            device: R32C801B8XY # Th adb name of our device we want to run on
            device: R32C801B8XY # The adb name of our device we want to run on
            disable_selinux: true
            load_default_modules: true
            package_data_directory: /data/data
@@ -45,6 +45,7 @@ An example agenda can be seen here:
            no_install: false
            report: true
            report_on_target: false
            mode: write-to-disk
        csv: # Provide config for the csv augmentation
            use_all_classifiers: true

||||
@ -116,7 +117,9 @@ whole will behave. The most common options that that you may want to specify are
|
||||
to connect to (e.g. ``host`` for an SSH connection or
|
||||
``device`` to specific an ADB name) as well as configure other
|
||||
options for the device for example the ``working_directory``
|
||||
or the list of ``modules`` to be loaded onto the device.
|
||||
or the list of ``modules`` to be loaded onto the device. (For
|
||||
more information please see
|
||||
:ref:`here <android-general-device-setup>`)
|
||||
:execution_order: Defines the order in which the agenda spec will be executed.
|
||||
:reboot_policy: Defines when during execution of a run a Device will be rebooted.
|
||||
:max_retries: The maximum number of times failed jobs will be retried before giving up.
|
||||
@ -124,7 +127,7 @@ whole will behave. The most common options that that you may want to specify are
|
||||
|
||||
For more information and a full list of these configuration options please see
|
||||
:ref:`Run Configuration <run-configuration>` and
|
||||
:ref:`"Meta Configuration" <meta-configuration>`.
|
||||
:ref:`Meta Configuration <meta-configuration>`.
|
||||
|
||||
|
||||
Plugins
|
||||
|
@ -40,7 +40,7 @@ Will display help for this subcommand that will look something like this:
|
||||
AGENDA Agenda for this workload automation run. This defines
|
||||
which workloads will be executed, how many times, with
|
||||
which tunables, etc. See example agendas in
|
||||
/usr/local/lib/python2.7/dist-packages/wa for an
|
||||
/usr/local/lib/python3.X/dist-packages/wa for an
|
||||
example of how this file should be structured.
|
||||
|
||||
optional arguments:
|
||||
|
@@ -33,6 +33,7 @@ states.
      iterations: 1
      runtime_parameters:
          screen_on: false
          unlock_screen: 'vertical'
    - name: benchmarkpi
      iterations: 1
sections:
@@ -208,6 +209,13 @@ Android Specific Runtime Parameters

:screen_on: A ``boolean`` to specify whether the device's screen should be
   turned on. Defaults to ``True``.

:unlock_screen: A ``String`` to specify how the device's screen should be
   unlocked. Unlocking screen is disabled by default. ``vertical``, ``diagonal``
   and ``horizontal`` are the supported values (see :meth:`devlib.AndroidTarget.swipe_to_unlock`).
   Note that unlocking succeeds when no passcode is set. Since unlocking screen
   requires turning on the screen, this option overrides the value of the ``screen_on``
   option.
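
For reference, the sketch below shows the underlying devlib call this
parameter maps onto, used directly. It assumes a single connected Android
device reachable through adb with default connection settings:

.. code-block:: python

    from devlib import AndroidTarget

    target = AndroidTarget()
    target.connect()

    # Turn the screen on, then perform the swipe pattern that
    # unlock_screen would request.
    target.ensure_screen_is_on()
    target.swipe_to_unlock(direction='vertical')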
.. _setting-sysfiles:

Setting Sysfiles

@@ -6,7 +6,7 @@
#
# docker build -t wa .
#
# This will create an image called wadocker, which is preconfigured to
# This will create an image called wa, which is preconfigured to
# run WA and devlib. Please note that the build process automatically
# accepts the licenses for the Android SDK, so please be sure that you
# are willing to accept these prior to building and running the image
@@ -17,6 +17,13 @@
#
# docker run -it --privileged -v /dev/bus/usb:/dev/bus/usb --volume ${PWD}:/workspace --workdir /workspace wa
#
# If using selinux you may need to add the `z` option when mounting
# volumes e.g.:
# --volume ${PWD}:/workspace:z
# Warning: Please ensure you do not use this option when mounting
# system directories. For more information please see:
# https://docs.docker.com/storage/bind-mounts/#configure-the-selinux-label
#
# The above command starts the container in privileged mode, with
# access to USB devices. The current directory is mounted into the
# image, allowing you to work from there. Any files written to this
@@ -32,27 +39,80 @@
#
# When you are finished, please run `exit` to leave the container.
#
# The relevant environment variables are stored in a separate
# file which is automatically sourced in an interactive shell.
# If running from a non-interactive environment this can
# be manually sourced with `source /home/wa/.wa_environment`
#
# NOTE: Please make sure that the ADB server is NOT running on the
# host. If in doubt, run `adb kill-server` before running the docker
# container.
#

# We want to make sure to base this on a recent ubuntu release
FROM ubuntu:17.10
FROM ubuntu:20.04

# Please update the references below to use different versions of
# devlib, WA or the Android SDK
ARG DEVLIB_REF=v1.1.1
ARG WA_REF=v3.1.3
ARG DEVLIB_REF=v1.3.4
ARG WA_REF=v3.3.1
ARG ANDROID_SDK_URL=https://dl.google.com/android/repository/sdk-tools-linux-3859397.zip

RUN apt-get update
RUN apt-get install -y python-pip git wget zip openjdk-8-jre-headless vim emacs nano curl sshpass ssh usbutils
RUN pip install pandas
# Set a default timezone to use
ENV TZ=Europe/London

ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install -y \
    apache2-utils \
    bison \
    cmake \
    curl \
    emacs \
    flex \
    git \
    libcdk5-dev \
    libiio-dev \
    libxml2 \
    libxml2-dev \
    locales \
    nano \
    openjdk-8-jre-headless \
    python3 \
    python3-pip \
    ssh \
    sshpass \
    sudo \
    trace-cmd \
    usbutils \
    vim \
    wget \
    zip

# Clone and download iio-capture
RUN git clone -v https://github.com/BayLibre/iio-capture.git /tmp/iio-capture && \
    cd /tmp/iio-capture && \
    make && \
    make install

RUN pip3 install pandas

# Ensure we're using utf-8 as our default encoding
RUN locale-gen en_US.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US:en
ENV LC_ALL en_US.UTF-8

# Let's get the two repos we need, and install them
RUN git clone -v https://github.com/ARM-software/devlib.git /tmp/devlib && cd /tmp/devlib && git checkout $DEVLIB_REF && python setup.py install
RUN git clone -v https://github.com/ARM-software/workload-automation.git /tmp/wa && cd /tmp/wa && git checkout $WA_REF && python setup.py install
RUN git clone -v https://github.com/ARM-software/devlib.git /tmp/devlib && \
    cd /tmp/devlib && \
    git checkout $DEVLIB_REF && \
    python3 setup.py install && \
    pip3 install .[full]
RUN git clone -v https://github.com/ARM-software/workload-automation.git /tmp/wa && \
    cd /tmp/wa && \
    git checkout $WA_REF && \
    python3 setup.py install && \
    pip3 install .[all]

# Clean-up
RUN rm -R /tmp/devlib /tmp/wa
@@ -66,10 +126,19 @@ RUN mkdir -p /home/wa/.android
RUN mkdir -p /home/wa/AndroidSDK && cd /home/wa/AndroidSDK && wget $ANDROID_SDK_URL -O sdk.zip && unzip sdk.zip
RUN cd /home/wa/AndroidSDK/tools/bin && yes | ./sdkmanager --licenses && ./sdkmanager platform-tools && ./sdkmanager 'build-tools;27.0.3'

# Update the path
RUN echo 'export PATH=/home/wa/AndroidSDK/platform-tools:${PATH}' >> /home/wa/.bashrc
RUN echo 'export PATH=/home/wa/AndroidSDK/build-tools:${PATH}' >> /home/wa/.bashrc
RUN echo 'export ANDROID_HOME=/home/wa/AndroidSDK' >> /home/wa/.bashrc
# Download Monsoon
RUN mkdir -p /home/wa/monsoon
RUN curl https://android.googlesource.com/platform/cts/+/master/tools/utils/monsoon.py\?format\=TEXT | base64 --decode > /home/wa/monsoon/monsoon.py
RUN chmod +x /home/wa/monsoon/monsoon.py

# Update WA's required environment variables.
RUN echo 'export PATH=/home/wa/monsoon:${PATH}' >> /home/wa/.wa_environment
RUN echo 'export PATH=/home/wa/AndroidSDK/platform-tools:${PATH}' >> /home/wa/.wa_environment
RUN echo 'export PATH=/home/wa/AndroidSDK/build-tools:${PATH}' >> /home/wa/.wa_environment
RUN echo 'export ANDROID_HOME=/home/wa/AndroidSDK' >> /home/wa/.wa_environment

# Source WA environment variables in an interactive environment
RUN echo 'source /home/wa/.wa_environment' >> /home/wa/.bashrc

# Generate some ADB keys. These will change each time the image is built but will otherwise persist.
RUN /home/wa/AndroidSDK/platform-tools/adb keygen /home/wa/.android/adbkey

@@ -43,7 +43,7 @@ ignore=external
# https://bitbucket.org/logilab/pylint/issue/232/wrong-hanging-indentation-false-positive
# TODO: disabling no-value-for-parameter and logging-format-interpolation, as they appear to be broken
# in version 1.4.1 and return a lot of false postives; should be re-enabled once fixed.
disable=C0301,C0103,C0111,W0142,R0903,R0904,R0922,W0511,W0141,I0011,R0921,W1401,C0330,no-value-for-parameter,logging-format-interpolation,no-else-return,inconsistent-return-statements,keyword-arg-before-vararg,consider-using-enumerate,no-member
disable=C0301,C0103,C0111,W0142,R0903,R0904,R0922,W0511,W0141,I0011,R0921,W1401,C0330,no-value-for-parameter,logging-format-interpolation,no-else-return,inconsistent-return-statements,keyword-arg-before-vararg,consider-using-enumerate,no-member,super-with-arguments,useless-object-inheritance,raise-missing-from,no-else-raise,no-else-break,no-else-continue

[FORMAT]
max-module-lines=4000

pytest.ini (new file, 3 lines)
@@ -0,0 +1,3 @@
[pytest]
filterwarnings=
    ignore::DeprecationWarning:past[.*]
requirements.txt (new file, 30 lines)
@@ -0,0 +1,30 @@
bcrypt==4.0.1
certifi==2024.7.4
cffi==1.15.1
charset-normalizer==3.1.0
colorama==0.4.6
cryptography==43.0.1
devlib==1.3.4
future==0.18.3
idna==3.7
Louie-latest==1.3.1
lxml==4.9.2
nose==1.3.7
numpy==1.24.3
pandas==2.0.1
paramiko==3.4.0
pexpect==4.8.0
ptyprocess==0.7.0
pycparser==2.21
PyNaCl==1.5.0
pyserial==3.5
python-dateutil==2.8.2
pytz==2023.3
PyYAML==6.0
requests==2.32.0
scp==0.14.5
six==1.16.0
tzdata==2023.3
urllib3==1.26.19
wlauto==3.3.1
wrapt==1.15.0
setup.py (26 lines changed)
@@ -62,9 +62,14 @@ for root, dirs, files in os.walk(wa_dir):

scripts = [os.path.join('scripts', s) for s in os.listdir('scripts')]

with open("README.rst", "r") as fh:
    long_description = fh.read()

devlib_version = format_version(required_devlib_version)
params = dict(
    name='wlauto',
    description='A framework for automating workload execution and measurement collection on ARM devices.',
    long_description=long_description,
    version=get_wa_version_with_commit(),
    packages=packages,
    package_data=data_files,
@@ -74,8 +79,10 @@ params = dict(
    license='Apache v2',
    maintainer='ARM Architecture & Technology Device Lab',
    maintainer_email='workload-automation@arm.com',
    python_requires='>= 3.7',
    setup_requires=[
        'numpy'
        'numpy<=1.16.4; python_version<"3"',
        'numpy; python_version>="3"',
    ],
    install_requires=[
        'python-dateutil', # converting between UTC and local time.
@@ -84,32 +91,33 @@ params = dict(
        'colorama', # Printing with colors
        'pyYAML>=5.1b3', # YAML-formatted agenda parsing
        'requests', # Fetch assets over HTTP
        'devlib>={}'.format(format_version(required_devlib_version)), # Interacting with devices
        'devlib>={}'.format(devlib_version), # Interacting with devices
        'louie-latest', # callbacks dispatch
        'wrapt', # better decorators
        'pandas>=0.23.0', # Data analysis and manipulation
        'pandas>=0.23.0,<=0.24.2; python_version<"3.5.3"', # Data analysis and manipulation
        'pandas>=0.23.0; python_version>="3.5.3"', # Data analysis and manipulation
        'future', # Python 2-3 compatiblity
    ],
    dependency_links=['https://github.com/ARM-software/devlib/tarball/master#egg=devlib-{}'.format(devlib_version)],
    extras_require={
        'other': ['jinja2'],
        'test': ['nose', 'mock'],
        'mongodb': ['pymongo'],
        'notify': ['notify2'],
        'doc': ['sphinx'],
        'doc': ['sphinx', 'sphinx_rtd_theme'],
        'postgres': ['psycopg2-binary'],
        'daq': ['daqpower'],
    },
    # https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 4 - Beta',
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
    ],
)

all_extras = list(chain(iter(params['extras_require'].values())))
params['extras_require']['everything'] = all_extras
params['extras_require']['all'] = all_extras


class sdist(orig_sdist):
@@ -17,7 +17,7 @@
from wa import Plugin


class TestDevice(Plugin):
class MockDevice(Plugin):

    name = 'test-device'
    kind = 'device'
@@ -18,7 +18,6 @@
# pylint: disable=R0201
import os
import sys
import yaml
from collections import defaultdict
from unittest import TestCase

@@ -31,6 +30,7 @@ os.environ['WA_USER_DIRECTORY'] = os.path.join(DATA_DIR, 'includes')
from wa.framework.configuration.execution import ConfigManager
from wa.framework.configuration.parsers import AgendaParser
from wa.framework.exception import ConfigError
from wa.utils.serializer import yaml
from wa.utils.types import reset_all_counters


@@ -44,8 +44,6 @@ workloads:
      workload_parameters:
          test: 1
"""
invalid_agenda = yaml.load(invalid_agenda_text)
invalid_agenda.name = 'invalid1'

duplicate_agenda_text = """
global:
@@ -58,14 +56,10 @@ workloads:
    - id: "1"
      workload_name: benchmarkpi
"""
duplicate_agenda = yaml.load(duplicate_agenda_text)
duplicate_agenda.name = 'invalid2'

short_agenda_text = """
workloads: [antutu, dhrystone, benchmarkpi]
"""
short_agenda = yaml.load(short_agenda_text)
short_agenda.name = 'short'

default_ids_agenda_text = """
workloads:
@@ -78,8 +72,6 @@ workloads:
          cpus: 1
    - vellamo
"""
default_ids_agenda = yaml.load(default_ids_agenda_text)
default_ids_agenda.name = 'default_ids'

sectioned_agenda_text = """
sections:
@@ -102,8 +94,6 @@ sections:
        workloads:
            - memcpy
"""
sectioned_agenda = yaml.load(sectioned_agenda_text)
sectioned_agenda.name = 'sectioned'

dup_sectioned_agenda_text = """
sections:
@@ -116,8 +106,22 @@ sections:
        workloads:
            - memcpy
"""
dup_sectioned_agenda = yaml.load(dup_sectioned_agenda_text)
dup_sectioned_agenda.name = 'dup-sectioned'

yaml_anchors_agenda_text = """
workloads:
    - name: dhrystone
      params: &dhrystone_single_params
          cleanup_assets: true
          cpus: 0
          delay: 3
          duration: 0
          mloops: 10
          threads: 1
    - name: dhrystone
      params:
          <<: *dhrystone_single_params
          threads: 4
"""


class AgendaTest(TestCase):
@@ -132,6 +136,8 @@ class AgendaTest(TestCase):
        assert_equal(len(self.config.jobs_config.root_node.workload_entries), 4)

    def test_duplicate_id(self):
        duplicate_agenda = yaml.load(duplicate_agenda_text)

        try:
            self.parser.load(self.config, duplicate_agenda, 'test')
        except ConfigError as e:
@@ -140,6 +146,8 @@ class AgendaTest(TestCase):
            raise Exception('ConfigError was not raised for an agenda with duplicate ids.')

    def test_yaml_missing_field(self):
        invalid_agenda = yaml.load(invalid_agenda_text)

        try:
            self.parser.load(self.config, invalid_agenda, 'test')
        except ConfigError as e:
@@ -148,20 +156,26 @@ class AgendaTest(TestCase):
            raise Exception('ConfigError was not raised for an invalid agenda.')

    def test_defaults(self):
        short_agenda = yaml.load(short_agenda_text)
        self.parser.load(self.config, short_agenda, 'test')

        workload_entries = self.config.jobs_config.root_node.workload_entries
        assert_equal(len(workload_entries), 3)
        assert_equal(workload_entries[0].config['workload_name'], 'antutu')
        assert_equal(workload_entries[0].id, 'wk1')

    def test_default_id_assignment(self):
        default_ids_agenda = yaml.load(default_ids_agenda_text)

        self.parser.load(self.config, default_ids_agenda, 'test2')
        workload_entries = self.config.jobs_config.root_node.workload_entries
        assert_equal(workload_entries[0].id, 'wk2')
        assert_equal(workload_entries[3].id, 'wk3')

    def test_sections(self):
        sectioned_agenda = yaml.load(sectioned_agenda_text)
        self.parser.load(self.config, sectioned_agenda, 'test')

        root_node_workload_entries = self.config.jobs_config.root_node.workload_entries
        leaves = list(self.config.jobs_config.root_node.leaves())
        section1_workload_entries = leaves[0].workload_entries
@@ -171,8 +185,22 @@ class AgendaTest(TestCase):
        assert_true(section1_workload_entries[0].config['workload_parameters']['markers_enabled'])
        assert_equal(section2_workload_entries[0].config['workload_name'], 'antutu')

    def test_yaml_anchors(self):
        yaml_anchors_agenda = yaml.load(yaml_anchors_agenda_text)
        self.parser.load(self.config, yaml_anchors_agenda, 'test')

        workload_entries = self.config.jobs_config.root_node.workload_entries
        assert_equal(len(workload_entries), 2)
        assert_equal(workload_entries[0].config['workload_name'], 'dhrystone')
        assert_equal(workload_entries[0].config['workload_parameters']['threads'], 1)
        assert_equal(workload_entries[0].config['workload_parameters']['delay'], 3)
        assert_equal(workload_entries[1].config['workload_name'], 'dhrystone')
        assert_equal(workload_entries[1].config['workload_parameters']['threads'], 4)
        assert_equal(workload_entries[1].config['workload_parameters']['delay'], 3)

    @raises(ConfigError)
    def test_dup_sections(self):
        dup_sectioned_agenda = yaml.load(dup_sectioned_agenda_text)
        self.parser.load(self.config, dup_sectioned_agenda, 'test')

    @raises(ConfigError)
@@ -16,6 +16,7 @@
import unittest
from nose.tools import assert_equal

from wa.framework.configuration.execution import ConfigManager
from wa.utils.misc import merge_config_values


@@ -38,3 +39,21 @@ class TestConfigUtils(unittest.TestCase):
        if v2 is not None:
            assert_equal(type(result), type(v2))


class TestConfigParser(unittest.TestCase):

    def test_param_merge(self):
        config = ConfigManager()

        config.load_config({'workload_params': {'one': 1, 'three': {'ex': 'x'}}, 'runtime_params': {'aye': 'a'}}, 'file_one')
        config.load_config({'workload_params': {'two': 2, 'three': {'why': 'y'}}, 'runtime_params': {'bee': 'b'}}, 'file_two')

        assert_equal(
            config.jobs_config.job_spec_template['workload_parameters'],
            {'one': 1, 'two': 2, 'three': {'why': 'y'}},
        )
        assert_equal(
            config.jobs_config.job_spec_template['runtime_parameters'],
            {'aye': 'a', 'bee': 'b'},
        )
|
||||
|
||||
from wa.utils.exec_control import (init_environment, reset_environment,
|
||||
activate_environment, once,
|
||||
once_per_class, once_per_instance)
|
||||
once_per_class, once_per_instance,
|
||||
once_per_attribute_value)
|
||||
|
||||
class TestClass(object):
|
||||
class MockClass(object):
|
||||
|
||||
called = 0
|
||||
|
||||
@ -32,7 +33,7 @@ class TestClass(object):
|
||||
|
||||
@once
|
||||
def called_once(self):
|
||||
TestClass.called += 1
|
||||
MockClass.called += 1
|
||||
|
||||
@once
|
||||
def initilize_once(self):
|
||||
@ -50,7 +51,7 @@ class TestClass(object):
|
||||
return '{}: Called={}'.format(self.__class__.__name__, self.called)
|
||||
|
||||
|
||||
class SubClass(TestClass):
|
||||
class SubClass(MockClass):
|
||||
|
||||
def __init__(self):
|
||||
super(SubClass, self).__init__()
|
||||
@ -110,7 +111,19 @@ class AnotherClass(object):
|
||||
self.count += 1
|
||||
|
||||
|
||||
class AnotherSubClass(TestClass):
|
||||
class NamedClass:
|
||||
|
||||
count = 0
|
||||
|
||||
def __init__(self, name):
|
||||
self.name = name
|
||||
|
||||
@once_per_attribute_value('name')
|
||||
def initilize(self):
|
||||
NamedClass.count += 1
|
||||
|
||||
|
||||
class AnotherSubClass(MockClass):
|
||||
|
||||
def __init__(self):
|
||||
super(AnotherSubClass, self).__init__()
|
||||
@@ -142,7 +155,7 @@ class EnvironmentManagementTest(TestCase):

    def test_reset_current_environment(self):
        activate_environment('CURRENT_ENVIRONMENT')
        t1 = TestClass()
        t1 = MockClass()
        t1.initilize_once()
        assert_equal(t1.count, 1)

@@ -152,7 +165,7 @@ class EnvironmentManagementTest(TestCase):

    def test_switch_environment(self):
        activate_environment('ENVIRONMENT1')
        t1 = TestClass()
        t1 = MockClass()
        t1.initilize_once()
        assert_equal(t1.count, 1)

@@ -166,7 +179,7 @@ class EnvironmentManagementTest(TestCase):

    def test_reset_environment_name(self):
        activate_environment('ENVIRONMENT')
        t1 = TestClass()
        t1 = MockClass()
        t1.initilize_once()
        assert_equal(t1.count, 1)

@@ -195,7 +208,7 @@ class OnlyOnceEnvironmentTest(TestCase):
        reset_environment('TEST_ENVIRONMENT')

    def test_single_instance(self):
        t1 = TestClass()
        t1 = MockClass()
        ac = AnotherClass()

        t1.initilize_once()
@@ -209,8 +222,8 @@ class OnlyOnceEnvironmentTest(TestCase):


    def test_mulitple_instances(self):
        t1 = TestClass()
        t2 = TestClass()
        t1 = MockClass()
        t2 = MockClass()

        t1.initilize_once()
        assert_equal(t1.count, 1)
@@ -220,7 +233,7 @@ class OnlyOnceEnvironmentTest(TestCase):


    def test_sub_classes(self):
        t1 = TestClass()
        t1 = MockClass()
        sc = SubClass()
        ss = SubSubClass()
        asc = AnotherSubClass()
@@ -250,7 +263,7 @@ class OncePerClassEnvironmentTest(TestCase):
        reset_environment('TEST_ENVIRONMENT')

    def test_single_instance(self):
        t1 = TestClass()
        t1 = MockClass()
        ac = AnotherClass()

        t1.initilize_once_per_class()
@@ -264,8 +277,8 @@ class OncePerClassEnvironmentTest(TestCase):


    def test_mulitple_instances(self):
        t1 = TestClass()
        t2 = TestClass()
        t1 = MockClass()
        t2 = MockClass()

        t1.initilize_once_per_class()
        assert_equal(t1.count, 1)
@@ -275,7 +288,7 @@ class OncePerClassEnvironmentTest(TestCase):


    def test_sub_classes(self):
        t1 = TestClass()
        t1 = MockClass()
        sc1 = SubClass()
        sc2 = SubClass()
        ss1 = SubSubClass()
@@ -308,7 +321,7 @@ class OncePerInstanceEnvironmentTest(TestCase):
        reset_environment('TEST_ENVIRONMENT')

    def test_single_instance(self):
        t1 = TestClass()
        t1 = MockClass()
        ac = AnotherClass()

        t1.initilize_once_per_instance()
@@ -322,8 +335,8 @@ class OncePerInstanceEnvironmentTest(TestCase):


    def test_mulitple_instances(self):
        t1 = TestClass()
        t2 = TestClass()
        t1 = MockClass()
        t2 = MockClass()

        t1.initilize_once_per_instance()
        assert_equal(t1.count, 1)
@@ -333,7 +346,7 @@ class OncePerInstanceEnvironmentTest(TestCase):


    def test_sub_classes(self):
        t1 = TestClass()
        t1 = MockClass()
        sc = SubClass()
        ss = SubSubClass()
        asc = AnotherSubClass()
@@ -352,3 +365,30 @@ class OncePerInstanceEnvironmentTest(TestCase):
        asc.initilize_once_per_instance()
        asc.initilize_once_per_instance()
        assert_equal(asc.count, 2)


class OncePerAttributeValueTest(TestCase):

    def setUp(self):
        activate_environment('TEST_ENVIRONMENT')

    def tearDown(self):
        reset_environment('TEST_ENVIRONMENT')

    def test_once_attribute_value(self):
        classes = [
            NamedClass('Rick'),
            NamedClass('Morty'),
            NamedClass('Rick'),
            NamedClass('Morty'),
            NamedClass('Morty'),
            NamedClass('Summer'),
        ]

        for c in classes:
            c.initilize()

        for c in classes:
            c.initilize()

        assert_equal(NamedClass.count, 3)
tests/test_execution.py (new file, 315 lines)
@@ -0,0 +1,315 @@
# Copyright 2020 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
import tempfile
from unittest import TestCase

from mock.mock import Mock
from nose.tools import assert_equal
from datetime import datetime

from wa.framework.configuration import RunConfiguration
from wa.framework.configuration.core import JobSpec, Status
from wa.framework.execution import ExecutionContext, Runner
from wa.framework.job import Job
from wa.framework.output import RunOutput, init_run_output
from wa.framework.output_processor import ProcessorManager
import wa.framework.signal as signal
from wa.framework.run import JobState
from wa.framework.exception import ExecutionError


class MockConfigManager(Mock):

    @property
    def jobs(self):
        return self._joblist

    @property
    def loaded_config_sources(self):
        return []

    @property
    def plugin_cache(self):
        return MockPluginCache()

    def __init__(self, *args, **kwargs):
        super(MockConfigManager, self).__init__(*args, **kwargs)
        self._joblist = None
        self.run_config = RunConfiguration()

    def to_pod(self):
        return {}


class MockPluginCache(Mock):

    def list_plugins(self, kind=None):
        return []


class MockProcessorManager(Mock):

    def __init__(self, *args, **kwargs):
        super(MockProcessorManager, self).__init__(*args, **kwargs)

    def get_enabled(self):
        return []


class JobState_force_retry(JobState):

    @property
    def status(self):
        return self._status

    @status.setter
    def status(self, value):
        if (self.retries != self.times_to_retry) and (value == Status.RUNNING):
            self._status = Status.FAILED
            if self.output:
                self.output.status = Status.FAILED
        else:
            self._status = value
            if self.output:
                self.output.status = value

    def __init__(self, to_retry, *args, **kwargs):
        self.retries = 0
        self._status = Status.NEW
        self.times_to_retry = to_retry
        self.output = None
        super(JobState_force_retry, self).__init__(*args, **kwargs)


class Job_force_retry(Job):
    '''This class imitates a job that retries as many times as specified by
    ``retries`` in its constructor'''

    def __init__(self, to_retry, *args, **kwargs):
        super(Job_force_retry, self).__init__(*args, **kwargs)
        self.state = JobState_force_retry(to_retry, self.id, self.label, self.iteration, Status.NEW)
        self.initialized = False
        self.finalized = False

    def initialize(self, context):
        self.initialized = True
        return super().initialize(context)

    def finalize(self, context):
        self.finalized = True
        return super().finalize(context)


class TestRunState(TestCase):

    def setUp(self):
        self.path = tempfile.mkstemp()[1]
        os.remove(self.path)
        self.initialise_signals()
        self.context = get_context(self.path)
        self.job_spec = get_jobspec()

    def tearDown(self):
        signal.disconnect(self._verify_serialized_state, signal.RUN_INITIALIZED)
        signal.disconnect(self._verify_serialized_state, signal.JOB_STARTED)
        signal.disconnect(self._verify_serialized_state, signal.JOB_RESTARTED)
        signal.disconnect(self._verify_serialized_state, signal.JOB_COMPLETED)
        signal.disconnect(self._verify_serialized_state, signal.JOB_FAILED)
        signal.disconnect(self._verify_serialized_state, signal.JOB_ABORTED)
        signal.disconnect(self._verify_serialized_state, signal.RUN_FINALIZED)

    def test_job_state_transitions_pass(self):
        '''Tests state equality when the job passes first try'''
        job = Job(self.job_spec, 1, self.context)
        job.workload = Mock()

        self.context.cm._joblist = [job]
        self.context.run_state.add_job(job)

        runner = Runner(self.context, MockProcessorManager())
        runner.run()

    def test_job_state_transitions_fail(self):
        '''Tests state equality when job fails completely'''
        job = Job_force_retry(3, self.job_spec, 1, self.context)
        job.workload = Mock()

        self.context.cm._joblist = [job]
        self.context.run_state.add_job(job)

        runner = Runner(self.context, MockProcessorManager())
        runner.run()

    def test_job_state_transitions_retry(self):
        '''Tests state equality when job fails initially'''
        job = Job_force_retry(1, self.job_spec, 1, self.context)
        job.workload = Mock()

        self.context.cm._joblist = [job]
        self.context.run_state.add_job(job)

        runner = Runner(self.context, MockProcessorManager())
        runner.run()

    def initialise_signals(self):
        signal.connect(self._verify_serialized_state, signal.RUN_INITIALIZED)
        signal.connect(self._verify_serialized_state, signal.JOB_STARTED)
        signal.connect(self._verify_serialized_state, signal.JOB_RESTARTED)
        signal.connect(self._verify_serialized_state, signal.JOB_COMPLETED)
        signal.connect(self._verify_serialized_state, signal.JOB_FAILED)
        signal.connect(self._verify_serialized_state, signal.JOB_ABORTED)
        signal.connect(self._verify_serialized_state, signal.RUN_FINALIZED)

    def _verify_serialized_state(self, _):
        fs_state = RunOutput(self.path).state
        ex_state = self.context.run_output.state

        assert_equal(fs_state.status, ex_state.status)
        fs_js_zip = zip(
            [value for key, value in fs_state.jobs.items()],
            [value for key, value in ex_state.jobs.items()]
        )
        for fs_jobstate, ex_jobstate in fs_js_zip:
            assert_equal(fs_jobstate.iteration, ex_jobstate.iteration)
            assert_equal(fs_jobstate.retries, ex_jobstate.retries)
            assert_equal(fs_jobstate.status, ex_jobstate.status)


class TestJobState(TestCase):

    def test_job_retry_status(self):
        job_spec = get_jobspec()
        context = get_context()

        job = Job_force_retry(2, job_spec, 1, context)
        job.workload = Mock()

        context.cm._joblist = [job]
        context.run_state.add_job(job)

        verifier = lambda _: assert_equal(job.status, Status.PENDING)
        signal.connect(verifier, signal.JOB_RESTARTED)

        runner = Runner(context, MockProcessorManager())
        runner.run()
        signal.disconnect(verifier, signal.JOB_RESTARTED)

    def test_skipped_job_state(self):
        # Test, if the first job fails and the bail parameter set,
        # that the remaining jobs have status: SKIPPED
        job_spec = get_jobspec()
        context = get_context()

        context.cm.run_config.bail_on_job_failure = True

        job1 = Job_force_retry(3, job_spec, 1, context)
        job2 = Job(job_spec, 1, context)
        job1.workload = Mock()
        job2.workload = Mock()

        context.cm._joblist = [job1, job2]
        context.run_state.add_job(job1)
        context.run_state.add_job(job2)

        runner = Runner(context, MockProcessorManager())
        try:
            runner.run()
        except ExecutionError:
            assert_equal(job2.status, Status.SKIPPED)
        else:
            assert False, "ExecutionError not raised"

    def test_normal_job_finalized(self):
        # Test that a job is initialized then finalized normally
        job_spec = get_jobspec()
        context = get_context()

        job = Job_force_retry(0, job_spec, 1, context)
        job.workload = Mock()

        context.cm._joblist = [job]
        context.run_state.add_job(job)

        runner = Runner(context, MockProcessorManager())
        runner.run()

        assert_equal(job.initialized, True)
        assert_equal(job.finalized, True)

    def test_skipped_job_finalized(self):
        # Test that a skipped job has been finalized
        job_spec = get_jobspec()
        context = get_context()

        context.cm.run_config.bail_on_job_failure = True

        job1 = Job_force_retry(3, job_spec, 1, context)
        job2 = Job_force_retry(0, job_spec, 1, context)
        job1.workload = Mock()
        job2.workload = Mock()

        context.cm._joblist = [job1, job2]
        context.run_state.add_job(job1)
        context.run_state.add_job(job2)

        runner = Runner(context, MockProcessorManager())
        try:
            runner.run()
        except ExecutionError:
            assert_equal(job2.finalized, True)
        else:
            assert False, "ExecutionError not raised"

    def test_failed_job_finalized(self):
        # Test that a failed job, while the bail parameter is set,
        # is finalized
        job_spec = get_jobspec()
        context = get_context()

        context.cm.run_config.bail_on_job_failure = True

        job1 = Job_force_retry(3, job_spec, 1, context)
        job1.workload = Mock()

        context.cm._joblist = [job1]
        context.run_state.add_job(job1)

        runner = Runner(context, MockProcessorManager())
        try:
            runner.run()
        except ExecutionError:
            assert_equal(job1.finalized, True)
        else:
            assert False, "ExecutionError not raised"


def get_context(path=None):
    if not path:
        path = tempfile.mkstemp()[1]
        os.remove(path)

    config = MockConfigManager()
    output = init_run_output(path, config)

    return ExecutionContext(config, Mock(), output)


def get_jobspec():
    job_spec = JobSpec()
    job_spec.augmentations = {}
    job_spec.finalize()
    return job_spec
@@ -30,6 +30,27 @@ class Callable(object):
        return self.val


class TestSignalDisconnect(unittest.TestCase):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.callback_ctr = 0

    def setUp(self):
        signal.connect(self._call_me_once, 'first')
        signal.connect(self._call_me_once, 'second')

    def test_handler_disconnected(self):
        signal.send('first')
        signal.send('second')

    def _call_me_once(self):
        assert_equal(self.callback_ctr, 0)
        self.callback_ctr += 1
        signal.disconnect(self._call_me_once, 'first')
        signal.disconnect(self._call_me_once, 'second')


class TestPriorityDispatcher(unittest.TestCase):

    def setUp(self):
@@ -61,12 +82,16 @@ class TestPriorityDispatcher(unittest.TestCase):

    def test_wrap_propagate(self):
        d = {'before': False, 'after': False, 'success': False}

        def before():
            d['before'] = True

        def after():
            d['after'] = True

        def success():
            d['success'] = True

        signal.connect(before, signal.BEFORE_WORKLOAD_SETUP)
        signal.connect(after, signal.AFTER_WORKLOAD_SETUP)
        signal.connect(success, signal.SUCCESSFUL_WORKLOAD_SETUP)
@@ -76,7 +101,7 @@ class TestPriorityDispatcher(unittest.TestCase):
            with signal.wrap('WORKLOAD_SETUP'):
                raise RuntimeError()
        except RuntimeError:
            caught=True
            caught = True

        assert_true(d['before'])
        assert_true(d['after'])
@@ -190,3 +190,10 @@ class TestToggleSet(TestCase):

        ts6 = ts2.merge_into(ts3).merge_with(ts1)
        assert_equal(ts6, toggle_set(['one', 'two', 'three', 'four', 'five', '~~']))

    def test_order_on_create(self):
        ts1 = toggle_set(['one', 'two', 'three', '~one'])
        assert_equal(ts1, toggle_set(['~one', 'two', 'three']))

        ts1 = toggle_set(['~one', 'two', 'three', 'one'])
        assert_equal(ts1, toggle_set(['one', 'two', 'three']))
@@ -33,7 +33,7 @@ from wa.framework.target.descriptor import (TargetDescriptor, TargetDescription,
                                            create_target_description, add_description_for_target)
from wa.framework.workload import (Workload, ApkWorkload, ApkUiautoWorkload,
                                   ApkReventWorkload, UIWorkload, UiautoWorkload,
                                   ReventWorkload)
                                   PackageHandler, ReventWorkload, TestPackageHandler)


from wa.framework.version import get_wa_version, get_wa_version_with_commit
@ -23,7 +23,6 @@ import re
|
||||
import uuid
|
||||
import getpass
|
||||
from collections import OrderedDict
|
||||
from distutils.dir_util import copy_tree # pylint: disable=no-name-in-module, import-error
|
||||
|
||||
from devlib.utils.types import identifier
|
||||
try:
|
||||
@ -43,6 +42,24 @@ from wa.utils.misc import (ensure_directory_exists as _d, capitalize,
|
||||
from wa.utils.postgres import get_schema, POSTGRES_SCHEMA_DIR
|
||||
from wa.utils.serializer import yaml
|
||||
|
||||
if sys.version_info >= (3, 8):
|
||||
def copy_tree(src, dst):
|
||||
from shutil import copy, copytree # pylint: disable=import-outside-toplevel
|
||||
copytree(
|
||||
src,
|
||||
dst,
|
||||
# dirs_exist_ok=True only exists in Python >= 3.8
|
||||
dirs_exist_ok=True,
|
||||
# Align with devlib and only copy the content without metadata
|
||||
copy_function=copy
|
||||
)
|
||||
else:
|
||||
def copy_tree(src, dst):
|
||||
# pylint: disable=import-outside-toplevel, redefined-outer-name
|
||||
from distutils.dir_util import copy_tree
|
||||
# Align with devlib and only copy the content without metadata
|
||||
copy_tree(src, dst, preserve_mode=False, preserve_times=False)
|
||||
|
||||
|
||||
TEMPLATES_DIR = os.path.join(os.path.dirname(__file__), 'templates')
|
||||
|
||||
@@ -106,8 +123,8 @@ class CreateDatabaseSubcommand(SubCommand):
    def execute(self, state, args):  # pylint: disable=too-many-branches
        if not psycopg2:
            raise CommandError(
                'The module psycopg2 is required for the wa ' +
                'create database command.')
                'The module psycopg2 is required for the wa '
                + 'create database command.')

        if args.dbname == 'postgres':
            raise ValueError('Databasename to create cannot be postgres.')
@@ -131,8 +148,8 @@ class CreateDatabaseSubcommand(SubCommand):
        config = yaml.load(config_file)
        if 'postgres' in config and not args.force_update_config:
            raise CommandError(
                "The entry 'postgres' already exists in the config file. " +
                "Please specify the -F flag to force an update.")
                "The entry 'postgres' already exists in the config file. "
                + "Please specify the -F flag to force an update.")

        possible_connection_errors = [
            (
@@ -261,8 +278,8 @@ class CreateDatabaseSubcommand(SubCommand):
        else:
            if not self.force:
                raise CommandError(
                    "Database {} already exists. ".format(self.dbname) +
                    "Please specify the -f flag to create it from afresh."
                    "Database {} already exists. ".format(self.dbname)
                    + "Please specify the -f flag to create it from afresh."
                )

    def _create_database_postgres(self):
@@ -400,14 +417,14 @@ class CreateWorkloadSubcommand(SubCommand):
        self.parser.add_argument('name', metavar='NAME',
                                 help='Name of the workload to be created')
        self.parser.add_argument('-p', '--path', metavar='PATH', default=None,
                                 help='The location at which the workload will be created. If not specified, ' +
                                      'this defaults to "~/.workload_automation/plugins".')
                                 help='The location at which the workload will be created. If not specified, '
                                      + 'this defaults to "~/.workload_automation/plugins".')
        self.parser.add_argument('-f', '--force', action='store_true',
                                 help='Create the new workload even if a workload with the specified ' +
                                      'name already exists.')
                                 help='Create the new workload even if a workload with the specified '
                                      + 'name already exists.')
        self.parser.add_argument('-k', '--kind', metavar='KIND', default='basic', choices=list(create_funcs.keys()),
                                 help='The type of workload to be created. The available options ' +
                                      'are: {}'.format(', '.join(list(create_funcs.keys()))))
                                 help='The type of workload to be created. The available options '
                                      + 'are: {}'.format(', '.join(list(create_funcs.keys()))))

    def execute(self, state, args):  # pylint: disable=R0201
        where = args.path or 'local'
@@ -430,8 +447,8 @@ class CreatePackageSubcommand(SubCommand):
        self.parser.add_argument('name', metavar='NAME',
                                 help='Name of the package to be created')
        self.parser.add_argument('-p', '--path', metavar='PATH', default=None,
                                 help='The location at which the new package will be created. If not specified, ' +
                                      'current working directory will be used.')
                                 help='The location at which the new package will be created. If not specified, '
                                      + 'current working directory will be used.')
        self.parser.add_argument('-f', '--force', action='store_true',
                                 help='Create the new package even if a file or directory with the same name '
                                      'already exists at the specified location.')
@@ -1,4 +1,4 @@
--!VERSION!1.2!ENDVERSION!
--!VERSION!1.6!ENDVERSION!
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
CREATE EXTENSION IF NOT EXISTS "lo";
@@ -61,7 +61,7 @@ CREATE TABLE Runs (

CREATE TABLE Jobs (
    oid uuid NOT NULL,
    run_oid uuid NOT NULL references Runs(oid),
    run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
    status status_enum,
    retry int,
    label text,
@@ -76,12 +76,13 @@ CREATE TABLE Jobs (

CREATE TABLE Targets (
    oid uuid NOT NULL,
    run_oid uuid NOT NULL references Runs(oid),
    run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
    target text,
    modules text[],
    cpus text[],
    os text,
    os_version jsonb,
    hostid int,
    hostid bigint,
    hostname text,
    abi text,
    is_rooted boolean,
@@ -96,12 +97,13 @@ CREATE TABLE Targets (
    android_id text,
    _pod_version int,
    _pod_serialization_version int,
    system_id text,
    PRIMARY KEY (oid)
);

CREATE TABLE Events (
    oid uuid NOT NULL,
    run_oid uuid NOT NULL references Runs(oid),
    run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
    job_oid uuid references Jobs(oid),
    timestamp timestamp,
    message text,
@@ -112,28 +114,28 @@ CREATE TABLE Events (

CREATE TABLE Resource_Getters (
    oid uuid NOT NULL,
    run_oid uuid NOT NULL references Runs(oid),
    run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
    name text,
    PRIMARY KEY (oid)
);

CREATE TABLE Augmentations (
    oid uuid NOT NULL,
    run_oid uuid NOT NULL references Runs(oid),
    run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
    name text,
    PRIMARY KEY (oid)
);

CREATE TABLE Jobs_Augs (
    oid uuid NOT NULL,
    job_oid uuid NOT NULL references Jobs(oid),
    augmentation_oid uuid NOT NULL references Augmentations(oid),
    job_oid uuid NOT NULL references Jobs(oid) ON DELETE CASCADE,
    augmentation_oid uuid NOT NULL references Augmentations(oid) ON DELETE CASCADE,
    PRIMARY KEY (oid)
);

CREATE TABLE Metrics (
    oid uuid NOT NULL,
    run_oid uuid NOT NULL references Runs(oid),
    run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
    job_oid uuid references Jobs(oid),
    name text,
    value double precision,
@@ -156,7 +158,7 @@ CREATE TRIGGER t_raster BEFORE UPDATE OR DELETE ON LargeObjects

CREATE TABLE Artifacts (
    oid uuid NOT NULL,
    run_oid uuid NOT NULL references Runs(oid),
    run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
    job_oid uuid references Jobs(oid),
    name text,
    large_object_uuid uuid NOT NULL references LargeObjects(oid),
@@ -164,15 +166,22 @@ CREATE TABLE Artifacts (
    kind text,
    _pod_version int,
    _pod_serialization_version int,
    is_dir boolean,
    PRIMARY KEY (oid)
);

CREATE RULE del_lo AS
    ON DELETE TO Artifacts
    DO DELETE FROM LargeObjects
        WHERE LargeObjects.oid = old.large_object_uuid
;

CREATE TABLE Classifiers (
    oid uuid NOT NULL,
    artifact_oid uuid references Artifacts(oid),
    metric_oid uuid references Metrics(oid),
    job_oid uuid references Jobs(oid),
    run_oid uuid references Runs(oid),
    artifact_oid uuid references Artifacts(oid) ON DELETE CASCADE,
    metric_oid uuid references Metrics(oid) ON DELETE CASCADE,
    job_oid uuid references Jobs(oid) ON DELETE CASCADE,
    run_oid uuid references Runs(oid) ON DELETE CASCADE,
    key text,
    value text,
    PRIMARY KEY (oid)
@@ -180,7 +189,7 @@ CREATE TABLE Classifiers (

CREATE TABLE Parameters (
    oid uuid NOT NULL,
    run_oid uuid NOT NULL references Runs(oid),
    run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
    job_oid uuid references Jobs(oid),
    augmentation_oid uuid references Augmentations(oid),
    resource_getter_oid uuid references Resource_Getters(oid),

@@ -0,0 +1,3 @@
ALTER TABLE targets ADD COLUMN system_id text;

ALTER TABLE artifacts ADD COLUMN is_dir boolean;

@@ -0,0 +1,2 @@
ALTER TABLE targets ADD COLUMN modules text[];

@@ -0,0 +1 @@
ALTER TABLE targets ALTER hostid TYPE BIGINT;
wa/commands/postgres_schemas/postgres_schema_update_v1.6.sql (new file, 109 lines)
@@ -0,0 +1,109 @@
ALTER TABLE jobs
    DROP CONSTRAINT jobs_run_oid_fkey,
    ADD CONSTRAINT jobs_run_oid_fkey
        FOREIGN KEY (run_oid)
        REFERENCES runs(oid)
        ON DELETE CASCADE
;

ALTER TABLE targets
    DROP CONSTRAINT targets_run_oid_fkey,
    ADD CONSTRAINT targets_run_oid_fkey
        FOREIGN KEY (run_oid)
        REFERENCES runs(oid)
        ON DELETE CASCADE
;

ALTER TABLE events
    DROP CONSTRAINT events_run_oid_fkey,
    ADD CONSTRAINT events_run_oid_fkey
        FOREIGN KEY (run_oid)
        REFERENCES runs(oid)
        ON DELETE CASCADE
;

ALTER TABLE resource_getters
    DROP CONSTRAINT resource_getters_run_oid_fkey,
    ADD CONSTRAINT resource_getters_run_oid_fkey
        FOREIGN KEY (run_oid)
        REFERENCES runs(oid)
        ON DELETE CASCADE
;

ALTER TABLE augmentations
    DROP CONSTRAINT augmentations_run_oid_fkey,
    ADD CONSTRAINT augmentations_run_oid_fkey
        FOREIGN KEY (run_oid)
        REFERENCES runs(oid)
        ON DELETE CASCADE
;

ALTER TABLE jobs_augs
    DROP CONSTRAINT jobs_augs_job_oid_fkey,
    DROP CONSTRAINT jobs_augs_augmentation_oid_fkey,
    ADD CONSTRAINT jobs_augs_job_oid_fkey
        FOREIGN KEY (job_oid)
        REFERENCES Jobs(oid)
        ON DELETE CASCADE,
    ADD CONSTRAINT jobs_augs_augmentation_oid_fkey
        FOREIGN KEY (augmentation_oid)
        REFERENCES Augmentations(oid)
        ON DELETE CASCADE
;

ALTER TABLE metrics
    DROP CONSTRAINT metrics_run_oid_fkey,
    ADD CONSTRAINT metrics_run_oid_fkey
        FOREIGN KEY (run_oid)
        REFERENCES runs(oid)
        ON DELETE CASCADE
;

ALTER TABLE artifacts
    DROP CONSTRAINT artifacts_run_oid_fkey,
    ADD CONSTRAINT artifacts_run_oid_fkey
        FOREIGN KEY (run_oid)
        REFERENCES runs(oid)
        ON DELETE CASCADE
;

CREATE RULE del_lo AS
    ON DELETE TO Artifacts
    DO DELETE FROM LargeObjects
        WHERE LargeObjects.oid = old.large_object_uuid
;

ALTER TABLE classifiers
    DROP CONSTRAINT classifiers_artifact_oid_fkey,
    DROP CONSTRAINT classifiers_metric_oid_fkey,
    DROP CONSTRAINT classifiers_job_oid_fkey,
    DROP CONSTRAINT classifiers_run_oid_fkey,

    ADD CONSTRAINT classifiers_artifact_oid_fkey
        FOREIGN KEY (artifact_oid)
        REFERENCES artifacts(oid)
        ON DELETE CASCADE,

    ADD CONSTRAINT classifiers_metric_oid_fkey
        FOREIGN KEY (metric_oid)
        REFERENCES metrics(oid)
        ON DELETE CASCADE,

    ADD CONSTRAINT classifiers_job_oid_fkey
        FOREIGN KEY (job_oid)
        REFERENCES jobs(oid)
        ON DELETE CASCADE,

    ADD CONSTRAINT classifiers_run_oid_fkey
        FOREIGN KEY (run_oid)
        REFERENCES runs(oid)
        ON DELETE CASCADE
;

ALTER TABLE parameters
    DROP CONSTRAINT parameters_run_oid_fkey,
    ADD CONSTRAINT parameters_run_oid_fkey
        FOREIGN KEY (run_oid)
        REFERENCES runs(oid)
        ON DELETE CASCADE
;
@@ -17,6 +17,7 @@ import os

from wa import Command
from wa import discover_wa_outputs
from wa.framework.configuration.core import Status
from wa.framework.exception import CommandError
from wa.framework.output import RunOutput
from wa.framework.output_processor import ProcessorManager
@@ -57,8 +58,9 @@ class ProcessCommand(Command):
                                 """)
        self.parser.add_argument('-f', '--force', action='store_true',
                                 help="""
                                 Run processors that have already been
                                 run. By default these will be skipped.
                                 Run processors that have already been run. By
                                 default these will be skipped. Also, forces
                                 processing of in-progress runs.
                                 """)
        self.parser.add_argument('-r', '--recursive', action='store_true',
                                 help="""
@@ -76,10 +78,15 @@ class ProcessCommand(Command):
        if not args.recursive:
            output_list = [RunOutput(process_directory)]
        else:
            output_list = [output for output in discover_wa_outputs(process_directory)]
            output_list = list(discover_wa_outputs(process_directory))

        pc = ProcessContext()
        for run_output in output_list:
            if run_output.status < Status.OK and not args.force:
                msg = 'Skipping {} as it has not completed -- {}'
                self.logger.info(msg.format(run_output.basepath, run_output.status))
                continue

            pc.run_output = run_output
            pc.target_info = run_output.target_info
@@ -112,6 +119,12 @@ class ProcessCommand(Command):
            pm.initialize(pc)

            for job_output in run_output.jobs:
                if job_output.status < Status.OK or job_output.status in [Status.SKIPPED, Status.ABORTED]:
                    msg = 'Skipping job {} {} iteration {} -- {}'
                    self.logger.info(msg.format(job_output.id, job_output.label,
                                                job_output.iteration, job_output.status))
                    continue

                pc.job_output = job_output
                pm.enable_all()
                if not args.force:
@@ -142,5 +155,6 @@ class ProcessCommand(Command):
            pm.export_run_output(pc)
            pm.finalize(pc)

            run_output.write_info()
            run_output.write_result()
        self.logger.info('Done.')
wa/commands/report.py (new file, 288 lines)
@@ -0,0 +1,288 @@
from collections import Counter
from datetime import datetime, timedelta
import logging
import os

from wa import Command, settings
from wa.framework.configuration.core import Status
from wa.framework.output import RunOutput, discover_wa_outputs
from wa.utils.doc import underline
from wa.utils.log import COLOR_MAP, RESET_COLOR
from wa.utils.terminalsize import get_terminal_size


class ReportCommand(Command):

    name = 'report'
    description = '''
    Monitor an ongoing run and provide information on its progress.

    Specify the output directory of the run you would like to monitor;
    alternatively report will attempt to discover wa output directories
    within the current directory. The output includes run information such as
    the UUID, start time, duration, project name and a short summary of the
    run's progress (number of completed jobs, the number of jobs in each
    different status).

    If verbose output is specified, the output includes a list of all events
    labelled as not specific to any job, followed by a list of the jobs in the
    order executed, with their retries (if any), current status and, if the job
    is finished, a list of events that occurred during that job's execution.

    This is an example of a job status line:

        wk1 (exoplayer) [1] - 2, PARTIAL

    It contains two entries delimited by a comma: the job's descriptor followed
    by its completion status (``PARTIAL``, in this case). The descriptor
    consists of the following elements:

    - the job ID (``wk1``)
    - the job label (which defaults to the workload name) in parentheses
    - job iteration number in square brackets (``1`` in this case)
    - a hyphen followed by the retry attempt number.
      (note: this will only be shown if the job has been retried at least
      once. If the job has not yet run, or if it completed on the first
      attempt, the hyphen and retry count -- which in that case would be
      zero -- will not appear).
    '''
    def initialize(self, context):
        self.parser.add_argument('-d', '--directory',
                                 help='''
                                 Specify the WA output path. report will
                                 otherwise attempt to discover output
                                 directories in the current directory.
                                 ''')

    def execute(self, state, args):
        if args.directory:
            output_path = args.directory
            run_output = RunOutput(output_path)
        else:
            possible_outputs = list(discover_wa_outputs(os.getcwd()))
            num_paths = len(possible_outputs)

            if num_paths > 1:
                print('More than one possible output directory found,'
                      ' please choose a path from the following:'
                      )

                for i in range(num_paths):
                    print("{}: {}".format(i, possible_outputs[i].basepath))

                while True:
                    try:
                        select = int(input())
                    except ValueError:
                        print("Please select a valid path number")
                        continue

                    if select not in range(num_paths):
                        print("Please select a valid path number")
                        continue
                    break

                run_output = possible_outputs[select]

            else:
                run_output = possible_outputs[0]

        rm = RunMonitor(run_output)
        print(rm.generate_output(args.verbose))
class RunMonitor:

    @property
    def elapsed_time(self):
        if self._elapsed is None:
            if self.ro.info.duration is None:
                self._elapsed = datetime.utcnow() - self.ro.info.start_time
            else:
                self._elapsed = self.ro.info.duration
        return self._elapsed

    @property
    def job_outputs(self):
        if self._job_outputs is None:
            self._job_outputs = {
                (j_o.id, j_o.label, j_o.iteration): j_o for j_o in self.ro.jobs
            }
        return self._job_outputs

    @property
    def projected_duration(self):
        elapsed = self.elapsed_time.total_seconds()
        proj = timedelta(seconds=elapsed * (len(self.jobs) / len(self.segmented['finished'])))
        return proj - self.elapsed_time
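    # Worked example (hypothetical numbers): with 8 jobs in total, 2 of them
    # finished, and 10 minutes elapsed, the projected total run time is
    # 10 * (8 / 2) = 40 minutes, so projected_duration yields 30 minutes
    # remaining.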

    def __init__(self, ro):
        self.ro = ro
        self._elapsed = None
        self._p_duration = None
        self._job_outputs = None
        self._termwidth = None
        self._fmt = _simple_formatter()
        self.get_data()
    def get_data(self):
        self.jobs = [state for label_id, state in self.ro.state.jobs.items()]
        if self.jobs:
            rc = self.ro.run_config
            self.segmented = segment_jobs_by_state(self.jobs,
                                                   rc.max_retries,
                                                   rc.retry_on_status
                                                   )

    def generate_run_header(self):
        info = self.ro.info

        header = underline('Run Info')
        header += "UUID: {}\n".format(info.uuid)
        if info.run_name:
            header += "Run name: {}\n".format(info.run_name)
        if info.project:
            header += "Project: {}\n".format(info.project)
        if info.project_stage:
            header += "Project stage: {}\n".format(info.project_stage)

        if info.start_time:
            duration = _seconds_as_smh(self.elapsed_time.total_seconds())
            header += ("Start time: {}\n"
                       "Duration: {:02}:{:02}:{:02}\n"
                       ).format(info.start_time,
                                duration[2], duration[1], duration[0],
                                )
            if self.segmented['finished'] and not info.end_time:
                p_duration = _seconds_as_smh(self.projected_duration.total_seconds())
                header += "Projected time remaining: {:02}:{:02}:{:02}\n".format(
                    p_duration[2], p_duration[1], p_duration[0]
                )

            elif self.ro.info.end_time:
                header += "End time: {}\n".format(info.end_time)

        return header + '\n'

    def generate_job_summary(self):
        total = len(self.jobs)
        num_fin = len(self.segmented['finished'])

        summary = underline('Job Summary')
        summary += 'Total: {}, Completed: {} ({}%)\n'.format(
            total, num_fin, (num_fin / total) * 100
        ) if total > 0 else 'No jobs created\n'

        ctr = Counter()
        for run_state, jobs in ((k, v) for k, v in self.segmented.items() if v):
            if run_state == 'finished':
                ctr.update([job.status.name.lower() for job in jobs])
            else:
                ctr[run_state] += len(jobs)

        return summary + ', '.join(
            [str(count) + ' ' + self._fmt.highlight_keyword(status) for status, count in ctr.items()]
        ) + '\n\n'

    def generate_job_detail(self):
        detail = underline('Job Detail')
        for job in self.jobs:
            detail += ('{} ({}) [{}]{}, {}\n').format(
                job.id,
                job.label,
                job.iteration,
                ' - ' + str(job.retries) if job.retries else '',
                self._fmt.highlight_keyword(str(job.status))
            )

            job_output = self.job_outputs[(job.id, job.label, job.iteration)]
            for event in job_output.events:
                detail += self._fmt.fit_term_width(
                    '\t{}\n'.format(event.summary)
                )
        return detail

    def generate_run_detail(self):
        detail = underline('Run Events') if self.ro.events else ''

        for event in self.ro.events:
            detail += '{}\n'.format(event.summary)

        return detail + '\n'

    def generate_output(self, verbose):
        if not self.jobs:
            return 'No jobs found in output directory\n'

        output = self.generate_run_header()
        output += self.generate_job_summary()

        if verbose:
            output += self.generate_run_detail()
            output += self.generate_job_detail()

        return output


def _seconds_as_smh(seconds):
    seconds = int(seconds)
    hours = seconds // 3600
    minutes = (seconds % 3600) // 60
    seconds = seconds % 60
    return seconds, minutes, hours


def segment_jobs_by_state(jobstates, max_retries, retry_status):
    finished_states = [
        Status.PARTIAL, Status.FAILED,
        Status.ABORTED, Status.OK, Status.SKIPPED
    ]

    segmented = {
        'finished': [], 'other': [], 'running': [],
        'pending': [], 'uninitialized': []
    }

    for jobstate in jobstates:
        if (jobstate.status in retry_status) and jobstate.retries < max_retries:
            segmented['running'].append(jobstate)
        elif jobstate.status in finished_states:
            segmented['finished'].append(jobstate)
        elif jobstate.status == Status.RUNNING:
            segmented['running'].append(jobstate)
        elif jobstate.status == Status.PENDING:
            segmented['pending'].append(jobstate)
        elif jobstate.status == Status.NEW:
            segmented['uninitialized'].append(jobstate)
        else:
            segmented['other'].append(jobstate)

    return segmented


class _simple_formatter:
    color_map = {
        'running': COLOR_MAP[logging.INFO],
        'partial': COLOR_MAP[logging.WARNING],
        'failed': COLOR_MAP[logging.CRITICAL],
        'aborted': COLOR_MAP[logging.ERROR]
    }

    def __init__(self):
        self.termwidth = get_terminal_size()[0]
        self.color = settings.logging['color']

    def fit_term_width(self, text):
        text = text.expandtabs()
        if len(text) <= self.termwidth:
            return text
        else:
            return text[0:self.termwidth - 4] + " ...\n"

    def highlight_keyword(self, kw):
        if not self.color or kw not in _simple_formatter.color_map:
            return kw

        color = _simple_formatter.color_map[kw.lower()]
        return '{}{}{}'.format(color, kw, RESET_COLOR)
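A hedged usage sketch for the new command (assumed invocations; the verbose flag is WA's global -v, which generate_output() consults via args.verbose):

# wa report -d ./wa_output        -- run info and job summary only
# wa report -d ./wa_output -v     -- also prints run events and per-job detail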
@@ -25,10 +25,6 @@ from wa.framework.target.manager import TargetManager
from wa.utils.revent import ReventRecorder


if sys.version_info[0] == 3:
    raw_input = input  # pylint: disable=redefined-builtin


class RecordCommand(Command):

    name = 'record'
@@ -96,8 +92,8 @@ class RecordCommand(Command):
        if args.workload and args.output:
            self.logger.error("Output file cannot be specified with Workload")
            sys.exit()
        if not args.workload and (args.setup or args.extract_results or
                                  args.teardown or args.all):
        if not args.workload and (args.setup or args.extract_results
                                  or args.teardown or args.all):
            self.logger.error("Cannot specify a recording stage without a Workload")
            sys.exit()
        if args.workload and not any([args.all, args.teardown, args.extract_results, args.run, args.setup]):
@@ -137,11 +133,11 @@ class RecordCommand(Command):
    def record(self, revent_file, name, output_path):
        msg = 'Press Enter when you are ready to record {}...'
        self.logger.info(msg.format(name))
        raw_input('')
        input('')
        self.revent_recorder.start_record(revent_file)
        msg = 'Press Enter when you have finished recording {}...'
        self.logger.info(msg.format(name))
        raw_input('')
        input('')
        self.revent_recorder.stop_record()

        if not os.path.isdir(output_path):
@@ -7,3 +7,22 @@
  was done following an extended discussion and tests that verified
  that the savings in processing power were not enough to warrant
  the creation of a dedicated server or file handler.
## 1.2
- Rename the `resourcegetters` table to `resource_getters` for consistency.
- Add Job and Run level classifiers.
- Add missing android specific properties to targets.
- Add new POD meta data to relevant tables.
- Correct job column name from `retires` to `retry`.
- Add missing run information.
## 1.3
- Add missing "system_id" field from TargetInfo.
- Enable support for uploading Artifacts that represent directories.
## 1.4
- Add "modules" field to TargetInfo to list the modules loaded by the target
  during the run.
## 1.5
- Change the type of the "hostid" in TargetInfo from Int to Bigint.
## 1.6
- Add cascading deletes to most tables to allow easy deletion of a run
  and its associated data.
- Add a rule to delete the associated large object on deletion of an artifact.
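To illustrate what the 1.6 cascade change enables, a minimal sketch using psycopg2 (the driver the create database command already requires; the connection settings and run UUID are hypothetical):

import psycopg2

conn = psycopg2.connect(dbname='wa', user='postgres')  # hypothetical settings
with conn, conn.cursor() as cur:
    # Jobs, targets, events, metrics, artifacts, classifiers and parameters
    # referencing this run are now removed by the ON DELETE CASCADE constraints.
    cur.execute('DELETE FROM runs WHERE oid = %s',
                ('00000000-0000-0000-0000-000000000000',))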
@@ -73,11 +73,8 @@ class ShowCommand(Command):

        if which('pandoc'):
            p = Popen(['pandoc', '-f', 'rst', '-t', 'man'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
            if sys.version_info[0] == 3:
                output, _ = p.communicate(rst_output.encode(sys.stdin.encoding))
                output = output.decode(sys.stdout.encoding)
            else:
                output, _ = p.communicate(rst_output)
            output, _ = p.communicate(rst_output.encode(sys.stdin.encoding))
            output = output.decode(sys.stdout.encoding)

            # Make sure to double escape back slashes
            output = output.replace('\\', '\\\\\\')
@@ -59,7 +59,7 @@ params = dict(
        'Environment :: Console',
        'License :: Other/Proprietary License',
        'Operating System :: Unix',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
    ],
)
@@ -1,18 +1,18 @@
apply plugin: 'com.android.application'

android {
    compileSdkVersion 18
    buildToolsVersion '25.0.0'
    compileSdkVersion 28
    buildToolsVersion '28.0.0'
    defaultConfig {
        applicationId "${package_name}"
        minSdkVersion 18
        targetSdkVersion 25
        targetSdkVersion 28
        testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner"
    }
    buildTypes {
        applicationVariants.all { variant ->
            variant.outputs.each { output ->
                output.outputFile = file("$$project.buildDir/apk/${package_name}.apk")
                output.outputFileName = "${package_name}.apk"
            }
        }
    }
}
@@ -16,7 +16,7 @@ fi

# Copy base class library from wlauto dist
libs_dir=app/libs
base_class=`python -c "import os, wa; print os.path.join(os.path.dirname(wa.__file__), 'framework', 'uiauto', 'uiauto.aar')"`
base_class=`python3 -c "import os, wa; print(os.path.join(os.path.dirname(wa.__file__), 'framework', 'uiauto', 'uiauto.aar'))"`
mkdir -p $$libs_dir
cp $$base_class $$libs_dir

@@ -31,8 +31,8 @@ fi

# If successful move APK file to workload folder (overwrite previous)
rm -f ../$package_name
if [[ -f app/build/apk/$package_name.apk ]]; then
    cp app/build/apk/$package_name.apk ../$package_name.apk
if [[ -f app/build/outputs/apk/debug/$package_name.apk ]]; then
    cp app/build/outputs/apk/debug/$package_name.apk ../$package_name.apk
else
    echo 'ERROR: UiAutomator apk could not be found!'
    exit 9
@@ -3,9 +3,10 @@
buildscript {
    repositories {
        jcenter()
        google()
    }
    dependencies {
        classpath 'com.android.tools.build:gradle:2.3.1'
        classpath 'com.android.tools.build:gradle:7.2.1'

        // NOTE: Do not place your application dependencies here; they belong
        // in the individual module build.gradle files
@@ -15,6 +16,7 @@ buildscript {
allprojects {
    repositories {
        jcenter()
        google()
    }
}
@@ -3,4 +3,4 @@ distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-3.3-all.zip
distributionUrl=https\://services.gradle.org/distributions/gradle-7.3.3-all.zip
@@ -65,7 +65,6 @@ class SubCommand(object):
        options to the command's parser). ``context`` is always ``None``.

        """
        pass

    def execute(self, state, args):
        """
@@ -13,6 +13,7 @@
# limitations under the License.

import os
import logging
from copy import copy, deepcopy
from collections import OrderedDict, defaultdict

@@ -36,6 +37,8 @@ Status = enum(['UNKNOWN', 'NEW', 'PENDING',
               'STARTED', 'CONNECTED', 'INITIALIZED', 'RUNNING',
               'OK', 'PARTIAL', 'FAILED', 'ABORTED', 'SKIPPED'])

logger = logging.getLogger('config')


##########################
### CONFIG POINT TYPES ###
@@ -55,10 +58,11 @@ class RebootPolicy(object):
                   executing the first workload spec.
    :each_spec: The device will be rebooted before running a new workload spec.
    :each_iteration: The device will be rebooted before each new iteration.
    :run_completion: The device will be rebooted after the run has been completed.

    """

    valid_policies = ['never', 'as_needed', 'initial', 'each_spec', 'each_job']
    valid_policies = ['never', 'as_needed', 'initial', 'each_spec', 'each_job', 'run_completion']

    @staticmethod
    def from_pod(pod):
@@ -89,6 +93,10 @@ class RebootPolicy(object):
    def reboot_on_each_spec(self):
        return self.policy == 'each_spec'

    @property
    def reboot_on_run_completion(self):
        return self.policy == 'run_completion'

    def __str__(self):
        return self.policy
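A minimal sketch of the new policy in use (assuming, as from_pod() and __str__() above suggest, that RebootPolicy is constructed directly from the policy string):

policy = RebootPolicy('run_completion')
assert policy.reboot_on_run_completion   # True only for this policy
assert str(policy) == 'run_completion'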
@@ -192,7 +200,8 @@ class ConfigurationPoint(object):
                 constraint=None,
                 merge=False,
                 aliases=None,
                 global_alias=None):
                 global_alias=None,
                 deprecated=False):
        """
        Create a new Parameter object.

@@ -243,10 +252,12 @@ class ConfigurationPoint(object):
        :param global_alias: An alias for this parameter that can be specified at
                             the global level. A global_alias can map onto many
                             ConfigurationPoints.
        :param deprecated: Specify that this parameter is deprecated and its
                           config should be ignored. If supplied, WA will display
                           a warning to the user but will continue execution.
        """
        self.name = identifier(name)
        if kind in KIND_MAP:
            kind = KIND_MAP[kind]
        kind = KIND_MAP.get(kind, kind)
        if kind is not None and not callable(kind):
            raise ValueError('Kind must be callable.')
        self.kind = kind
@@ -266,6 +277,7 @@ class ConfigurationPoint(object):
        self.merge = merge
        self.aliases = aliases or []
        self.global_alias = global_alias
        self.deprecated = deprecated

        if self.default is not None:
            try:
@@ -281,6 +293,11 @@ class ConfigurationPoint(object):
        return False

    def set_value(self, obj, value=None, check_mandatory=True):
        if self.deprecated:
            if value is not None:
                msg = 'Deprecated parameter supplied for "{}" in "{}". The value will be ignored.'
                logger.warning(msg.format(self.name, obj.name))
            return
        if value is None:
            if self.default is not None:
                value = self.kind(self.default)
@@ -302,6 +319,8 @@ class ConfigurationPoint(object):
        setattr(obj, self.name, value)

    def validate(self, obj, check_mandatory=True):
        if self.deprecated:
            return
        value = getattr(obj, self.name, None)
        if value is not None:
            self.validate_value(obj.name, value)
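A short sketch of how a plugin might use the new flag (hypothetical parameter name; Parameter is WA's plugin-facing subclass of ConfigurationPoint, so it accepts the same keyword):

from wa import Parameter

parameters = [
    Parameter('old_knob', kind=str, deprecated=True,
              description='No longer used; kept so existing agendas still load.'),
]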
@@ -450,6 +469,7 @@ class MetaConfiguration(Configuration):
            description="""
            The local mount point for the filer hosting WA assets.
            """,
            default=''
        ),
        ConfigurationPoint(
            'logging',
@@ -466,7 +486,6 @@ class MetaConfiguration(Configuration):
            contain bash color escape codes. Set this to ``False`` if
            console output will be piped somewhere that does not know
            how to handle those.

            """,
        ),
        ConfigurationPoint(
@@ -523,6 +542,10 @@ class MetaConfiguration(Configuration):
    def target_info_cache_file(self):
        return os.path.join(self.cache_directory, 'targets.json')

    @property
    def apk_info_cache_file(self):
        return os.path.join(self.cache_directory, 'apk_info.json')

    def __init__(self, environ=None):
        super(MetaConfiguration, self).__init__()
        if environ is None:
|
||||
``"each_spec"``
|
||||
The device will be rebooted before running a new workload spec.
|
||||
|
||||
.. note:: this acts the same as each_job when execution order
|
||||
.. note:: This acts the same as ``each_job`` when execution order
|
||||
is set to by_iteration
|
||||
|
||||
``"run_completion"``
|
||||
The device will be rebooted after the run has been completed.
|
||||
'''),
|
||||
ConfigurationPoint(
|
||||
'device',
|
||||
kind=str,
|
||||
default='generic_android',
|
||||
description='''
|
||||
This setting defines what specific Device subclass will be used to
|
||||
This setting defines what specific ``Device`` subclass will be used to
|
||||
interact the connected device. Obviously, this must match your
|
||||
setup.
|
||||
''',
|
||||
@ -706,6 +732,17 @@ class RunConfiguration(Configuration):
|
||||
failed, but continue attempting to run others.
|
||||
'''
|
||||
),
|
||||
ConfigurationPoint(
|
||||
'bail_on_job_failure',
|
||||
kind=bool,
|
||||
default=False,
|
||||
description='''
|
||||
When a job fails during its run phase, WA will attempt to retry the
|
||||
job, then continue with remaining jobs after. Setting this to
|
||||
``True`` means WA will skip remaining jobs and end the run if a job
|
||||
has retried the maximum number of times, and still fails.
|
||||
'''
|
||||
),
|
        ConfigurationPoint(
            'allow_phone_home',
            kind=bool, default=True,
@@ -793,12 +830,12 @@ class JobSpec(Configuration):
                           description='''
                           The name of the workload to run.
                           '''),
        ConfigurationPoint('workload_parameters', kind=obj_dict,
        ConfigurationPoint('workload_parameters', kind=obj_dict, merge=True,
                           aliases=["params", "workload_params", "parameters"],
                           description='''
                           Parameter to be passed to the workload
                           '''),
        ConfigurationPoint('runtime_parameters', kind=obj_dict,
        ConfigurationPoint('runtime_parameters', kind=obj_dict, merge=True,
                           aliases=["runtime_params"],
                           description='''
                           Runtime parameters to be set prior to running
@@ -24,7 +24,7 @@ from wa.framework.configuration.core import (MetaConfiguration, RunConfiguration,
                                             JobGenerator, settings)
from wa.framework.configuration.parsers import ConfigParser
from wa.framework.configuration.plugin_cache import PluginCache
from wa.framework.exception import NotFoundError
from wa.framework.exception import NotFoundError, ConfigError
from wa.framework.job import Job
from wa.utils import log
from wa.utils.serializer import Podable
@@ -148,6 +148,9 @@ class ConfigManager(object):

    def generate_jobs(self, context):
        job_specs = self.jobs_config.generate_job_specs(context.tm)
        if not job_specs:
            msg = 'No jobs available for running.'
            raise ConfigError(msg)
        exec_order = self.run_config.execution_order
        log.indent()
        for spec, i in permute_iterations(job_specs, exec_order):
@@ -238,20 +238,47 @@ def _load_file(filepath, error_name):
    return raw, includes


def _config_values_from_includes(filepath, include_path, error_name):
    source_dir = os.path.dirname(filepath)
    included_files = []

    if isinstance(include_path, str):
        include_path = os.path.expanduser(os.path.join(source_dir, include_path))

        replace_value, includes = _load_file(include_path, error_name)

        included_files.append(include_path)
        included_files.extend(includes)
    elif isinstance(include_path, list):
        replace_value = {}

        for path in include_path:
            include_path = os.path.expanduser(os.path.join(source_dir, path))

            sub_replace_value, includes = _load_file(include_path, error_name)
            for key, val in sub_replace_value.items():
                replace_value[key] = merge_config_values(val, replace_value.get(key, None))

            included_files.append(include_path)
            included_files.extend(includes)
    else:
        message = "{} does not contain a valid {} structure; value for 'include#' must be a string or a list"
        raise ConfigError(message.format(filepath, error_name))

    return replace_value, included_files
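A hedged sketch of the list form this function now accepts (hypothetical file names, shown as comments). Entries are loaded in order and merged with merge_config_values(), so later files win for overlapping keys:

#   include#:
#     - common-config.yaml
#     - device-overrides.yaml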

def _process_includes(raw, filepath, error_name):
    if not raw:
        return []

    source_dir = os.path.dirname(filepath)
    included_files = []
    replace_value = None

    if hasattr(raw, 'items'):
        for key, value in raw.items():
            if key == 'include#':
                include_path = os.path.expanduser(os.path.join(source_dir, value))
                included_files.append(include_path)
                replace_value, includes = _load_file(include_path, error_name)
                replace_value, includes = _config_values_from_includes(filepath, value, error_name)
                included_files.extend(includes)
            elif hasattr(value, 'items') or isiterable(value):
                includes = _process_includes(value, filepath, error_name)
@@ -297,7 +324,7 @@ def merge_augmentations(raw):
            raise ConfigError(msg.format(value, n, exc))

    # Make sure none of the specified aliases conflict with each other
    to_check = [e for e in entries]
    to_check = list(entries)
    while len(to_check) > 1:
        check_entry = to_check.pop()
        for e in to_check:
@@ -84,9 +84,9 @@ class PluginCache(object):
                  'defined in a config file, move the entry content into the top level'
            raise ConfigError(msg.format((plugin_name)))

        if (not self.loader.has_plugin(plugin_name) and
                plugin_name not in self.targets and
                plugin_name not in GENERIC_CONFIGS):
        if (not self.loader.has_plugin(plugin_name)
                and plugin_name not in self.targets
                and plugin_name not in GENERIC_CONFIGS):
            msg = 'configuration provided for unknown plugin "{}"'
            raise ConfigError(msg.format(plugin_name))

@@ -95,8 +95,8 @@ class PluginCache(object):
            raise ConfigError(msg.format(plugin_name, repr(values), type(values)))

        for name, value in values.items():
            if (plugin_name not in GENERIC_CONFIGS and
                    name not in self.get_plugin_parameters(plugin_name)):
            if (plugin_name not in GENERIC_CONFIGS
                    and name not in self.get_plugin_parameters(plugin_name)):
                msg = "'{}' is not a valid parameter for '{}'"
                raise ConfigError(msg.format(name, plugin_name))
|
@ -33,6 +33,7 @@ class JobSpecSource(object):
|
||||
def id(self):
|
||||
return self.config['id']
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
|
@ -16,6 +16,7 @@
|
||||
|
||||
import sys
|
||||
import argparse
|
||||
import locale
|
||||
import logging
|
||||
import os
|
||||
import warnings
|
||||
@ -71,9 +72,23 @@ def split_joined_options(argv):
|
||||
|
||||
# Instead of presenting an obscure error due to a version mismatch explicitly warn the user.
|
||||
def check_devlib_version():
|
||||
if not installed_devlib_version or installed_devlib_version < required_devlib_version:
|
||||
msg = 'WA requires Devlib version >={}. Please update the currently installed version {}'
|
||||
raise HostError(msg.format(format_version(required_devlib_version), devlib.__version__))
|
||||
if not installed_devlib_version or installed_devlib_version[:-1] <= required_devlib_version[:-1]:
|
||||
# Check the 'dev' field separately to account for comparing with release versions.
|
||||
if installed_devlib_version.dev and installed_devlib_version.dev < required_devlib_version.dev:
|
||||
msg = 'WA requires Devlib version >={}. Please update the currently installed version {}'
|
||||
raise HostError(msg.format(format_version(required_devlib_version), devlib.__version__))
|
||||
|
||||
|
||||
# If the default encoding is not UTF-8 warn the user as this may cause compatibility issues
|
||||
# when parsing files.
|
||||
def check_system_encoding():
|
||||
system_encoding = locale.getpreferredencoding()
|
||||
msg = 'System Encoding: {}'.format(system_encoding)
|
||||
if 'UTF-8' not in system_encoding:
|
||||
logger.warning(msg)
|
||||
logger.warning('To prevent encoding issues please use a locale setting which supports UTF-8')
|
||||
else:
|
||||
logger.debug(msg)
|
||||
|
||||
|
||||
def main():
|
||||
@ -115,6 +130,7 @@ def main():
|
||||
logger.debug('devlib version: {}'.format(devlib.__full_version__))
|
||||
logger.debug('Command Line: {}'.format(' '.join(sys.argv)))
|
||||
check_devlib_version()
|
||||
check_system_encoding()
|
||||
|
||||
# each command will add its own subparser
|
||||
subparsers = parser.add_subparsers(dest='command')
|
||||
|
@@ -30,60 +30,49 @@ class WAError(Exception):

class NotFoundError(WAError):
    """Raised when the specified item is not found."""
    pass


class ValidationError(WAError):
    """Raised on failure to validate an extension."""
    pass


class ExecutionError(WAError):
    """Error encountered by the execution framework."""
    pass


class WorkloadError(WAError):
    """General Workload error."""
    pass


class JobError(WAError):
    """Job execution error."""
    pass


class InstrumentError(WAError):
    """General Instrument error."""
    pass


class OutputProcessorError(WAError):
    """General OutputProcessor error."""
    pass


class ResourceError(WAError):
    """General Resolver error."""
    pass


class CommandError(WAError):
    """Raised by commands when they have encountered an error condition
    during execution."""
    pass


class ToolError(WAError):
    """Raised by tools when they have encountered an error condition
    during execution."""
    pass


class ConfigError(WAError):
    """Raised when configuration provided is invalid. This error suggests that
    the user should modify their config and try again."""
    pass


class SerializerSyntaxError(Exception):
@@ -25,7 +25,7 @@ from datetime import datetime
import wa.framework.signal as signal
from wa.framework import instrument as instrumentation
from wa.framework.configuration.core import Status
from wa.framework.exception import TargetError, HostError, WorkloadError
from wa.framework.exception import TargetError, HostError, WorkloadError, ExecutionError
from wa.framework.exception import TargetNotRespondingError, TimeoutError  # pylint: disable=redefined-builtin
from wa.framework.job import Job
from wa.framework.output import init_job_output
@@ -128,8 +128,8 @@ class ExecutionContext(object):
        self.run_state.status = status
        self.run_output.status = status
        self.run_output.info.end_time = datetime.utcnow()
        self.run_output.info.duration = (self.run_output.info.end_time -
                                         self.run_output.info.start_time)
        self.run_output.info.duration = (self.run_output.info.end_time
                                         - self.run_output.info.start_time)
        self.write_output()

    def finalize(self):
@@ -141,21 +141,24 @@ class ExecutionContext(object):
        self.current_job = self.job_queue.pop(0)
        job_output = init_job_output(self.run_output, self.current_job)
        self.current_job.set_output(job_output)
        self.update_job_state(self.current_job)
        return self.current_job

    def end_job(self):
        if not self.current_job:
            raise RuntimeError('No jobs in progress')
        self.completed_jobs.append(self.current_job)
        self.update_job_state(self.current_job)
        self.output.write_result()
        self.current_job = None

    def set_status(self, status, force=False):
    def set_status(self, status, force=False, write=True):
        if not self.current_job:
            raise RuntimeError('No jobs in progress')
        self.current_job.set_status(status, force)
        self.set_job_status(self.current_job, status, force, write)

    def set_job_status(self, job, status, force=False, write=True):
        job.set_status(status, force)
        if write:
            self.run_output.write_state()

    def extract_results(self):
        self.tm.extract_results(self)
@@ -163,13 +166,8 @@ class ExecutionContext(object):
    def move_failed(self, job):
        self.run_output.move_failed(job.output)

    def update_job_state(self, job):
        self.run_state.update_job(job)
        self.run_output.write_state()

    def skip_job(self, job):
        job.status = Status.SKIPPED
        self.run_state.update_job(job)
        self.set_job_status(job, Status.SKIPPED, force=True)
        self.completed_jobs.append(job)

    def skip_remaining_jobs(self):
@@ -249,6 +247,11 @@ class ExecutionContext(object):
    def add_event(self, message):
        self.output.add_event(message)

    def add_classifier(self, name, value, overwrite=False):
        self.output.add_classifier(name, value, overwrite)
        if self.current_job:
            self.current_job.add_classifier(name, value, overwrite)

    def add_metadata(self, key, *args, **kwargs):
        self.output.add_metadata(key, *args, **kwargs)
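    # A hedged usage sketch (hypothetical classifier name and value): a
    # classifier added through the context is recorded on the run output and,
    # when a job is currently executing, on that job's output as well:
    #
    #     context.add_classifier('governor', 'schedutil')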
@@ -288,7 +291,7 @@ class ExecutionContext(object):
            try:
                job.initialize(self)
            except WorkloadError as e:
                job.set_status(Status.FAILED)
                self.set_job_status(job, Status.FAILED, write=False)
                log.log_error(e, self.logger)
                failed_ids.append(job.id)

@@ -298,6 +301,7 @@ class ExecutionContext(object):
                new_queue.append(job)

        self.job_queue = new_queue
        self.write_state()

    def _load_resource_getters(self):
        self.logger.debug('Loading resource discoverers')
@@ -333,7 +337,7 @@ class Executor(object):
    returning.

    The initial context set up involves combining configuration from various
    sources, loading of requided workloads, loading and installation of
    sources, loading of required workloads, loading and installation of
    instruments and output processors, etc. Static validation of the combined
    configuration is also performed.

@@ -349,7 +353,7 @@ class Executor(object):
    def execute(self, config_manager, output):
        """
        Execute the run specified by an agenda. Optionally, selectors may be
        used to only selecute a subset of the specified agenda.
        used to only execute a subset of the specified agenda.

        Params::

@@ -399,7 +403,7 @@ class Executor(object):
        attempts = context.cm.run_config.max_retries
        while attempts:
            try:
                self.target_manager.reboot()
                self.target_manager.reboot(context)
            except TargetError as e:
                if attempts:
                    attempts -= 1
@@ -445,7 +449,7 @@ class Executor(object):
        for status in reversed(Status.levels):
            if status in counter:
                parts.append('{} {}'.format(counter[status], status))
        self.logger.info(status_summary + ', '.join(parts))
        self.logger.info('{}{}'.format(status_summary, ', '.join(parts)))

        self.logger.info('Results can be found in {}'.format(output.basepath))

@@ -533,6 +537,9 @@ class Runner(object):
            self.pm.process_run_output(self.context)
            self.pm.export_run_output(self.context)
            self.pm.finalize(self.context)
            if self.context.reboot_policy.reboot_on_run_completion:
                self.logger.info('Rebooting target on run completion.')
                self.context.tm.reboot(self.context)
            signal.disconnect(self._error_signalled_callback, signal.ERROR_LOGGED)
            signal.disconnect(self._warning_signalled_callback, signal.WARNING_LOGGED)

@@ -552,15 +559,15 @@ class Runner(object):
            with signal.wrap('JOB', self, context):
                context.tm.start()
                self.do_run_job(job, context)
                job.set_status(Status.OK)
                context.set_job_status(job, Status.OK)
        except (Exception, KeyboardInterrupt) as e:  # pylint: disable=broad-except
            log.log_error(e, self.logger)
            if isinstance(e, KeyboardInterrupt):
                context.run_interrupted = True
                job.set_status(Status.ABORTED)
                context.set_job_status(job, Status.ABORTED)
                raise e
            else:
                job.set_status(Status.FAILED)
                context.set_job_status(job, Status.FAILED)
            if isinstance(e, TargetNotRespondingError):
                raise e
            elif isinstance(e, TargetError):
@@ -583,7 +590,7 @@ class Runner(object):
            self.context.skip_job(job)
            return

        job.set_status(Status.RUNNING)
        context.set_job_status(job, Status.RUNNING)
        self.send(signal.JOB_STARTED)

        job.configure_augmentations(context, self.pm)
@@ -594,7 +601,7 @@ class Runner(object):
        try:
            job.setup(context)
        except Exception as e:
            job.set_status(Status.FAILED)
            context.set_job_status(job, Status.FAILED)
            log.log_error(e, self.logger)
            if isinstance(e, (TargetError, TimeoutError)):
                context.tm.verify_target_responsive(context)
@@ -607,10 +614,10 @@ class Runner(object):
                job.run(context)
            except KeyboardInterrupt:
                context.run_interrupted = True
                job.set_status(Status.ABORTED)
                context.set_job_status(job, Status.ABORTED)
                raise
            except Exception as e:
                job.set_status(Status.FAILED)
                context.set_job_status(job, Status.FAILED)
                log.log_error(e, self.logger)
                if isinstance(e, (TargetError, TimeoutError)):
                    context.tm.verify_target_responsive(context)
@@ -623,7 +630,7 @@ class Runner(object):
                self.pm.process_job_output(context)
                self.pm.export_job_output(context)
            except Exception as e:
                job.set_status(Status.PARTIAL)
                context.set_job_status(job, Status.PARTIAL)
                if isinstance(e, (TargetError, TimeoutError)):
                    context.tm.verify_target_responsive(context)
                self.context.record_ui_state('output-error')
@@ -631,7 +638,7 @@ class Runner(object):

        except KeyboardInterrupt:
            context.run_interrupted = True
            job.set_status(Status.ABORTED)
            context.set_status(Status.ABORTED)
            raise
        finally:
            # If setup was successfully completed, teardown must
@@ -653,6 +660,9 @@ class Runner(object):
                self.logger.error(msg.format(job.id, job.iteration, job.status))
                self.context.failed_jobs += 1
                self.send(signal.JOB_FAILED)
                if rc.bail_on_job_failure:
                    raise ExecutionError('Job {} failed, bailing.'.format(job.id))

        else:  # status not in retry_on_status
            self.logger.info('Job completed with status {}'.format(job.status))
            if job.status != 'ABORTED':
@@ -664,8 +674,9 @@ class Runner(object):
    def retry_job(self, job):
        retry_job = Job(job.spec, job.iteration, self.context)
        retry_job.workload = job.workload
        retry_job.state = job.state
        retry_job.retries = job.retries + 1
        retry_job.set_status(Status.PENDING)
        self.context.set_job_status(retry_job, Status.PENDING, force=True)
        self.context.job_queue.insert(0, retry_job)
        self.send(signal.JOB_RESTARTED)
@@ -31,7 +31,7 @@ import requests
from wa import Parameter, settings, __file__ as _base_filepath
from wa.framework.resource import ResourceGetter, SourcePriority, NO_ONE
from wa.framework.exception import ResourceError
from wa.utils.misc import (ensure_directory_exists as _d,
from wa.utils.misc import (ensure_directory_exists as _d, atomic_write_path,
                           ensure_file_directory_exists as _f, sha256, urljoin)
from wa.utils.types import boolean, caseless_string

@@ -78,15 +78,20 @@ def get_path_matches(resource, files):
    return matches


# pylint: disable=too-many-return-statements
def get_from_location(basepath, resource):
    if resource.kind == 'file':
        path = os.path.join(basepath, resource.path)
        if os.path.exists(path):
            return path
    elif resource.kind == 'executable':
        path = os.path.join(basepath, 'bin', resource.abi, resource.filename)
        if os.path.exists(path):
            return path
        bin_dir = os.path.join(basepath, 'bin', resource.abi)
        if not os.path.exists(bin_dir):
            return None
        for entry in os.listdir(bin_dir):
            path = os.path.join(bin_dir, entry)
            if resource.match(path):
                return path
    elif resource.kind == 'revent':
        path = os.path.join(basepath, 'revent_files')
        if os.path.exists(path):
@@ -234,21 +239,19 @@ class Http(ResourceGetter):
        index_url = urljoin(self.url, 'index.json')
        response = self.geturl(index_url)
        if response.status_code != http.client.OK:
            message = 'Could not fetch "{}"; recieved "{} {}"'
            message = 'Could not fetch "{}"; received "{} {}"'
            self.logger.error(message.format(index_url,
                                             response.status_code,
                                             response.reason))
            return {}
        if sys.version_info[0] == 3:
            content = response.content.decode('utf-8')
        else:
            content = response.content
        content = response.content.decode('utf-8')
        return json.loads(content)

    def download_asset(self, asset, owner_name):
        url = urljoin(self.url, owner_name, asset['path'])
        local_path = _f(os.path.join(settings.dependencies_directory, '__remote',
                                     owner_name, asset['path'].replace('/', os.sep)))

        if os.path.exists(local_path) and not self.always_fetch:
            local_sha = sha256(local_path)
            if local_sha == asset['sha256']:
@@ -257,14 +260,15 @@ class Http(ResourceGetter):
        self.logger.debug('Downloading {}'.format(url))
        response = self.geturl(url, stream=True)
        if response.status_code != http.client.OK:
            message = 'Could not download asset "{}"; recieved "{} {}"'
            message = 'Could not download asset "{}"; received "{} {}"'
            self.logger.warning(message.format(url,
                                               response.status_code,
                                               response.reason))
            return
        with open(local_path, 'wb') as wfh:
            for chunk in response.iter_content(chunk_size=self.chunk_size):
                wfh.write(chunk)
        with atomic_write_path(local_path) as at_path:
            with open(at_path, 'wb') as wfh:
                for chunk in response.iter_content(chunk_size=self.chunk_size):
                    wfh.write(chunk)
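        # A note on the change above (a sketch of the intent): atomic_write_path
        # is assumed to stage the download at a temporary path and move it to
        # local_path only once all chunks are written, so an interrupted
        # transfer cannot leave a partially written asset at the destination.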
|
||||
return local_path
|
||||
|
||||
def geturl(self, url, stream=False):
|
||||
@ -322,7 +326,8 @@ class Filer(ResourceGetter):
|
||||
|
||||
"""
|
||||
parameters = [
|
||||
Parameter('remote_path', global_alias='remote_assets_path', default='',
|
||||
Parameter('remote_path', global_alias='remote_assets_path',
|
||||
default=settings.assets_repository,
|
||||
description="""
|
||||
Path, on the local system, where the assets are located.
|
||||
"""),
|
||||
|
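The download path above first short-circuits on a matching sha256, then streams the response through atomic_write_path so a partially written file can never be mistaken for a complete asset. A minimal sketch of the same pattern, using os.replace for the atomic step (the helper bodies here are assumptions, not WA's internals):

```python
import hashlib
import os
import tempfile

import requests

def sha256(path, chunk_size=1024 * 1024):
    h = hashlib.sha256()
    with open(path, 'rb') as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b''):
            h.update(chunk)
    return h.hexdigest()

def download_if_stale(url, local_path, expected_sha256, chunk_size=8192):
    # Skip the download entirely when the cached copy already matches.
    if os.path.exists(local_path) and sha256(local_path) == expected_sha256:
        return local_path
    response = requests.get(url, stream=True)
    response.raise_for_status()
    # Write to a temp file in the same directory, then atomically replace,
    # so readers never observe a half-written asset.
    fd, tmp_path = tempfile.mkstemp(dir=os.path.dirname(local_path) or '.')
    with os.fdopen(fd, 'wb') as wfh:
        for chunk in response.iter_content(chunk_size=chunk_size):
            wfh.write(chunk)
    os.replace(tmp_path, local_path)
    return local_path
```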
@@ -50,6 +50,7 @@ def init_user_directory(overwrite_existing=False):  # pylint: disable=R0914
# If running with sudo on POSIX, change the ownership to the real user.
real_user = os.getenv('SUDO_USER')
if real_user:
# pylint: disable=import-outside-toplevel
import pwd  # done here as module won't import on win32
user_entry = pwd.getpwnam(real_user)
uid, gid = user_entry.pw_uid, user_entry.pw_gid
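When a tool is invoked via sudo, files it creates would otherwise end up owned by root; the hunk above recovers the invoking user from SUDO_USER and looks up their uid/gid. A sketch of the complete pattern (the recursive chown step is an assumption about what follows the hunk):

```python
import os

def fix_ownership(path):
    """Give a directory created under sudo back to the invoking user (POSIX only)."""
    real_user = os.getenv('SUDO_USER')
    if not real_user:
        return  # not running under sudo; nothing to do
    import pwd  # imported lazily: the module does not exist on win32
    user_entry = pwd.getpwnam(real_user)
    uid, gid = user_entry.pw_uid, user_entry.pw_gid
    for root, dirs, files in os.walk(path):
        for entry in dirs + files:
            os.chown(os.path.join(root, entry), uid, gid)
    os.chown(path, uid, gid)
```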
@@ -98,13 +98,12 @@ and the code to clear these file goes in teardown method. ::

"""

import sys
import logging
import inspect
from collections import OrderedDict

from wa.framework import signal
from wa.framework.plugin import Plugin
from wa.framework.plugin import TargetedPlugin
from wa.framework.exception import (TargetNotRespondingError, TimeoutError,  # pylint: disable=redefined-builtin
WorkloadError, TargetError)
from wa.utils.log import log_error

@@ -325,10 +324,7 @@ def install(instrument, context):
if not callable(attr):
msg = 'Attribute {} not callable in {}.'
raise ValueError(msg.format(attr_name, instrument))
if sys.version_info[0] == 3:
argspec = inspect.getfullargspec(attr)
else:
argspec = inspect.getargspec(attr)  # pylint: disable=deprecated-method
argspec = inspect.getfullargspec(attr)
arg_num = len(argspec.args)
# Instrument callbacks will be passed exactly two arguments: self
# (the instrument instance to which the callback is bound) and

@@ -421,14 +417,13 @@ def get_disabled():
return [i for i in installed if not i.is_enabled]

class Instrument(Plugin):
class Instrument(TargetedPlugin):
"""
Base class for instrument implementations.
"""
kind = "instrument"

def __init__(self, target, **kwargs):
super(Instrument, self).__init__(**kwargs)
self.target = target
def __init__(self, *args, **kwargs):
super(Instrument, self).__init__(*args, **kwargs)
self.is_enabled = True
self.is_broken = False
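Dropping the Python 2 branch leaves a single getfullargspec call that validates an instrument callback's arity before it is installed. A compact sketch of that validation (names are illustrative):

```python
import inspect

def validate_callback(attr, attr_name, owner):
    if not callable(attr):
        raise ValueError('Attribute {} not callable in {}.'.format(attr_name, owner))
    argspec = inspect.getfullargspec(attr)
    # Bound-method callbacks receive exactly (self, context).
    if len(argspec.args) != 2:
        raise ValueError('{} must accept exactly two arguments (self, context); '
                         'got {}'.format(attr_name, argspec.args))

class ExampleInstrument:
    def setup(self, context):  # valid: (self, context)
        pass

validate_callback(ExampleInstrument.setup, 'setup', 'ExampleInstrument')
```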
@@ -23,6 +23,7 @@ from datetime import datetime
from wa.framework import pluginloader, signal, instrument
from wa.framework.configuration.core import Status
from wa.utils.log import indentcontext
from wa.framework.run import JobState

class Job(object):

@@ -37,24 +38,29 @@ class Job(object):
def label(self):
return self.spec.label

@property
def classifiers(self):
return self.spec.classifiers

@property
def status(self):
return self._status
return self.state.status

@property
def has_been_initialized(self):
return self._has_been_initialized

@property
def retries(self):
return self.state.retries

@status.setter
def status(self, value):
self._status = value
self.state.status = value
self.state.timestamp = datetime.utcnow()
if self.output:
self.output.status = value

@retries.setter
def retries(self, value):
self.state.retries = value

def __init__(self, spec, iteration, context):
self.logger = logging.getLogger('job')
self.spec = spec

@@ -63,13 +69,13 @@ class Job(object):
self.workload = None
self.output = None
self.run_time = None
self.retries = 0
self.classifiers = copy(self.spec.classifiers)
self._has_been_initialized = False
self._status = Status.NEW
self.state = JobState(self.id, self.label, self.iteration, Status.NEW)

def load(self, target, loader=pluginloader):
self.logger.info('Loading job {}'.format(self))
if self.iteration == 1:
if self.id not in self._workload_cache:
self.workload = loader.get_workload(self.spec.workload_name,
target,
**self.spec.workload_parameters)

@@ -91,7 +97,6 @@ class Job(object):
self.workload.initialize(context)
self.set_status(Status.PENDING)
self._has_been_initialized = True
context.update_job_state(self)

def configure_augmentations(self, context, pm):
self.logger.info('Configuring augmentations')

@@ -181,6 +186,11 @@ class Job(object):
if force or self.status < status:
self.status = status

def add_classifier(self, name, value, overwrite=False):
if name in self.classifiers and not overwrite:
raise ValueError('Cannot overwrite "{}" classifier.'.format(name))
self.classifiers[name] = value

def __str__(self):
return '{} ({}) [{}]'.format(self.id, self.label, self.iteration)
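The change above moves a job's status and retry count out of private attributes and into a shared JobState object, so the run state always sees the same record the job mutates. A minimal sketch of that delegation (simplified types, not WA's actual classes):

```python
from datetime import datetime

class JobState:
    def __init__(self, job_id, status):
        self.id = job_id
        self.status = status
        self.retries = 0
        self.timestamp = datetime.utcnow()

class Job:
    def __init__(self, job_id):
        # Single source of truth, shared with the run-state bookkeeping.
        self.state = JobState(job_id, 'NEW')

    @property
    def status(self):
        return self.state.status

    @status.setter
    def status(self, value):
        self.state.status = value
        self.state.timestamp = datetime.utcnow()  # every transition is stamped

job = Job('dhrystone-1')
shared = job.state
job.status = 'RUNNING'
assert shared.status == 'RUNNING'  # the same record, not a copy
```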
@@ -23,6 +23,8 @@ except ImportError:
import logging
import os
import shutil
import tarfile
import tempfile
from collections import OrderedDict, defaultdict
from copy import copy, deepcopy
from datetime import datetime

@@ -37,7 +39,8 @@ from wa.framework.run import RunState, RunInfo
from wa.framework.target.info import TargetInfo
from wa.framework.version import get_wa_version_with_commit
from wa.utils.doc import format_simple_table
from wa.utils.misc import touch, ensure_directory_exists, isiterable, format_ordered_dict
from wa.utils.misc import (touch, ensure_directory_exists, isiterable,
format_ordered_dict, safe_extract)
from wa.utils.postgres import get_schema_versions
from wa.utils.serializer import write_pod, read_pod, Podable, json
from wa.utils.types import enum, numeric

@@ -145,9 +148,10 @@ class Output(object):
if not os.path.exists(path):
msg = 'Attempting to add non-existing artifact: {}'
raise HostError(msg.format(path))
is_dir = os.path.isdir(path)
path = os.path.relpath(path, self.basepath)

self.result.add_artifact(name, path, kind, description, classifiers)
self.result.add_artifact(name, path, kind, description, classifiers, is_dir)

def add_event(self, message):
self.result.add_event(message)

@@ -162,6 +166,9 @@ class Output(object):
artifact = self.get_artifact(name)
return self.get_path(artifact.path)

def add_classifier(self, name, value, overwrite=False):
self.result.add_classifier(name, value, overwrite)

def add_metadata(self, key, *args, **kwargs):
self.result.add_metadata(key, *args, **kwargs)

@@ -262,8 +269,8 @@ class RunOutput(Output, RunOutputCommon):
self._combined_config = None
self.jobs = []
self.job_specs = []
if (not os.path.isfile(self.statefile) or
not os.path.isfile(self.infofile)):
if (not os.path.isfile(self.statefile)
or not os.path.isfile(self.infofile)):
msg = '"{}" does not exist or is not a valid WA output directory.'
raise ValueError(msg.format(self.basepath))
self.reload()

@@ -346,6 +353,13 @@ class JobOutput(Output):
self.spec = None
self.reload()

@property
def augmentations(self):
job_augs = set([])
for aug in self.spec.augmentations:
job_augs.add(aug)
return list(job_augs)

class Result(Podable):

@@ -378,9 +392,10 @@ class Result(Podable):
logger.debug('Adding metric: {}'.format(metric))
self.metrics.append(metric)

def add_artifact(self, name, path, kind, description=None, classifiers=None):
def add_artifact(self, name, path, kind, description=None, classifiers=None,
is_dir=False):
artifact = Artifact(name, path, kind, description=description,
classifiers=classifiers)
classifiers=classifiers, is_dir=is_dir)
logger.debug('Adding artifact: {}'.format(artifact))
self.artifacts.append(artifact)

@@ -399,6 +414,21 @@ class Result(Podable):
return artifact
raise HostError('Artifact "{}" not found'.format(name))

def add_classifier(self, name, value, overwrite=False):
if name in self.classifiers and not overwrite:
raise ValueError('Cannot overwrite "{}" classifier.'.format(name))
self.classifiers[name] = value

for metric in self.metrics:
if name in metric.classifiers and not overwrite:
raise ValueError('Cannot overwrite "{}" classifier; clashes with {}.'.format(name, metric))
metric.classifiers[name] = value

for artifact in self.artifacts:
if name in artifact.classifiers and not overwrite:
raise ValueError('Cannot overwrite "{}" classifier; clashes with {}.'.format(name, artifact))
artifact.classifiers[name] = value

def add_metadata(self, key, *args, **kwargs):
force = kwargs.pop('force', False)
if kwargs:

@@ -516,7 +546,7 @@ class Artifact(Podable):

"""

_pod_serialization_version = 1
_pod_serialization_version = 2

@staticmethod
def from_pod(pod):

@@ -525,9 +555,11 @@ class Artifact(Podable):
pod['kind'] = ArtifactType(pod['kind'])
instance = Artifact(**pod)
instance._pod_version = pod_version  # pylint: disable =protected-access
instance.is_dir = pod.pop('is_dir')
return instance

def __init__(self, name, path, kind, description=None, classifiers=None):
def __init__(self, name, path, kind, description=None, classifiers=None,
is_dir=False):
""""
:param name: Name that uniquely identifies this artifact.
:param path: The *relative* path of the artifact. Depending on the

@@ -543,7 +575,6 @@ class Artifact(Podable):
:param classifiers: A set of key-value pairs to further classify this
metric beyond current iteration (e.g. this can be
used to identify sub-tests).

"""
super(Artifact, self).__init__()
self.name = name

@@ -555,11 +586,13 @@ class Artifact(Podable):
raise ValueError(msg.format(kind, ARTIFACT_TYPES))
self.description = description
self.classifiers = classifiers or {}
self.is_dir = is_dir

def to_pod(self):
pod = super(Artifact, self).to_pod()
pod.update(self.__dict__)
pod['kind'] = str(self.kind)
pod['is_dir'] = self.is_dir
return pod

@staticmethod

@@ -567,11 +600,17 @@ class Artifact(Podable):
pod['_pod_version'] = pod.get('_pod_version', 1)
return pod

@staticmethod
def _pod_upgrade_v2(pod):
pod['is_dir'] = pod.get('is_dir', False)
return pod

def __str__(self):
return self.path

def __repr__(self):
return '{} ({}): {}'.format(self.name, self.kind, self.path)
ft = 'dir' if self.is_dir else 'file'
return '{} ({}) ({}): {}'.format(self.name, ft, self.kind, self.path)
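Bumping _pod_serialization_version to 2 and adding _pod_upgrade_v2 is the pattern Podable uses to migrate old serialized state: each upgrade step fills in the fields its version introduced. A stripped-down sketch of that chain (a simplified Podable; the assumption is that the real one dispatches upgrade steps by version number):

```python
class Podable:
    _pod_serialization_version = 2

    @classmethod
    def from_pod(cls, pod):
        version = pod.get('_pod_serialization_version', 1)
        # Replay every upgrade step between the stored and current versions.
        for target in range(version + 1, cls._pod_serialization_version + 1):
            pod = getattr(cls, '_pod_upgrade_v{}'.format(target))(pod)
        return pod

class Artifact(Podable):
    @staticmethod
    def _pod_upgrade_v2(pod):
        # v2 introduced is_dir; old pods default to a plain file.
        pod['is_dir'] = pod.get('is_dir', False)
        return pod

old_pod = {'_pod_serialization_version': 1, 'name': 'logcat'}
assert Artifact.from_pod(old_pod)['is_dir'] is False
```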
class Metric(Podable):

@@ -738,9 +777,13 @@ def init_job_output(run_output, job):

def discover_wa_outputs(path):
for root, dirs, _ in os.walk(path):
# Use topdown=True to allow pruning dirs
for root, dirs, _ in os.walk(path, topdown=True):
if '__meta' in dirs:
yield RunOutput(root)
# Avoid recursing into the artifact as it can be very lengthy if a
# large number of file is present (sysfs dump)
dirs.clear()
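os.walk only honours in-place mutation of dirs when topdown=True, which is what lets the generator above stop descending once a WA output directory is recognised. A self-contained sketch of the pruning trick:

```python
import os

def find_marked_dirs(path, marker='__meta'):
    """Yield directories containing `marker` without descending into them."""
    for root, dirs, _ in os.walk(path, topdown=True):
        if marker in dirs:
            yield root
            # Clearing `dirs` in place tells os.walk not to recurse further,
            # so a large output tree (e.g. a sysfs dump) is never traversed.
            dirs.clear()
```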
def _save_raw_config(meta_dir, state):

@@ -804,6 +847,19 @@ class DatabaseOutput(Output):

def get_artifact_path(self, name):
artifact = self.get_artifact(name)
if artifact.is_dir:
return self._read_dir_artifact(artifact)
else:
return self._read_file_artifact(artifact)

def _read_dir_artifact(self, artifact):
artifact_path = tempfile.mkdtemp(prefix='wa_')
with tarfile.open(fileobj=self.conn.lobject(int(artifact.path), mode='b'), mode='r|gz') as tar_file:
safe_extract(tar_file, artifact_path)
self.conn.commit()
return artifact_path

def _read_file_artifact(self, artifact):
artifact = StringIO(self.conn.lobject(int(artifact.path)).read())
self.conn.commit()
return artifact

@@ -892,13 +948,15 @@ class DatabaseOutput(Output):

def _get_artifacts(self):
columns = ['artifacts.name', 'artifacts.description', 'artifacts.kind',
('largeobjects.lo_oid', 'path'), 'artifacts.oid',
('largeobjects.lo_oid', 'path'), 'artifacts.oid', 'artifacts.is_dir',
'artifacts._pod_version', 'artifacts._pod_serialization_version']
tables = ['largeobjects', 'artifacts']
joins = [('classifiers', 'classifiers.artifact_oid = artifacts.oid')]
conditions = ['artifacts.{}_oid = \'{}\''.format(self.kind, self.oid),
'artifacts.large_object_uuid = largeobjects.oid',
'artifacts.job_oid IS NULL']
'artifacts.large_object_uuid = largeobjects.oid']
# If retrieving run level artifacts we want those that don't also belong to a job
if self.kind == 'run':
conditions.append('artifacts.job_oid IS NULL')
pod = self._read_db(columns, tables, conditions, joins)
for artifact in pod:
artifact['path'] = str(artifact['path'])

@@ -913,8 +971,9 @@ class DatabaseOutput(Output):

def kernel_config_from_db(raw):
kernel_config = {}
for k, v in zip(raw[0], raw[1]):
kernel_config[k] = v
if raw:
for k, v in zip(raw[0], raw[1]):
kernel_config[k] = v
return kernel_config

@@ -948,9 +1007,10 @@ class RunDatabaseOutput(DatabaseOutput, RunOutputCommon):

@property
def _db_targetfile(self):
columns = ['os', 'is_rooted', 'target', 'abi', 'cpus', 'os_version',
columns = ['os', 'is_rooted', 'target', 'modules', 'abi', 'cpus', 'os_version',
'hostid', 'hostname', 'kernel_version', 'kernel_release',
'kernel_sha1', 'kernel_config', 'sched_features',
'kernel_sha1', 'kernel_config', 'sched_features', 'page_size_kb',
'system_id', 'screen_resolution', 'prop', 'android_id',
'_pod_version', '_pod_serialization_version']
tables = ['targets']
conditions = ['targets.run_oid = \'{}\''.format(self.oid)]

@@ -1003,6 +1063,7 @@ class RunDatabaseOutput(DatabaseOutput, RunOutputCommon):
jobs = self._read_db(columns, tables, conditions)

for job in jobs:
job['augmentations'] = self._get_job_augmentations(job['oid'])
job['workload_parameters'] = workload_params.pop(job['oid'], {})
job['runtime_parameters'] = runtime_params.pop(job['oid'], {})
job.pop('oid')

@@ -1166,6 +1227,15 @@ class RunDatabaseOutput(DatabaseOutput, RunOutputCommon):
logger.debug('Failed to deserialize job_oid:{}-"{}":"{}"'.format(job_oid, k, v))
return parm_dict

def _get_job_augmentations(self, job_oid):
columns = ['jobs_augs.augmentation_oid', 'augmentations.name',
'augmentations.oid', 'jobs_augs.job_oid']
tables = ['jobs_augs', 'augmentations']
conditions = ['jobs_augs.job_oid = \'{}\''.format(job_oid),
'jobs_augs.augmentation_oid = augmentations.oid']
augmentations = self._read_db(columns, tables, conditions)
return [aug['name'] for aug in augmentations]

def _list_runs(self):
columns = ['runs.run_uuid', 'runs.run_name', 'runs.project',
'runs.project_stage', 'runs.status', 'runs.start_time', 'runs.end_time']

@@ -1217,3 +1287,11 @@ class JobDatabaseOutput(DatabaseOutput):

def __str__(self):
return '{}-{}-{}'.format(self.id, self.label, self.iteration)

@property
def augmentations(self):
job_augs = set([])
if self.spec:
for aug in self.spec.augmentations:
job_augs.add(aug)
return list(job_augs)
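Directory artifacts come out of the database as gzipped tarballs, and extraction goes through safe_extract rather than TarFile.extractall. A plausible sketch of such a guard against path-traversal entries (this is an assumption about what WA's safe_extract checks, in the spirit of the CVE-2007-4559 mitigation):

```python
import os
import tarfile

def safe_extract(tar_file, dest):
    """Extract a tarball, refusing members that would escape `dest`."""
    dest_root = os.path.realpath(dest)
    for member in tar_file.getmembers():
        target = os.path.realpath(os.path.join(dest, member.name))
        # Reject entries like '../../etc/passwd' that resolve outside dest.
        if os.path.commonpath([dest_root, target]) != dest_root:
            raise ValueError('Blocked path traversal in tar member: {}'.format(member.name))
    tar_file.extractall(dest)
```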
@@ -18,8 +18,6 @@
import os
import sys
import inspect
import imp
import string
import logging
from collections import OrderedDict, defaultdict
from itertools import chain

@@ -32,16 +30,10 @@ from wa.framework.exception import (NotFoundError, PluginLoaderError, TargetErro
ValidationError, ConfigError, HostError)
from wa.utils import log
from wa.utils.misc import (ensure_directory_exists as _d, walk_modules, load_class,
merge_dicts_simple, get_article)
merge_dicts_simple, get_article, import_path)
from wa.utils.types import identifier

if sys.version_info[0] == 3:
MODNAME_TRANS = str.maketrans(':/\\.', '____')
else:
MODNAME_TRANS = string.maketrans(':/\\.', '____')

class AttributeCollection(object):
"""
Accumulator for plugin attribute objects (such as Parameters or Artifacts).

@@ -157,6 +149,7 @@ class Alias(object):
raise ConfigError(msg.format(param, self.name, ext.name))

# pylint: disable=bad-mcs-classmethod-argument
class PluginMeta(type):
"""
This basically adds some magic to plugins to make implementing new plugins,

@@ -246,7 +239,7 @@ class Plugin(with_metaclass(PluginMeta, object)):

@classmethod
def get_default_config(cls):
return {p.name: p.default for p in cls.parameters}
return {p.name: p.default for p in cls.parameters if not p.deprecated}

@property
def dependencies_directory(self):

@@ -367,7 +360,7 @@ class Plugin(with_metaclass(PluginMeta, object)):
self._modules.append(module)

def __str__(self):
return self.name
return str(self.name)

def __repr__(self):
params = []

@@ -383,12 +376,22 @@ class TargetedPlugin(Plugin):

"""

suppoted_targets = []
supported_targets = []
parameters = [
Parameter('cleanup_assets', kind=bool,
global_alias='cleanup_assets',
aliases=['clean_up'],
default=True,
description="""
If ``True``, assets that are deployed or created by the
plugin will be removed again from the device.
"""),
]

@classmethod
def check_compatible(cls, target):
if cls.suppoted_targets:
if target.os not in cls.suppoted_targets:
if cls.supported_targets:
if target.os not in cls.supported_targets:
msg = 'Incompatible target OS "{}" for {}'
raise TargetError(msg.format(target.os, cls.name))

@@ -611,24 +614,30 @@ class PluginLoader(object):
self.logger.debug('Checking path %s', path)
if os.path.isfile(path):
self._discover_from_file(path)
for root, _, files in os.walk(path, followlinks=True):
should_skip = False
for igpath in ignore_paths:
if root.startswith(igpath):
should_skip = True
break
if should_skip:
continue
for fname in files:
if os.path.splitext(fname)[1].lower() != '.py':
elif os.path.exists(path):
for root, _, files in os.walk(path, followlinks=True):
should_skip = False
for igpath in ignore_paths:
if root.startswith(igpath):
should_skip = True
break
if should_skip:
continue
filepath = os.path.join(root, fname)
self._discover_from_file(filepath)
for fname in files:
if os.path.splitext(fname)[1].lower() != '.py':
continue
filepath = os.path.join(root, fname)
self._discover_from_file(filepath)
elif not os.path.isabs(path):
try:
for module in walk_modules(path):
self._discover_in_module(module)
except Exception:  # NOQA pylint: disable=broad-except
pass

def _discover_from_file(self, filepath):
try:
modname = os.path.splitext(filepath[1:])[0].translate(MODNAME_TRANS)
module = imp.load_source(modname, filepath)
module = import_path(filepath)
self._discover_in_module(module)
except (SystemExit, ImportError) as e:
if self.keep_going:
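Replacing imp.load_source with an import_path helper is the standard modernisation, since the imp module is removed in recent Python releases. A sketch of what such a helper typically looks like using importlib (the helper name matches the diff; its body here is an assumption):

```python
import importlib.util
import os
import sys

def import_path(filepath):
    """Import a module from an arbitrary .py file path."""
    modname = os.path.splitext(os.path.basename(filepath))[0]
    spec = importlib.util.spec_from_file_location(modname, filepath)
    module = importlib.util.module_from_spec(spec)
    sys.modules[modname] = module   # register before exec, as import would
    spec.loader.exec_module(module)
    return module
```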
@@ -35,6 +35,7 @@ class __LoaderWrapper(object):
def reset(self):
# These imports cannot be done at top level, because of
# sys.modules manipulation below
# pylint: disable=import-outside-toplevel
from wa.framework.plugin import PluginLoader
from wa.framework.configuration.core import settings
self._loader = PluginLoader(settings.plugin_packages,
@@ -16,15 +16,14 @@ import logging
import os
import re

from devlib.utils.android import ApkInfo

from wa.framework import pluginloader
from wa.framework.plugin import Plugin
from wa.framework.exception import ResourceError
from wa.framework.configuration import settings
from wa.utils import log
from wa.utils.android import get_cacheable_apk_info
from wa.utils.misc import get_object_name
from wa.utils.types import enum, list_or_string, prioritylist
from wa.utils.types import enum, list_or_string, prioritylist, version_tuple

SourcePriority = enum(['package', 'remote', 'lan', 'local',

@@ -142,10 +141,12 @@ class ApkFile(Resource):

def __init__(self, owner, variant=None, version=None,
package=None, uiauto=False, exact_abi=False,
supported_abi=None):
supported_abi=None, min_version=None, max_version=None):
super(ApkFile, self).__init__(owner)
self.variant = variant
self.version = version
self.max_version = max_version
self.min_version = min_version
self.package = package
self.uiauto = uiauto
self.exact_abi = exact_abi

@@ -158,21 +159,25 @@ class ApkFile(Resource):
def match(self, path):
name_matches = True
version_matches = True
version_range_matches = True
package_matches = True
abi_matches = True
uiauto_matches = uiauto_test_matches(path, self.uiauto)
if self.version is not None:
if self.version:
version_matches = apk_version_matches(path, self.version)
if self.variant is not None:
if self.max_version or self.min_version:
version_range_matches = apk_version_matches_range(path, self.min_version,
self.max_version)
if self.variant:
name_matches = file_name_matches(path, self.variant)
if self.package is not None:
if self.package:
package_matches = package_name_matches(path, self.package)
if self.supported_abi is not None:
if self.supported_abi:
abi_matches = apk_abi_matches(path, self.supported_abi,
self.exact_abi)
return name_matches and version_matches and \
uiauto_matches and package_matches and \
abi_matches
version_range_matches and uiauto_matches \
and package_matches and abi_matches

def __str__(self):
text = '<{}\'s apk'.format(self.owner)

@@ -274,18 +279,39 @@ class ResourceResolver(object):

def apk_version_matches(path, version):
version = list_or_string(version)
info = ApkInfo(path)
info = get_cacheable_apk_info(path)
for v in version:
if info.version_name == v or info.version_code == v:
if v in (info.version_name, info.version_code):
return True
if loose_version_matching(v, info.version_name):
return True
return False

def apk_version_matches_range(path, min_version=None, max_version=None):
info = get_cacheable_apk_info(path)
return range_version_matching(info.version_name, min_version, max_version)

def range_version_matching(apk_version, min_version=None, max_version=None):
if not apk_version:
return False
apk_version = version_tuple(apk_version)

if max_version:
max_version = version_tuple(max_version)
if apk_version > max_version:
return False
if min_version:
min_version = version_tuple(min_version)
if apk_version < min_version:
return False
return True

def loose_version_matching(config_version, apk_version):
config_version = config_version.split('.')
apk_version = apk_version.split('.')
config_version = version_tuple(config_version)
apk_version = version_tuple(apk_version)

if len(apk_version) < len(config_version):
return False  # More specific version requested than available

@@ -306,18 +332,18 @@ def file_name_matches(path, pattern):

def uiauto_test_matches(path, uiauto):
info = ApkInfo(path)
info = get_cacheable_apk_info(path)
return uiauto == ('com.arm.wa.uiauto' in info.package)

def package_name_matches(path, package):
info = ApkInfo(path)
info = get_cacheable_apk_info(path)
return info.package == package

def apk_abi_matches(path, supported_abi, exact_abi=False):
supported_abi = list_or_string(supported_abi)
info = ApkInfo(path)
info = get_cacheable_apk_info(path)
# If no native code present, suitable for all devices.
if not info.native_code:
return True
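The new min/max bounds are compared after normalising version strings through version_tuple, which makes '4.10' sort above '4.9'. A small sketch of the comparison under a plausible version_tuple (the helper below simply splits on dots; WA's real implementation may differ):

```python
def version_tuple(version):
    # '4.10.1' -> (4, 10, 1); numeric parts compare numerically, not lexically.
    return tuple(int(part) for part in str(version).split('.') if part.isdigit())

def range_version_matching(apk_version, min_version=None, max_version=None):
    if not apk_version:
        return False
    v = version_tuple(apk_version)
    if max_version and v > version_tuple(max_version):
        return False
    if min_version and v < version_tuple(min_version):
        return False
    return True

assert range_version_matching('4.10', min_version='4.9')   # (4, 10) >= (4, 9)
assert not range_version_matching('4.8.1', min_version='4.9')
assert range_version_matching('9.0', max_version='10.0')
```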
@@ -102,13 +102,7 @@ class RunState(Podable):
self.timestamp = datetime.utcnow()

def add_job(self, job):
job_state = JobState(job.id, job.label, job.iteration, job.status)
self.jobs[(job_state.id, job_state.iteration)] = job_state

def update_job(self, job):
state = self.jobs[(job.id, job.iteration)]
state.status = job.status
state.timestamp = datetime.utcnow()
self.jobs[(job.state.id, job.state.iteration)] = job.state

def get_status_counts(self):
counter = Counter()

@@ -163,7 +157,7 @@ class JobState(Podable):
pod['label'] = self.label
pod['iteration'] = self.iteration
pod['status'] = self.status.to_pod()
pod['retries'] = 0
pod['retries'] = self.retries
pod['timestamp'] = self.timestamp
return pod
@@ -15,7 +15,7 @@

"""
This module wraps louie signalling mechanism. It relies on modified version of loiue
This module wraps louie signalling mechanism. It relies on modified version of louie
that has prioritization added to handler invocation.

"""

@@ -23,8 +23,9 @@ import sys
import logging
from contextlib import contextmanager

from louie import dispatcher, saferef  # pylint: disable=wrong-import-order
from louie.dispatcher import _remove_receiver
import wrapt
from louie import dispatcher  # pylint: disable=wrong-import-order

from wa.utils.types import prioritylist, enum

@@ -242,8 +243,8 @@ def connect(handler, signal, sender=dispatcher.Any, priority=0):
receivers = signals[signal]
else:
receivers = signals[signal] = _prioritylist_wrapper()
receivers.add(handler, priority)
dispatcher.connect(handler, signal, sender)
receivers.add(saferef.safe_ref(handler, on_delete=_remove_receiver), priority)

@@ -268,7 +269,7 @@ def send(signal, sender=dispatcher.Anonymous, *args, **kwargs):
"""
Sends a signal, causing connected handlers to be invoked.

Paramters:
Parameters:

:signal: Signal to be sent. This must be an instance of :class:`wa.core.signal.Signal`
or its subclasses.
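Storing handlers as safe references means a connected bound method no longer keeps its owning instance alive; when the instance is collected, the reference cleans itself out of the registry. A toy sketch of the same idea with weakref.WeakMethod (louie's saferef is more elaborate, so treat this as an analogy rather than its implementation):

```python
import weakref

class Registry:
    def __init__(self):
        self._handlers = []

    def connect(self, bound_method):
        # WeakMethod drops automatically once the owner is garbage-collected.
        self._handlers.append(weakref.WeakMethod(bound_method))

    def send(self, *args):
        for ref in list(self._handlers):
            handler = ref()
            if handler is None:
                self._handlers.remove(ref)  # owner died; prune the entry
            else:
                handler(*args)

class Listener:
    def on_signal(self, payload):
        print('got', payload)

registry = Registry()
listener = Listener()
registry.connect(listener.on_signal)
registry.send('hello')   # invokes the live handler
del listener
registry.send('again')   # stale entry is pruned, nothing is invoked
```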
@@ -21,9 +21,11 @@ import tempfile
import threading
import time

from wa.framework.plugin import Parameter
from wa.framework.exception import WorkerThreadError
from wa.framework.plugin import Parameter
from wa.utils.android import LogcatParser
from wa.utils.misc import touch
import wa.framework.signal as signal

class LinuxAssistant(object):

@@ -33,6 +35,9 @@ class LinuxAssistant(object):
def __init__(self, target):
self.target = target

def initialize(self):
pass

def start(self):
pass

@@ -42,6 +47,9 @@ class LinuxAssistant(object):
def stop(self):
pass

def finalize(self):
pass

class AndroidAssistant(object):

@@ -66,40 +74,111 @@ class AndroidAssistant(object):
temporary locaiton on the host. Setting the value of the poll
period enables this behavior.
"""),
Parameter('stay_on_mode', kind=int,
constraint=lambda x: 0 <= x <= 7,
description="""
Specify whether the screen should stay on while the device is
charging:

0: never stay on
1: with AC charger
2: with USB charger
4: with wireless charger

Values can be OR-ed together to produce combinations, for
instance ``7`` will cause the screen to stay on when charging
under any method.
"""),
]

def __init__(self, target, logcat_poll_period=None, disable_selinux=True):
def __init__(self, target, logcat_poll_period=None, disable_selinux=True, stay_on_mode=None):
self.target = target
self.logcat_poll_period = logcat_poll_period
self.disable_selinux = disable_selinux
self.stay_on_mode = stay_on_mode
self.orig_stay_on_mode = self.target.get_stay_on_mode() if stay_on_mode is not None else None
self.logcat_poller = None
self.logger = logging.getLogger('logcat')
self._logcat_marker_msg = None
self._logcat_marker_tag = None
signal.connect(self._before_workload, signal.BEFORE_WORKLOAD_EXECUTION)
if self.logcat_poll_period:
signal.connect(self._after_workload, signal.AFTER_WORKLOAD_EXECUTION)

def initialize(self):
if self.target.is_rooted and self.disable_selinux:
self.do_disable_selinux()
if self.stay_on_mode is not None:
self.target.set_stay_on_mode(self.stay_on_mode)

def start(self):
if self.logcat_poll_period:
self.logcat_poller = LogcatPoller(self.target, self.logcat_poll_period)
self.logcat_poller.start()
else:
if not self._logcat_marker_msg:
self._logcat_marker_msg = 'WA logcat marker for wrap detection'
self._logcat_marker_tag = 'WAlog'

def stop(self):
if self.logcat_poller:
self.logcat_poller.stop()

def finalize(self):
if self.stay_on_mode is not None:
self.target.set_stay_on_mode(self.orig_stay_on_mode)

def extract_results(self, context):
logcat_file = os.path.join(context.output_directory, 'logcat.log')
self.dump_logcat(logcat_file)
context.add_artifact('logcat', logcat_file, kind='log')
self.clear_logcat()
if not self._check_logcat_nowrap(logcat_file):
self.logger.warning('The main logcat buffer wrapped and lost data;'
' results that rely on this buffer may be'
' inaccurate or incomplete.'
)

def dump_logcat(self, outfile):
if self.logcat_poller:
self.logcat_poller.write_log(outfile)
else:
self.target.dump_logcat(outfile)
self.target.dump_logcat(outfile, logcat_format='threadtime')

def clear_logcat(self):
if self.logcat_poller:
self.logcat_poller.clear_buffer()
else:
self.target.clear_logcat()

def _before_workload(self, _):
if self.logcat_poller:
self.logcat_poller.start_logcat_wrap_detect()
else:
self.insert_logcat_marker()

def _after_workload(self, _):
self.logcat_poller.stop_logcat_wrap_detect()

def _check_logcat_nowrap(self, outfile):
if self.logcat_poller:
return self.logcat_poller.check_logcat_nowrap(outfile)
else:
parser = LogcatParser()
for event in parser.parse(outfile):
if (event.tag == self._logcat_marker_tag
and event.message == self._logcat_marker_msg):
return True

return False

def insert_logcat_marker(self):
self.logger.debug('Inserting logcat marker')
self.target.execute(
'log -t "{}" "{}"'.format(
self._logcat_marker_tag, self._logcat_marker_msg
)
)

def do_disable_selinux(self):
# SELinux was added in Android 4.3 (API level 18). Trying to

@@ -119,15 +198,21 @@ class LogcatPoller(threading.Thread):
self.period = period
self.timeout = timeout
self.stop_signal = threading.Event()
self.lock = threading.Lock()
self.lock = threading.RLock()
self.buffer_file = tempfile.mktemp()
self.last_poll = 0
self.daemon = True
self.exc = None
self._logcat_marker_tag = 'WALog'
self._logcat_marker_msg = 'WA logcat marker for wrap detection:{}'
self._marker_count = 0
self._start_marker = None
self._end_marker = None

def run(self):
self.logger.debug('Starting polling')
try:
self.insert_logcat_marker()
while True:
if self.stop_signal.is_set():
break

@@ -135,6 +220,7 @@ class LogcatPoller(threading.Thread):
current_time = time.time()
if (current_time - self.last_poll) >= self.period:
self.poll()
self.insert_logcat_marker()
time.sleep(0.5)
except Exception:  # pylint: disable=W0703
self.exc = WorkerThreadError(self.name, sys.exc_info())

@@ -170,9 +256,49 @@ class LogcatPoller(threading.Thread):

def poll(self):
self.last_poll = time.time()
self.target.dump_logcat(self.buffer_file, append=True, timeout=self.timeout)
self.target.dump_logcat(self.buffer_file, append=True, timeout=self.timeout, logcat_format='threadtime')
self.target.clear_logcat()

def insert_logcat_marker(self):
self.logger.debug('Inserting logcat marker')
with self.lock:
self.target.execute(
'log -t "{}" "{}"'.format(
self._logcat_marker_tag,
self._logcat_marker_msg.format(self._marker_count)
)
)
self._marker_count += 1

def check_logcat_nowrap(self, outfile):
parser = LogcatParser()
counter = self._start_marker
for event in parser.parse(outfile):
message = self._logcat_marker_msg.split(':')[0]
if not (event.tag == self._logcat_marker_tag
and event.message.split(':')[0] == message):
continue

number = int(event.message.split(':')[1])
if number > counter:
return False
elif number == counter:
counter += 1

if counter == self._end_marker:
return True

return False

def start_logcat_wrap_detect(self):
with self.lock:
self._start_marker = self._marker_count
self.insert_logcat_marker()

def stop_logcat_wrap_detect(self):
with self.lock:
self._end_marker = self._marker_count

class ChromeOsAssistant(LinuxAssistant):
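The wrap check relies on numbered markers periodically injected into logcat: if the captured log still contains every counter value between the start and end markers, nothing was lost; a gap means the circular buffer wrapped. A condensed sketch of that scan over a list of parsed messages (a simplified event model, not WA's LogcatParser):

```python
def buffer_did_not_wrap(messages, start_marker, end_marker,
                        prefix='WA logcat marker for wrap detection'):
    """Return True if every marker in [start_marker, end_marker) survived."""
    counter = start_marker
    for message in messages:
        if not message.startswith(prefix + ':'):
            continue
        number = int(message.split(':')[1])
        if number > counter:
            return False          # a marker is missing: the buffer wrapped
        if number == counter:
            counter += 1
        if counter == end_marker:
            return True           # saw the full, gap-free marker sequence
    return False

log = ['noise', 'WA logcat marker for wrap detection:0',
       'more noise', 'WA logcat marker for wrap detection:1']
assert buffer_did_not_wrap(log, start_marker=0, end_marker=2)
assert not buffer_did_not_wrap(log[2:], start_marker=0, end_marker=2)
```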
@@ -14,14 +14,13 @@
#

import inspect
from collections import OrderedDict
from copy import copy

from devlib import (LinuxTarget, AndroidTarget, LocalLinuxTarget,
ChromeOsTarget, Platform, Juno, TC2, Gem5SimulationPlatform,
AdbConnection, SshConnection, LocalConnection,
Gem5Connection)
TelnetConnection, Gem5Connection)
from devlib.target import DEFAULT_SHELL_PROMPT
from devlib.utils.ssh import DEFAULT_SSH_SUDO_COMMAND

from wa.framework import pluginloader
from wa.framework.configuration.core import get_config_point_map

@@ -69,11 +68,14 @@ def instantiate_target(tdesc, params, connect=None, extra_platform_params=None):

for name, value in params.items():
if name in target_params:
tp[name] = value
if not target_params[name].deprecated:
tp[name] = value
elif name in platform_params:
pp[name] = value
if not platform_params[name].deprecated:
pp[name] = value
elif name in conn_params:
cp[name] = value
if not conn_params[name].deprecated:
cp[name] = value
elif name in assistant_params:
pass
else:

@@ -129,7 +131,8 @@ class TargetDescription(object):
config = {}
for pattr in param_attrs:
for p in getattr(self, pattr):
config[p.name] = p.default
if not p.deprecated:
config[p.name] = p.default
return config

def _set(self, attr, vals):

@@ -195,6 +198,12 @@ COMMON_TARGET_PARAMS = [
description='''
A regex that matches the shell prompt on the target.
'''),

Parameter('max_async', kind=int, default=50,
description='''
The maximum number of concurent asynchronous connections to the
target maintained at any time.
'''),
]

COMMON_PLATFORM_PARAMS = [

@@ -262,7 +271,6 @@ VEXPRESS_PLATFORM_PARAMS = [

``dtr``: toggle the DTR line on the serial connection
``reboottxt``: create ``reboot.txt`` in the root of the VEMSD mount.

'''),
]

@@ -300,6 +308,48 @@ CONNECTION_PARAMS = {
description="""
ADB server to connect to.
"""),
Parameter(
'adb_port', kind=int,
description="""
ADB port to connect to.
"""),
Parameter(
'poll_transfers', kind=bool,
default=True,
description="""
File transfers will be polled for activity. Inactive
file transfers are cancelled.
"""),
Parameter(
'start_transfer_poll_delay', kind=int,
default=30,
description="""
How long to wait (s) for a transfer to complete
before polling transfer activity. Requires ``poll_transfers``
to be set.
"""),
Parameter(
'total_transfer_timeout', kind=int,
default=3600,
description="""
The total time to elapse before a transfer is cancelled, regardless
of its activity. Requires ``poll_transfers`` to be set.
"""),
Parameter(
'transfer_poll_period', kind=int,
default=30,
description="""
The period at which transfer activity is sampled. Requires
``poll_transfers`` to be set. Too small values may cause
the destination size to appear the same over one or more sample
periods, causing improper transfer cancellation.
"""),
Parameter(
'adb_as_root', kind=bool,
default=False,
description="""
Specify whether the adb server should be started in root mode.
""")
],
SshConnection: [
Parameter(

@@ -316,6 +366,8 @@ CONNECTION_PARAMS = {
'password', kind=str,
description="""
Password to use.
(When connecting to a passwordless machine set to an
empty string to prevent attempting ssh key authentication.)
"""),
Parameter(
'keyfile', kind=str,

@@ -324,14 +376,101 @@ CONNECTION_PARAMS = {
"""),
Parameter(
'port', kind=int,
default=22,
description="""
The port SSH server is listening on on the target.
"""),
Parameter(
'telnet', kind=bool, default=False,
'strict_host_check', kind=bool, default=False,
description="""
If set to ``True``, a Telnet connection, rather than
SSH will be used.
Specify whether devices should be connected to if
their host key does not match the systems known host keys. """),
Parameter(
'sudo_cmd', kind=str,
default=DEFAULT_SSH_SUDO_COMMAND,
description="""
Sudo command to use. Must have ``{}`` specified
somewhere in the string it indicate where the command
to be run via sudo is to go.
"""),
Parameter(
'use_scp', kind=bool,
default=False,
description="""
Allow using SCP as method of file transfer instead
of the default SFTP.
"""),
Parameter(
'poll_transfers', kind=bool,
default=True,
description="""
File transfers will be polled for activity. Inactive
file transfers are cancelled.
"""),
Parameter(
'start_transfer_poll_delay', kind=int,
default=30,
description="""
How long to wait (s) for a transfer to complete
before polling transfer activity. Requires ``poll_transfers``
to be set.
"""),
Parameter(
'total_transfer_timeout', kind=int,
default=3600,
description="""
The total time to elapse before a transfer is cancelled, regardless
of its activity. Requires ``poll_transfers`` to be set.
"""),
Parameter(
'transfer_poll_period', kind=int,
default=30,
description="""
The period at which transfer activity is sampled. Requires
``poll_transfers`` to be set. Too small values may cause
the destination size to appear the same over one or more sample
periods, causing improper transfer cancellation.
"""),
# Deprecated Parameters
Parameter(
'telnet', kind=str,
description="""
Original shell prompt to expect.
""",
deprecated=True),
Parameter(
'password_prompt', kind=str,
description="""
Password prompt to expect
""",
deprecated=True),
Parameter(
'original_prompt', kind=str,
description="""
Original shell prompt to expect.
""",
deprecated=True),
],
TelnetConnection: [
Parameter(
'host', kind=str, mandatory=True,
description="""
Host name or IP address of the target.
"""),
Parameter(
'username', kind=str, mandatory=True,
description="""
User name to connect with
"""),
Parameter(
'password', kind=str,
description="""
Password to use.
"""),
Parameter(
'port', kind=int,
description="""
The port SSH server is listening on on the target.
"""),
Parameter(
'password_prompt', kind=str,

@@ -411,16 +550,16 @@ CONNECTION_PARAMS['ChromeOsConnection'] = \
CONNECTION_PARAMS[AdbConnection] + CONNECTION_PARAMS[SshConnection]

# name --> ((target_class, conn_class), params_list, defaults)
# name --> ((target_class, conn_class, unsupported_platforms), params_list, defaults)
TARGETS = {
'linux': ((LinuxTarget, SshConnection), COMMON_TARGET_PARAMS, None),
'android': ((AndroidTarget, AdbConnection), COMMON_TARGET_PARAMS +
'linux': ((LinuxTarget, SshConnection, []), COMMON_TARGET_PARAMS, None),
'android': ((AndroidTarget, AdbConnection, []), COMMON_TARGET_PARAMS +
[Parameter('package_data_directory', kind=str, default='/data/data',
description='''
Directory containing Android data
'''),
], None),
'chromeos': ((ChromeOsTarget, 'ChromeOsConnection'), COMMON_TARGET_PARAMS +
'chromeos': ((ChromeOsTarget, 'ChromeOsConnection', []), COMMON_TARGET_PARAMS +
[Parameter('package_data_directory', kind=str, default='/data/data',
description='''
Directory containing Android data

@@ -441,7 +580,8 @@ TARGETS = {
the need for privilege elevation.
'''),
], None),
'local': ((LocalLinuxTarget, LocalConnection), COMMON_TARGET_PARAMS, None),
'local': ((LocalLinuxTarget, LocalConnection, [Juno, Gem5SimulationPlatform, TC2]),
COMMON_TARGET_PARAMS, None),
}

# name --> assistant

@@ -452,31 +592,87 @@ ASSISTANTS = {
'chromeos': ChromeOsAssistant
}

# name --> ((platform_class, conn_class), params_list, defaults, target_defaults)
# Platform specific parameter overrides.
JUNO_PLATFORM_OVERRIDES = [
Parameter('baudrate', kind=int, default=115200,
description='''
Baud rate for the serial connection.
'''),
Parameter('vemsd_mount', kind=str, default='/media/JUNO',
description='''
VExpress MicroSD card mount location. This is a MicroSD card in
the VExpress device that is mounted on the host via USB. The card
contains configuration files for the platform and firmware and
kernel images to be flashed.
'''),
Parameter('bootloader', kind=str, default='u-boot',
allowed_values=['uefi', 'uefi-shell', 'u-boot', 'bootmon'],
description='''
Selects the bootloader mechanism used by the board. Depending on
firmware version, a number of possible boot mechanisms may be use.

Please see ``devlib`` documentation for descriptions.
'''),
Parameter('hard_reset_method', kind=str, default='dtr',
allowed_values=['dtr', 'reboottxt'],
description='''
There are a couple of ways to reset VersatileExpress board if the
software running on the board becomes unresponsive. Both require
configuration to be enabled (please see ``devlib`` documentation).

``dtr``: toggle the DTR line on the serial connection
``reboottxt``: create ``reboot.txt`` in the root of the VEMSD mount.
'''),
]
TC2_PLATFORM_OVERRIDES = [
Parameter('baudrate', kind=int, default=38400,
description='''
Baud rate for the serial connection.
'''),
Parameter('vemsd_mount', kind=str, default='/media/VEMSD',
description='''
VExpress MicroSD card mount location. This is a MicroSD card in
the VExpress device that is mounted on the host via USB. The card
contains configuration files for the platform and firmware and
kernel images to be flashed.
'''),
Parameter('bootloader', kind=str, default='bootmon',
allowed_values=['uefi', 'uefi-shell', 'u-boot', 'bootmon'],
description='''
Selects the bootloader mechanism used by the board. Depending on
firmware version, a number of possible boot mechanisms may be use.

Please see ``devlib`` documentation for descriptions.
'''),
Parameter('hard_reset_method', kind=str, default='reboottxt',
allowed_values=['dtr', 'reboottxt'],
description='''
There are a couple of ways to reset VersatileExpress board if the
software running on the board becomes unresponsive. Both require
configuration to be enabled (please see ``devlib`` documentation).

``dtr``: toggle the DTR line on the serial connection
``reboottxt``: create ``reboot.txt`` in the root of the VEMSD mount.
'''),
]

# name --> ((platform_class, conn_class, conn_overrides), params_list, defaults, target_overrides)
# Note: normally, connection is defined by the Target name, but
# platforms may choose to override it
# Note: the target_defaults allows you to override common target_params for a
# Note: the target_overrides allows you to override common target_params for a
# particular platform. Parameters you can override are in COMMON_TARGET_PARAMS
# Example of overriding one of the target parameters: Replace last None with:
# {'shell_prompt': CUSTOM__SHELL_PROMPT}
# Example of overriding one of the target parameters: Replace last `None` with
# a list of `Parameter` objects to be used instead.
PLATFORMS = {
'generic': ((Platform, None), COMMON_PLATFORM_PARAMS, None, None),
'juno': ((Juno, None), COMMON_PLATFORM_PARAMS + VEXPRESS_PLATFORM_PARAMS,
{
'vemsd_mount': '/media/JUNO',
'baudrate': 115200,
'bootloader': 'u-boot',
'hard_reset_method': 'dtr',
},
None),
'tc2': ((TC2, None), COMMON_PLATFORM_PARAMS + VEXPRESS_PLATFORM_PARAMS,
{
'vemsd_mount': '/media/VEMSD',
'baudrate': 38400,
'bootloader': 'bootmon',
'hard_reset_method': 'reboottxt',
}, None),
'gem5': ((Gem5SimulationPlatform, Gem5Connection), GEM5_PLATFORM_PARAMS, None, None),
'generic': ((Platform, None, None), COMMON_PLATFORM_PARAMS, None, None),
'juno': ((Juno, None, [
Parameter('host', kind=str, mandatory=False,
description="Host name or IP address of the target."),
]
), COMMON_PLATFORM_PARAMS + VEXPRESS_PLATFORM_PARAMS, JUNO_PLATFORM_OVERRIDES, None),
'tc2': ((TC2, None, None), COMMON_PLATFORM_PARAMS + VEXPRESS_PLATFORM_PARAMS,
TC2_PLATFORM_OVERRIDES, None),
'gem5': ((Gem5SimulationPlatform, Gem5Connection, None), GEM5_PLATFORM_PARAMS, None, None),
}

@@ -496,16 +692,17 @@ class DefaultTargetDescriptor(TargetDescriptor):
# pylint: disable=attribute-defined-outside-init,too-many-locals
result = []
for target_name, target_tuple in TARGETS.items():
(target, conn), target_params = self._get_item(target_tuple)
(target, conn, unsupported_platforms), target_params = self._get_item(target_tuple)
assistant = ASSISTANTS[target_name]
conn_params = CONNECTION_PARAMS[conn]
for platform_name, platform_tuple in PLATFORMS.items():
platform_target_defaults = platform_tuple[-1]
platform_tuple = platform_tuple[0:-1]
(platform, plat_conn), platform_params = self._get_item(platform_tuple)
(platform, plat_conn, conn_defaults), platform_params = self._get_item(platform_tuple)
if platform in unsupported_platforms:
continue
# Add target defaults specified in the Platform tuple
target_params = self._apply_param_defaults(target_params,
platform_target_defaults)
target_params = self._override_params(target_params, platform_target_defaults)
name = '{}_{}'.format(platform_name, target_name)
td = TargetDescription(name, self)
td.target = target

@@ -517,31 +714,31 @@ class DefaultTargetDescriptor(TargetDescriptor):

if plat_conn:
td.conn = plat_conn
td.conn_params = CONNECTION_PARAMS[plat_conn]
td.conn_params = self._override_params(CONNECTION_PARAMS[plat_conn],
conn_defaults)
else:
td.conn = conn
td.conn_params = conn_params
td.conn_params = self._override_params(conn_params, conn_defaults)

result.append(td)
return result

def _apply_param_defaults(self, params, defaults):  # pylint: disable=no-self-use
'''Adds parameters in the defaults dict to params list.
Return updated params as a list (idempotent function).'''
if not defaults:
def _override_params(self, params, overrides):  # pylint: disable=no-self-use
''' Returns a new list of parameters replacing any parameter with the
corresponding parameter in overrides'''
if not overrides:
return params
param_map = OrderedDict((p.name, copy(p)) for p in params)
for name, value in defaults.items():
if name not in param_map:
raise ValueError('Unexpected default "{}"'.format(name))
param_map[name].default = value
# Convert the OrderedDict to a list to return the same type
param_map = {p.name: p for p in params}
for override in overrides:
if override.name in param_map:
param_map[override.name] = override
# Return the list of overriden parameters
return list(param_map.values())

def _get_item(self, item_tuple):
cls, params, defaults = item_tuple
updated_params = self._apply_param_defaults(params, defaults)
return cls, updated_params
cls_tuple, params, defaults = item_tuple
updated_params = self._override_params(params, defaults)
return cls_tuple, updated_params
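_override_params replaces whole Parameter objects by name rather than patching defaults, which is why the platform overrides above are declared as full Parameter lists. A minimal sketch of that merge (a tiny stand-in Parameter; insertion order is preserved because Python 3.7+ dicts keep it):

```python
class Parameter:
    # Minimal stand-in with just the fields the merge needs.
    def __init__(self, name, default=None):
        self.name = name
        self.default = default

def override_params(params, overrides):
    """Return params with same-named entries replaced by their override."""
    if not overrides:
        return params
    param_map = {p.name: p for p in params}
    for override in overrides:
        if override.name in param_map:
            param_map[override.name] = override
    return list(param_map.values())

common = [Parameter('baudrate', 9600), Parameter('bootloader', 'u-boot')]
juno = override_params(common, [Parameter('baudrate', 115200)])
assert [(p.name, p.default) for p in juno] == [('baudrate', 115200),
                                               ('bootloader', 'u-boot')]
```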
_adhoc_target_descriptions = []

@@ -584,7 +781,7 @@ def _get_target_defaults(target):

def add_description_for_target(target, description=None, **kwargs):
(base_name, ((_, base_conn), base_params, _)) = _get_target_defaults(target)
(base_name, ((_, base_conn, _), base_params, _)) = _get_target_defaults(target)

if 'target_params' not in kwargs:
kwargs['target_params'] = base_params

@@ -592,7 +789,7 @@ def add_description_for_target(target, description=None, **kwargs):
if 'platform' not in kwargs:
kwargs['platform'] = Platform
if 'platform_params' not in kwargs:
for (plat, conn), params, _, _ in PLATFORMS.values():
for (plat, conn, _), params, _, _ in PLATFORMS.values():
if plat == kwargs['platform']:
kwargs['platform_params'] = params
if conn is not None and kwargs['conn'] is None:
@@ -23,6 +23,7 @@ from devlib.utils.android import AndroidProperties
 from wa.framework.configuration.core import settings
 from wa.framework.exception import ConfigError
 from wa.utils.serializer import read_pod, write_pod, Podable
+from wa.utils.misc import atomic_write_path


 def cpuinfo_from_pod(pod):
@@ -221,6 +222,7 @@ class CpuInfo(Podable):
 def get_target_info(target):
     info = TargetInfo()
     info.target = target.__class__.__name__
+    info.modules = target.modules
     info.os = target.os
     info.os_version = target.os_version
     info.system_id = target.system_id
@@ -228,16 +230,15 @@ def get_target_info(target):
     info.is_rooted = target.is_rooted
     info.kernel_version = target.kernel_version
     info.kernel_config = target.config
+    info.hostname = target.hostname
+    info.hostid = target.hostid

     try:
         info.sched_features = target.read_value('/sys/kernel/debug/sched_features').split()
     except TargetError:
         # best effort -- debugfs might not be mounted
         pass

-    hostid_string = target.execute('{} hostid'.format(target.busybox)).strip()
-    info.hostid = int(hostid_string, 16)
-    info.hostname = target.execute('{} hostname'.format(target.busybox)).strip()
-
     for i, name in enumerate(target.cpuinfo.cpu_names):
         cpu = CpuInfo()
         cpu.id = i
@@ -285,11 +286,13 @@ def read_target_info_cache():
 def write_target_info_cache(cache):
     if not os.path.exists(settings.cache_directory):
         os.makedirs(settings.cache_directory)
-    write_pod(cache, settings.target_info_cache_file)
+    with atomic_write_path(settings.target_info_cache_file) as at_path:
+        write_pod(cache, at_path)


-def get_target_info_from_cache(system_id):
-    cache = read_target_info_cache()
+def get_target_info_from_cache(system_id, cache=None):
+    if cache is None:
+        cache = read_target_info_cache()
     pod = cache.get(system_id, None)

     if not pod:
@@ -303,8 +306,9 @@ def get_target_info_from_cache(system_id):
     return TargetInfo.from_pod(pod)


-def cache_target_info(target_info, overwrite=False):
-    cache = read_target_info_cache()
+def cache_target_info(target_info, overwrite=False, cache=None):
+    if cache is None:
+        cache = read_target_info_cache()
     if target_info.system_id in cache and not overwrite:
         raise ValueError('TargetInfo for {} is already in cache.'.format(target_info.system_id))
     cache[target_info.system_id] = target_info.to_pod()
@@ -313,12 +317,13 @@ def cache_target_info(target_info, overwrite=False):

 class TargetInfo(Podable):

-    _pod_serialization_version = 4
+    _pod_serialization_version = 5

     @staticmethod
     def from_pod(pod):
         instance = super(TargetInfo, TargetInfo).from_pod(pod)
         instance.target = pod['target']
+        instance.modules = pod['modules']
         instance.abi = pod['abi']
         instance.cpus = [CpuInfo.from_pod(c) for c in pod['cpus']]
         instance.os = pod['os']
@@ -343,6 +348,7 @@ class TargetInfo(Podable):
     def __init__(self):
         super(TargetInfo, self).__init__()
         self.target = None
+        self.modules = []
         self.cpus = []
         self.os = None
         self.os_version = None
@@ -362,6 +368,7 @@ class TargetInfo(Podable):
     def to_pod(self):
         pod = super(TargetInfo, self).to_pod()
         pod['target'] = self.target
+        pod['modules'] = self.modules
         pod['abi'] = self.abi
         pod['cpus'] = [c.to_pod() for c in self.cpus]
         pod['os'] = self.os
@@ -413,3 +420,8 @@ class TargetInfo(Podable):
     @staticmethod
     def _pod_upgrade_v4(pod):
         return TargetInfo._pod_upgrade_v3(pod)
+
+    @staticmethod
+    def _pod_upgrade_v5(pod):
+        pod['modules'] = pod.get('modules') or []
+        return pod
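The switch to atomic_write_path above means a crash part-way through serialisation can no longer leave a truncated cache file behind. The real helper lives in wa.utils.misc; what follows is only a minimal sketch of the write-to-temp-then-rename pattern it implies, not WA's actual implementation:

    import os
    import tempfile
    from contextlib import contextmanager

    @contextmanager
    def atomic_write_path_sketch(path):
        # Create the temporary file in the destination directory so the
        # final os.replace() stays on one filesystem and remains atomic.
        fd, tmp_path = tempfile.mkstemp(dir=os.path.dirname(path) or '.')
        os.close(fd)
        try:
            yield tmp_path              # caller writes to the temporary path
            os.replace(tmp_path, path)  # atomic swap: readers never see a partial file
        finally:
            if os.path.exists(tmp_path):
                os.remove(tmp_path)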
@@ -24,8 +24,10 @@ from wa.framework.plugin import Parameter
 from wa.framework.target.descriptor import (get_target_description,
                                             instantiate_target,
                                             instantiate_assistant)
-from wa.framework.target.info import get_target_info, get_target_info_from_cache, cache_target_info
+from wa.framework.target.info import (get_target_info, get_target_info_from_cache,
+                                      cache_target_info, read_target_info_cache)
 from wa.framework.target.runtime_parameter_manager import RuntimeParameterManager
+from wa.utils.types import module_name_set


 class TargetManager(object):
@@ -55,6 +57,7 @@ class TargetManager(object):

     def initialize(self):
         self._init_target()
+        self.assistant.initialize()

         # If the target supports hotplugging, online all cpus before performing
         # discovery, and restore the original configuration once complete.
@@ -75,6 +78,8 @@ class TargetManager(object):
     def finalize(self):
+        if not self.target:
+            return
         if self.assistant:
             self.assistant.finalize()
         if self.disconnect or isinstance(self.target.platform, Gem5SimulationPlatform):
             self.logger.info('Disconnecting from the device')
             with signal.wrap('TARGET_DISCONNECT'):
@@ -91,10 +96,20 @@ class TargetManager(object):

     @memoized
     def get_target_info(self):
-        info = get_target_info_from_cache(self.target.system_id)
+        cache = read_target_info_cache()
+        info = get_target_info_from_cache(self.target.system_id, cache=cache)

         if info is None:
             info = get_target_info(self.target)
-            cache_target_info(info)
+            cache_target_info(info, cache=cache)
+        else:
+            # If the module configuration has changed from when the target info
+            # was previously cached, it is possible additional info will be
+            # available, so we should re-generate the cache.
+            if module_name_set(info.modules) != module_name_set(self.target.modules):
+                info = get_target_info(self.target)
+                cache_target_info(info, overwrite=True, cache=cache)

         return info

     def reboot(self, context, hard=False):
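Taken together, the two changes let a single read of the cache file serve both the lookup and any write-back. A hedged usage sketch (here target stands for an already-connected devlib target, and the system id is made up):

    cache = read_target_info_cache()
    info = get_target_info_from_cache('example-system-id', cache=cache)
    if info is None:
        info = get_target_info(target)        # expensive target interrogation
        cache_target_info(info, cache=cache)  # reuses the cache already in memory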
@@ -178,7 +178,7 @@ class HotplugRuntimeConfig(RuntimeConfig):
             raise TargetError('Target does not appear to support hotplug')

     def validate_parameters(self):
-        if len(self.num_cores) == self.target.number_of_cpus:
+        if self.num_cores and len(self.num_cores) == self.target.number_of_cpus:
             if all(v is False for v in list(self.num_cores.values())):
                 raise ValueError('Cannot set number of all cores to 0')

@@ -694,7 +694,7 @@ class CpufreqRuntimeConfig(RuntimeConfig):
             else:
                 common_freqs = common_freqs.intersection(self.supported_cpu_freqs.get(cpu) or set())
                 all_freqs = all_freqs.union(self.supported_cpu_freqs.get(cpu) or set())
-                common_gov = common_gov.intersection(self.supported_cpu_governors.get(cpu))
+                common_gov = common_gov.intersection(self.supported_cpu_governors.get(cpu) or set())

         return all_freqs, common_freqs, common_gov

@@ -732,7 +732,7 @@ class IdleStateValue(object):
         '''Checks passed state and converts to its ID'''
         value = caseless_string(value)
         for s_id, s_name, s_desc in self.values:
-            if value == s_id or value == s_name or value == s_desc:
+            if value in (s_id, s_name, s_desc):
                 return s_id
         msg = 'Invalid IdleState: "{}"; Must be in {}'
         raise ValueError(msg.format(value, self.values))
@@ -878,6 +878,11 @@ class AndroidRuntimeConfig(RuntimeConfig):
         if value is not None:
             obj.config['screen_on'] = value

+    @staticmethod
+    def set_unlock_screen(obj, value):
+        if value is not None:
+            obj.config['unlock_screen'] = value
+
     def __init__(self, target):
         self.config = defaultdict(dict)
         super(AndroidRuntimeConfig, self).__init__(target)
@@ -930,6 +935,16 @@ class AndroidRuntimeConfig(RuntimeConfig):
                 Specify whether the device screen should be on
                 """)

+        param_name = 'unlock_screen'
+        self._runtime_params[param_name] = \
+            RuntimeParameter(
+                param_name, kind=str,
+                default=None,
+                setter=self.set_unlock_screen,
+                description="""
+                Specify how the device screen should be unlocked (e.g., vertical)
+                """)
+
     def check_target(self):
         if self.target.os != 'android' and self.target.os != 'chromeos':
             raise ConfigError('Target does not appear to be running Android')
@@ -940,6 +955,7 @@ class AndroidRuntimeConfig(RuntimeConfig):
         pass

     def commit(self):
+        # pylint: disable=too-many-branches
         if 'airplane_mode' in self.config:
             new_airplane_mode = self.config['airplane_mode']
             old_airplane_mode = self.target.get_airplane_mode()
@@ -964,13 +980,20 @@ class AndroidRuntimeConfig(RuntimeConfig):

         if 'brightness' in self.config:
             self.target.set_brightness(self.config['brightness'])

         if 'rotation' in self.config:
             self.target.set_rotation(self.config['rotation'])

         if 'screen_on' in self.config:
             if self.config['screen_on']:
                 self.target.ensure_screen_is_on()
             else:
                 self.target.ensure_screen_is_off()

+        if self.config.get('unlock_screen'):
+            self.target.ensure_screen_is_on()
+            if self.target.is_screen_locked():
+                self.target.swipe_to_unlock(self.config['unlock_screen'])
+
     def clear(self):
         self.config = {}
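The "or set()" guard added to the governor lookup mirrors the one already used for frequencies: dict.get() yields None both for a missing key and for a key mapped to None, and set.intersection(None) raises TypeError. A minimal illustration (the per-cpu map here is made up):

    supported_cpu_governors = {}            # hypothetical: cpu -> set of governors
    common_gov = {'schedutil', 'performance'}
    common_gov = common_gov.intersection(supported_cpu_governors.get(0) or set())
    print(common_gov)                       # set() rather than a TypeError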
@@ -22,6 +22,7 @@ from wa.framework.target.runtime_config import (SysfileValuesRuntimeConfig,
                                                 CpuidleRuntimeConfig,
                                                 AndroidRuntimeConfig)
 from wa.utils.types import obj_dict, caseless_string
+from wa.framework import pluginloader


 class RuntimeParameterManager(object):
@@ -37,9 +38,16 @@ class RuntimeParameterManager(object):

     def __init__(self, target):
         self.target = target
-        self.runtime_configs = [cls(self.target) for cls in self.runtime_config_cls]
         self.runtime_params = {}

+        try:
+            for rt_cls in pluginloader.list_plugins(kind='runtime-config'):
+                if rt_cls not in self.runtime_config_cls:
+                    self.runtime_config_cls.append(rt_cls)
+        except ValueError:
+            pass
+        self.runtime_configs = [cls(self.target) for cls in self.runtime_config_cls]

         runtime_parameter = namedtuple('RuntimeParameter', 'cfg_point, rt_config')
         for cfg in self.runtime_configs:
             for param in cfg.supported_parameters:
@@ -1,18 +1,18 @@
 apply plugin: 'com.android.library'

 android {
-    compileSdkVersion 25
-    buildToolsVersion '25.0.3'
+    compileSdkVersion 28
+    buildToolsVersion '28.0.3'
     defaultConfig {
         minSdkVersion 18
-        targetSdkVersion 25
+        targetSdkVersion 28
         testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner"
     }
 }

 dependencies {
-    compile fileTree(include: ['*.jar'], dir: 'libs')
-    compile 'com.android.support.test:runner:0.5'
-    compile 'com.android.support.test:rules:0.5'
-    compile 'com.android.support.test.uiautomator:uiautomator-v18:2.1.2'
+    implementation fileTree(include: ['*.jar'], dir: 'libs')
+    implementation 'com.android.support.test:runner:0.5'
+    implementation 'com.android.support.test:rules:0.5'
+    implementation 'com.android.support.test.uiautomator:uiautomator-v18:2.1.2'
 }
@@ -45,7 +45,7 @@ public class BaseUiAutomation {

     public enum FindByCriteria { BY_ID, BY_TEXT, BY_DESC };
     public enum Direction { UP, DOWN, LEFT, RIGHT, NULL };
-    public enum ScreenOrientation { RIGHT, NATURAL, LEFT };
+    public enum ScreenOrientation { RIGHT, NATURAL, LEFT, PORTRAIT, LANDSCAPE };
     public enum PinchType { IN, OUT, NULL };

     // Time in milliseconds
@@ -176,6 +176,8 @@ public class BaseUiAutomation {
     }

     public void setScreenOrientation(ScreenOrientation orientation) throws Exception {
+        int width = mDevice.getDisplayWidth();
+        int height = mDevice.getDisplayHeight();
         switch (orientation) {
             case RIGHT:
                 mDevice.setOrientationRight();
@@ -186,6 +188,30 @@ public class BaseUiAutomation {
             case LEFT:
                 mDevice.setOrientationLeft();
                 break;
+            case LANDSCAPE:
+                if (mDevice.isNaturalOrientation()){
+                    if (height > width){
+                        mDevice.setOrientationRight();
+                    }
+                }
+                else {
+                    if (height > width){
+                        mDevice.setOrientationNatural();
+                    }
+                }
+                break;
+            case PORTRAIT:
+                if (mDevice.isNaturalOrientation()){
+                    if (height < width){
+                        mDevice.setOrientationRight();
+                    }
+                }
+                else {
+                    if (height < width){
+                        mDevice.setOrientationNatural();
+                    }
+                }
+                break;
             default:
                 throw new Exception("No orientation specified");
         }
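The two new cases only rotate when the current display shape disagrees with the requested mode. Expressed as a small Python sketch of the same decision (names are illustrative, not part of the Java API):

    def needs_rotation(mode, width, height):
        # A display is portrait-shaped when it is taller than it is wide.
        portrait_now = height > width
        if mode == 'LANDSCAPE':
            return portrait_now      # rotate only if currently tall
        if mode == 'PORTRAIT':
            return not portrait_now  # rotate only if currently wide
        return False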
@@ -547,9 +573,29 @@ public class BaseUiAutomation {
         }
     }

+    // If an app is not designed for running on the latest version of Android
+    // (currently Q) an additional screen can pop up asking to confirm permissions.
+    public void dismissAndroidPermissionPopup() throws Exception {
+        UiObject permissionAccess =
+            mDevice.findObject(new UiSelector().textMatches(
+                ".*Choose what to allow .* to access"));
+        UiObject continueButton =
+            mDevice.findObject(new UiSelector().resourceId("com.android.permissioncontroller:id/continue_button")
+                                               .textContains("Continue"));
+        if (permissionAccess.exists() && continueButton.exists()) {
+            continueButton.click();
+        }
+    }

     // If an app is not designed for running on the latest version of Android
     // (currently Q) dismiss the warning popup if present.
     public void dismissAndroidVersionPopup() throws Exception {
+
+        // Ensure we have dismissed any permission screens before looking for the version popup
+        dismissAndroidPermissionPopup();
+
         UiObject warningText =
             mDevice.findObject(new UiSelector().textContains(
                 "This app was built for an older version of Android"));
@@ -562,6 +608,29 @@ public class BaseUiAutomation {
         }
     }

+    // If Chrome is a fresh install then these popups may be presented;
+    // dismiss them if visible.
+    public void dismissChromePopup() throws Exception {
+        UiObject accept =
+            mDevice.findObject(new UiSelector().resourceId("com.android.chrome:id/terms_accept")
+                                               .className("android.widget.Button"));
+        if (accept.waitForExists(3000)){
+            accept.click();
+            UiObject negative =
+                mDevice.findObject(new UiSelector().resourceId("com.android.chrome:id/negative_button")
+                                                   .className("android.widget.Button"));
+            if (negative.waitForExists(10000)) {
+                negative.click();
+            }
+        }
+        UiObject lite =
+            mDevice.findObject(new UiSelector().resourceId("com.android.chrome:id/button_secondary")
+                                               .className("android.widget.Button"));
+        if (lite.exists()){
+            lite.click();
+        }
+    }

     // Override getParams function to decode a url encoded parameter bundle before
     // passing it to workloads.
     public Bundle getParams() {
@@ -3,9 +3,10 @@
 buildscript {
     repositories {
         jcenter()
+        google()
     }
     dependencies {
-        classpath 'com.android.tools.build:gradle:2.3.2'
+        classpath 'com.android.tools.build:gradle:7.2.1'


         // NOTE: Do not place your application dependencies here; they belong
@@ -16,6 +17,7 @@ buildscript {
 allprojects {
     repositories {
         jcenter()
+        google()
     }
 }
@@ -3,4 +3,4 @@ distributionBase=GRADLE_USER_HOME
 distributionPath=wrapper/dists
 zipStoreBase=GRADLE_USER_HOME
 zipStorePath=wrapper/dists
-distributionUrl=https\://services.gradle.org/distributions/gradle-3.3-all.zip
+distributionUrl=https\://services.gradle.org/distributions/gradle-7.3.3-all.zip
Binary file not shown.
@@ -21,9 +21,9 @@ from subprocess import Popen, PIPE

 VersionTuple = namedtuple('Version', ['major', 'minor', 'revision', 'dev'])

-version = VersionTuple(3, 1, 3, '')
+version = VersionTuple(3, 4, 0, 'dev1')

-required_devlib_version = VersionTuple(1, 1, 1, '')
+required_devlib_version = VersionTuple(1, 4, 0, 'dev3')


 def format_version(v):
@@ -48,13 +48,13 @@ def get_wa_version_with_commit():


 def get_commit():
-    p = Popen(['git', 'rev-parse', 'HEAD'],
-              cwd=os.path.dirname(__file__), stdout=PIPE, stderr=PIPE)
+    try:
+        p = Popen(['git', 'rev-parse', 'HEAD'],
+                  cwd=os.path.dirname(__file__), stdout=PIPE, stderr=PIPE)
+    except FileNotFoundError:
+        return None
     std, _ = p.communicate()
     p.wait()
     if p.returncode:
         return None
-    if sys.version_info[0] == 3 and isinstance(std, bytes):
-        return std[:8].decode(sys.stdout.encoding or 'utf-8')
-    else:
-        return std[:8]
+    return std[:8].decode(sys.stdout.encoding or 'utf-8')
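With the Python 2 branch gone, communicate() always returns bytes here, so the unconditional decode is safe; git's rev-parse output is ASCII hex, which decodes cleanly under any stdout encoding. Illustrative only (the hash below is made up):

    std = b'0f47002e4e19b38ed8d1d2494e80b1a2c2f7e152\n'   # fake rev-parse output
    print(std[:8].decode('utf-8'))                        # 0f47002e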
@@ -1,4 +1,4 @@
-# Copyright 2014-2018 ARM Limited
+# Copyright 2014-2019 ARM Limited
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,17 +14,25 @@
 #
 import logging
 import os
+import threading
 import time

-from devlib.utils.android import ApkInfo
+try:
+    from shlex import quote
+except ImportError:
+    from pipes import quote


+from wa.utils.android import get_cacheable_apk_info, build_apk_launch_command
 from wa.framework.plugin import TargetedPlugin, Parameter
 from wa.framework.resource import (ApkFile, ReventFile,
-                                   File, loose_version_matching)
+                                   File, loose_version_matching,
+                                   range_version_matching)
 from wa.framework.exception import WorkloadError, ConfigError
-from wa.utils.types import ParameterDict, list_or_string
+from wa.utils.types import ParameterDict, list_or_string, version_tuple
 from wa.utils.revent import ReventRecorder
 from wa.utils.exec_control import once_per_instance
+from wa.utils.misc import atomic_write_path


 class Workload(TargetedPlugin):
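The new try/except import keeps the module working on both Python 3 (shlex.quote) and legacy Python 2 (pipes.quote); the two behave identically for the shell-escaping used later when assembling the am instrument command. For example:

    from shlex import quote
    print(quote("value with spaces"))   # 'value with spaces'
    print(quote('plain'))               # plain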
@@ -37,14 +45,12 @@ class Workload(TargetedPlugin):
     kind = 'workload'

     parameters = [
-        Parameter('cleanup_assets', kind=bool,
-                  global_alias='cleanup_assets',
-                  aliases=['clean_up'],
+        Parameter('uninstall', kind=bool,
                   default=True,
                   description="""
-                  If ``True``, if assets are deployed as part of the workload they
-                  will be removed again from the device as part of finalize.
-                  """)
+                  If ``True``, executables that are installed to the device
+                  as part of the workload will be uninstalled again.
+                  """),
     ]

     # Set this to True to mark that this workload poses a risk of exposing
@@ -118,13 +124,11 @@ class Workload(TargetedPlugin):
         Execute the workload. This is the method that performs the actual
         "work" of the workload.
         """
-        pass

     def extract_results(self, context):
         """
         Extract results on the target
         """
-        pass

     def update_output(self, context):
         """
@@ -132,11 +136,9 @@ class Workload(TargetedPlugin):
         metrics and artifacts for this workload iteration.

         """
-        pass

     def teardown(self, context):
         """ Perform any final clean up for the Workload. """
-        pass

     @once_per_instance
     def finalize(self, context):
@@ -175,8 +177,10 @@ class ApkWorkload(Workload):
     loading_time = 10
     package_names = []
+    supported_versions = []
     activity = None
     view = None
     clear_data_on_reset = True
+    apk_arguments = {}

     # Set this to True to mark that this workload requires the target apk to be run
     # for initialisation purposes before the main run is performed.
@@ -199,6 +203,16 @@ class ApkWorkload(Workload):
                   description="""
                   The version of the package to be used.
                   """),
+        Parameter('max_version', kind=str,
+                  default=None,
+                  description="""
+                  The maximum version of the package to be used.
+                  """),
+        Parameter('min_version', kind=str,
+                  default=None,
+                  description="""
+                  The minimum version of the package to be used.
+                  """),
         Parameter('variant', kind=str,
                   default=None,
                   description="""
@@ -218,6 +232,7 @@ class ApkWorkload(Workload):
                   """),
         Parameter('uninstall', kind=bool,
                   default=False,
+                  override=True,
                   description="""
                   If ``True``, will uninstall the workload's APK as part of teardown.
                   """),
@@ -256,6 +271,12 @@ class ApkWorkload(Workload):
             raise ConfigError('Target does not appear to support Android')

         super(ApkWorkload, self).__init__(target, **kwargs)

+        if self.activity is not None and '.' not in self.activity:
+            # If we're receiving just the activity name, it's taken relative to
+            # the package namespace:
+            self.activity = '.' + self.activity
+
         self.apk = PackageHandler(self,
                                   package_name=self.package_name,
                                   variant=self.variant,
@@ -266,7 +287,17 @@ class ApkWorkload(Workload):
                                   uninstall=self.uninstall,
                                   exact_abi=self.exact_abi,
                                   prefer_host_package=self.prefer_host_package,
-                                  clear_data_on_reset=self.clear_data_on_reset)
+                                  clear_data_on_reset=self.clear_data_on_reset,
+                                  activity=self.activity,
+                                  min_version=self.min_version,
+                                  max_version=self.max_version,
+                                  apk_arguments=self.apk_arguments)
+
+    def validate(self):
+        if self.min_version and self.max_version:
+            if version_tuple(self.min_version) > version_tuple(self.max_version):
+                msg = 'Cannot specify min version ({}) greater than max version ({})'
+                raise ConfigError(msg.format(self.min_version, self.max_version))

     @once_per_instance
     def initialize(self, context):
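version_tuple (from wa.utils.types) exists because plain string comparison misorders dotted versions; the new validate() relies on it. A sketch of the idea (WA's real helper may handle suffixes differently):

    def version_tuple_sketch(v):
        return tuple(int(p) for p in v.split('.') if p.isdigit())

    assert '4.9.0' > '4.10.0'                                               # string compare: wrong answer
    assert version_tuple_sketch('4.9.0') < version_tuple_sketch('4.10.0')   # tuple compare: right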
@@ -292,7 +323,6 @@ class ApkWorkload(Workload):
         Perform the setup necessary to rerun the workload. Only called if
         ``requires_rerun`` is set.
         """
-        pass

     def teardown(self, context):
         super(ApkWorkload, self).teardown(context)
@@ -337,7 +367,8 @@ class ApkUIWorkload(ApkWorkload):
     @once_per_instance
     def finalize(self, context):
         super(ApkUIWorkload, self).finalize(context)
-        self.gui.remove()
+        if self.cleanup_assets:
+            self.gui.remove()


 class ApkUiautoWorkload(ApkUIWorkload):
@@ -375,7 +406,6 @@ class ApkReventWorkload(ApkUIWorkload):

     def __init__(self, target, **kwargs):
         super(ApkReventWorkload, self).__init__(target, **kwargs)
-        self.apk = PackageHandler(self)
         self.gui = ReventGUI(self, target,
                              self.setup_timeout,
                              self.run_timeout,
@@ -417,7 +447,8 @@ class UIWorkload(Workload):
     @once_per_instance
     def finalize(self, context):
         super(UIWorkload, self).finalize(context)
-        self.gui.remove()
+        if self.cleanup_assets:
+            self.gui.remove()


 class UiautoWorkload(UIWorkload):
@@ -489,7 +520,7 @@ class UiAutomatorGUI(object):
     def init_resources(self, resolver):
         self.uiauto_file = resolver.get(ApkFile(self.owner, uiauto=True))
         if not self.uiauto_package:
-            uiauto_info = ApkInfo(self.uiauto_file)
+            uiauto_info = get_cacheable_apk_info(self.uiauto_file)
             self.uiauto_package = uiauto_info.package

     def init_commands(self):
@@ -613,12 +644,12 @@ class ReventGUI(object):
         if self.revent_teardown_file:
             self.revent_recorder.replay(self.on_target_teardown_revent,
                                         timeout=self.teardown_timeout)

     def remove(self):
-        self.target.remove(self.on_target_setup_revent)
-        self.target.remove(self.on_target_run_revent)
-        self.target.remove(self.on_target_extract_results_revent)
-        self.target.remove(self.on_target_teardown_revent)
+        self.revent_recorder.remove()

     def _check_revent_files(self):
@@ -647,18 +678,24 @@ class PackageHandler(object):

+    @property
+    def activity(self):
+        if self._activity:
+            return self._activity
+        if self.apk_info is None:
+            return None
+        return self.apk_info.activity
+
+    # pylint: disable=too-many-locals
     def __init__(self, owner, install_timeout=300, version=None, variant=None,
                  package_name=None, strict=False, force_install=False, uninstall=False,
-                 exact_abi=False, prefer_host_package=True, clear_data_on_reset=True):
+                 exact_abi=False, prefer_host_package=True, clear_data_on_reset=True,
+                 activity=None, min_version=None, max_version=None, apk_arguments=None):
         self.logger = logging.getLogger('apk')
         self.owner = owner
         self.target = self.owner.target
         self.install_timeout = install_timeout
         self.version = version
+        self.min_version = min_version
+        self.max_version = max_version
         self.variant = variant
         self.package_name = package_name
         self.strict = strict
@@ -667,18 +704,21 @@ class PackageHandler(object):
         self.exact_abi = exact_abi
         self.prefer_host_package = prefer_host_package
         self.clear_data_on_reset = clear_data_on_reset
+        self._activity = activity
         self.supported_abi = self.target.supported_abi
         self.apk_file = None
         self.apk_info = None
         self.apk_version = None
         self.logcat_log = None
         self.error_msg = None
+        self.apk_arguments = apk_arguments

     def initialize(self, context):
         self.resolve_package(context)

     def setup(self, context):
+        context.update_metadata('app_version', self.apk_info.version_name)
+        context.update_metadata('app_name', self.apk_info.package)
         self.initialize_package(context)
         self.start_activity()
         self.target.execute('am kill-all')  # kill all *background* activities
@@ -700,7 +740,7 @@ class PackageHandler(object):
             self.resolve_package_from_host(context)

         if self.apk_file:
-            self.apk_info = ApkInfo(self.apk_file)
+            self.apk_info = get_cacheable_apk_info(self.apk_file)
         else:
             if self.error_msg:
                 raise WorkloadError(self.error_msg)
@@ -724,7 +764,9 @@ class PackageHandler(object):
                                                 version=self.version,
                                                 package=self.package_name,
                                                 exact_abi=self.exact_abi,
-                                                supported_abi=self.supported_abi),
+                                                supported_abi=self.supported_abi,
+                                                min_version=self.min_version,
+                                                max_version=self.max_version),
                                         strict=self.strict)
         else:
             available_packages = []
@@ -734,48 +776,57 @@ class PackageHandler(object):
                                                     version=self.version,
                                                     package=package,
                                                     exact_abi=self.exact_abi,
-                                                    supported_abi=self.supported_abi),
+                                                    supported_abi=self.supported_abi,
+                                                    min_version=self.min_version,
+                                                    max_version=self.max_version),
                                             strict=self.strict)
                 if apk_file:
                     available_packages.append(apk_file)
             if len(available_packages) == 1:
                 self.apk_file = available_packages[0]
             elif len(available_packages) > 1:
-                msg = 'Multiple matching packages found for "{}" on host: {}'
-                self.error_msg = msg.format(self.owner, available_packages)
+                self.error_msg = self._get_package_error_msg('host')

     def resolve_package_from_target(self):  # pylint: disable=too-many-branches
         self.logger.debug('Resolving package on target')
+        found_package = None
         if self.package_name:
             if not self.target.package_is_installed(self.package_name):
                 return
+            else:
+                installed_versions = [self.package_name]
         else:
             installed_versions = []
             for package in self.owner.package_names:
                 if self.target.package_is_installed(package):
                     installed_versions.append(package)

-        if self.version:
-            matching_packages = []
-            for package in installed_versions:
-                package_version = self.target.get_package_version(package)
+        if self.version or self.min_version or self.max_version:
+            matching_packages = []
+            for package in installed_versions:
+                package_version = self.target.get_package_version(package)
+                if self.version:
                     for v in list_or_string(self.version):
                         if loose_version_matching(v, package_version):
                             matching_packages.append(package)
-            if len(matching_packages) == 1:
-                self.package_name = matching_packages[0]
-            elif len(matching_packages) > 1:
-                msg = 'Multiple matches for version "{}" found on device.'
-                self.error_msg = msg.format(self.version)
-        else:
-            if len(installed_versions) == 1:
-                self.package_name = installed_versions[0]
-            elif len(installed_versions) > 1:
-                self.error_msg = 'Package version not set and multiple versions found on device.'
+                else:
+                    if range_version_matching(package_version, self.min_version,
+                                              self.max_version):
+                        matching_packages.append(package)
+
+            if len(matching_packages) == 1:
+                found_package = matching_packages[0]
+            elif len(matching_packages) > 1:
+                self.error_msg = self._get_package_error_msg('device')
+        else:
+            if len(installed_versions) == 1:
+                found_package = installed_versions[0]
+            elif len(installed_versions) > 1:
+                self.error_msg = 'Package version not set and multiple versions found on device.'

-        if self.package_name:
+        if found_package:
             self.logger.debug('Found matching package on target; Pulling to host.')
-            self.apk_file = self.pull_apk(self.package_name)
+            self.apk_file = self.pull_apk(found_package)
+            self.package_name = found_package

     def initialize_package(self, context):
         installed_version = self.target.get_package_version(self.apk_info.package)
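range_version_matching (imported from wa.framework.resource above) is used here as an inclusive bounds check with either bound optional; a rough sketch of those semantics, which may differ from the real helper in edge cases:

    def range_version_matching_sketch(version, min_version=None, max_version=None):
        as_tuple = lambda v: tuple(int(p) for p in str(v).split('.') if p.isdigit())
        if min_version and as_tuple(version) < as_tuple(min_version):
            return False
        if max_version and as_tuple(version) > as_tuple(max_version):
            return False
        return True

    assert range_version_matching_sketch('8.1.2', min_version='8.0')
    assert not range_version_matching_sketch('9.0', max_version='8.9')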
@@ -805,11 +856,10 @@ class PackageHandler(object):
             self.apk_version = host_version

     def start_activity(self):
-        if not self.apk_info.activity:
-            cmd = 'am start -W {}'.format(self.apk_info.package)
-        else:
-            cmd = 'am start -W -n {}/{}'.format(self.apk_info.package,
-                                                self.apk_info.activity)
+        cmd = build_apk_launch_command(self.apk_info.package, self.activity,
+                                       self.apk_arguments)

         output = self.target.execute(cmd)
         if 'Error:' in output:
             # this will dismiss any error dialogs
@@ -844,12 +894,93 @@ class PackageHandler(object):
             message = 'Cannot retrieve "{}" as not installed on Target'
             raise WorkloadError(message.format(package))
         package_info = self.target.get_package_info(package)
-        self.target.pull(package_info.apk_path, self.owner.dependencies_directory,
-                         timeout=self.install_timeout)
-        apk_name = self.target.path.basename(package_info.apk_path)
-        return os.path.join(self.owner.dependencies_directory, apk_name)
+        apk_name = self._get_package_name(package_info.apk_path)
+        host_path = os.path.join(self.owner.dependencies_directory, apk_name)
+        with atomic_write_path(host_path) as at_path:
+            self.target.pull(package_info.apk_path, at_path,
+                             timeout=self.install_timeout)
+        return host_path

     def teardown(self):
         self.target.execute('am force-stop {}'.format(self.apk_info.package))
         if self.uninstall:
             self.target.uninstall_package(self.apk_info.package)
+
+    def _get_package_name(self, apk_path):
+        return self.target.path.basename(apk_path)
+
+    def _get_package_error_msg(self, location):
+        if self.version:
+            msg = 'Multiple matches for "{version}" found on {location}.'
+        elif self.min_version and self.max_version:
+            msg = 'Multiple matches between versions "{min_version}" and "{max_version}" found on {location}.'
+        elif self.max_version:
+            msg = 'Multiple matches less than or equal to "{max_version}" found on {location}.'
+        elif self.min_version:
+            msg = 'Multiple matches greater or equal to "{min_version}" found on {location}.'
+        else:
+            msg = ''
+        return msg.format(version=self.version, min_version=self.min_version,
+                          max_version=self.max_version, location=location)
+
+
+class TestPackageHandler(PackageHandler):
+    """Class wrapping an APK used through ``am instrument``.
+    """
+    def __init__(self, owner, instrument_args=None, raw_output=False,
+                 instrument_wait=True, no_hidden_api_checks=False,
+                 *args, **kwargs):
+        if instrument_args is None:
+            instrument_args = {}
+        super(TestPackageHandler, self).__init__(owner, *args, **kwargs)
+        self.raw = raw_output
+        self.args = instrument_args
+        self.wait = instrument_wait
+        self.no_checks = no_hidden_api_checks
+
+        self.cmd = ''
+        self.instrument_thread = None
+        self._instrument_output = None
+
+    def setup(self, context):
+        self.initialize_package(context)
+
+        words = ['am', 'instrument', '--user', '0']
+        if self.raw:
+            words.append('-r')
+        if self.wait:
+            words.append('-w')
+        if self.no_checks:
+            words.append('--no-hidden-api-checks')
+        for k, v in self.args.items():
+            words.extend(['-e', str(k), str(v)])
+
+        words.append(str(self.apk_info.package))
+        if self.apk_info.activity:
+            words[-1] += '/{}'.format(self.apk_info.activity)
+
+        self.cmd = ' '.join(quote(x) for x in words)
+        self.instrument_thread = threading.Thread(target=self._start_instrument)
+
+    def start_activity(self):
+        self.instrument_thread.start()
+
+    def wait_instrument_over(self):
+        self.instrument_thread.join()
+        if 'Error:' in self._instrument_output:
+            cmd = 'am force-stop {}'.format(self.apk_info.package)
+            self.target.execute(cmd)
+            raise WorkloadError(self._instrument_output)
+
+    def _start_instrument(self):
+        self._instrument_output = self.target.execute(self.cmd)
+        self.logger.debug(self._instrument_output)
+
+    def _get_package_name(self, apk_path):
+        return 'test_{}'.format(self.target.path.basename(apk_path))
+
+    @property
+    def instrument_output(self):
+        if self.instrument_thread.is_alive():
+            self.instrument_thread.join()  # writes self._instrument_output
+        return self._instrument_output
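For a feel of what TestPackageHandler.setup() assembles: with instrument_args={'iterations': '3'}, raw_output=True, instrument_wait=True and a hypothetical package com.example.test with runner activity .TestRunner, the quoted command comes out as:

    from shlex import quote

    words = ['am', 'instrument', '--user', '0', '-r', '-w',
             '-e', 'iterations', '3', 'com.example.test/.TestRunner']
    print(' '.join(quote(x) for x in words))
    # am instrument --user 0 -r -w -e iterations 3 com.example.test/.TestRunner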
@@ -20,6 +20,7 @@ import time
 from wa import Instrument, Parameter
 from wa.framework.exception import ConfigError, InstrumentError
 from wa.framework.instrument import extremely_slow
+from wa.utils.types import identifier


 class DelayInstrument(Instrument):
@@ -32,7 +33,7 @@ class DelayInstrument(Instrument):
     The delay may be specified as either a fixed period or a temperature
     threshold that must be reached.

-    Optionally, if an active cooling solution is available on the device tqgitq
+    Optionally, if an active cooling solution is available on the device to
     speed up temperature drop between runs, it may be controlled using this
     instrument.

@@ -200,16 +201,16 @@ class DelayInstrument(Instrument):
             reading = self.target.read_int(self.temperature_file)

     def validate(self):
-        if (self.temperature_between_specs is not None and
-                self.fixed_between_specs is not None):
+        if (self.temperature_between_specs is not None
+                and self.fixed_between_specs is not None):
             raise ConfigError('Both fixed delay and thermal threshold specified for specs.')

-        if (self.temperature_between_jobs is not None and
-                self.fixed_between_jobs is not None):
+        if (self.temperature_between_jobs is not None
+                and self.fixed_between_jobs is not None):
             raise ConfigError('Both fixed delay and thermal threshold specified for jobs.')

-        if (self.temperature_before_start is not None and
-                self.fixed_before_start is not None):
+        if (self.temperature_before_start is not None
+                and self.fixed_before_start is not None):
             raise ConfigError('Both fixed delay and thermal threshold specified before start.')

         if not any([self.temperature_between_specs, self.fixed_between_specs,
@@ -222,7 +223,7 @@ class DelayInstrument(Instrument):
         for module in self.active_cooling_modules:
             if self.target.has(module):
                 if not cooling_module:
-                    cooling_module = getattr(self.target, module)
+                    cooling_module = getattr(self.target, identifier(module))
                 else:
                     msg = 'Multiple cooling modules found "{}" "{}".'
                     raise InstrumentError(msg.format(cooling_module.name, module))
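The identifier() fix matters because devlib attaches modules to the target under attribute-safe names, while the user-facing module name may contain characters such as '-'. identifier from wa.utils.types performs that normalisation; roughly:

    import re

    def identifier_sketch(text):
        # Rough approximation: squash anything that is not a word character.
        return re.sub(r'\W+', '_', text)

    assert identifier_sketch('mbed-fan') == 'mbed_fan'   # so getattr(target, 'mbed_fan') works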
@@ -144,7 +144,13 @@ class DAQBackend(EnergyInstrumentBackend):
                  connector on the DAQ (varies between DAQ models). The default
                  assumes DAQ 6363 and similar with AI channels on connectors
                  0-7 and 16-23.
-                 """)
+                 """),
+        Parameter('keep_raw', kind=bool, default=False,
+                  description="""
+                  If set to ``True``, this will prevent the raw files obtained
+                  from the device before processing from being deleted
+                  (this is mainly used for debugging).
+                  """),
     ]

     instrument = DaqInstrument
@@ -189,6 +195,12 @@ class EnergyProbeBackend(EnergyInstrumentBackend):
                  description="""
                  Path to /dev entry for the energy probe (it should be /dev/ttyACMx)
                  """),
+        Parameter('keep_raw', kind=bool, default=False,
+                  description="""
+                  If set to ``True``, this will prevent the raw files obtained
+                  from the device before processing from being deleted
+                  (this is mainly used for debugging).
+                  """),
     ]

     instrument = EnergyProbeInstrument
@@ -224,6 +236,12 @@ class ArmEnergyProbeBackend(EnergyInstrumentBackend):
                  description="""
                  Path to config file of the AEP
                  """),
+        Parameter('keep_raw', kind=bool, default=False,
+                  description="""
+                  If set to ``True``, this will prevent the raw files obtained
+                  from the device before processing from being deleted
+                  (this is mainly used for debugging).
+                  """),
     ]

     instrument = ArmEnergyProbeInstrument
@@ -282,11 +300,17 @@ class AcmeCapeBackend(EnergyInstrumentBackend):
                  description="""
                  Size of the capture buffer (in KB).
                  """),
+        Parameter('keep_raw', kind=bool, default=False,
+                  description="""
+                  If set to ``True``, this will prevent the raw files obtained
+                  from the device before processing from being deleted
+                  (this is mainly used for debugging).
+                  """),
     ]

     # pylint: disable=arguments-differ
     def get_instruments(self, target, metadir,
-                        iio_capture, host, iio_devices, buffer_size):
+                        iio_capture, host, iio_devices, buffer_size, keep_raw):

         #
         # Devlib's ACME instrument uses iio-capture under the hood, which can
@@ -307,7 +331,7 @@ class AcmeCapeBackend(EnergyInstrumentBackend):
         for iio_device in iio_devices:
             ret[iio_device] = AcmeCapeInstrument(
                 target, iio_capture=iio_capture, host=host,
-                iio_device=iio_device, buffer_size=buffer_size)
+                iio_device=iio_device, buffer_size=buffer_size, keep_raw=keep_raw)
         return ret


@@ -510,3 +534,7 @@ class EnergyMeasurement(Instrument):
             units = metrics[0].units
             value = sum(m.value for m in metrics)
             context.add_metric(name, value, units)
+
+    def teardown(self, context):
+        for instrument in self.instruments.values():
+            instrument.teardown()
@@ -32,16 +32,16 @@ import tarfile
 from subprocess import CalledProcessError

 from devlib.exception import TargetError
-from devlib.utils.android import ApkInfo

 from wa import Instrument, Parameter, very_fast
 from wa.framework.exception import ConfigError
 from wa.framework.instrument import slow
 from wa.utils.diff import diff_sysfs_dirs, diff_interrupt_files
-from wa.utils.misc import as_relative
+from wa.utils.misc import as_relative, safe_extract
 from wa.utils.misc import ensure_file_directory_exists as _f
 from wa.utils.misc import ensure_directory_exists as _d
 from wa.utils.types import list_of_strings
+from wa.utils.android import get_cacheable_apk_info


 logger = logging.getLogger(__name__)
@@ -162,20 +162,26 @@ class SysfsExtractor(Instrument):
             self.target.execute('chmod 0777 {}'.format(on_device_tarball), as_root=True)
             self.target.pull(on_device_tarball, on_host_tarball)
             with tarfile.open(on_host_tarball, 'r:gz') as tf:
-                tf.extractall(context.output_directory)
+                safe_extract(tf, context.output_directory)
             self.target.remove(on_device_tarball)
             os.remove(on_host_tarball)

         for paths in self.device_and_host_paths:
             after_dir = paths[self.AFTER_PATH]
             dev_dir = paths[self.DEVICE_PATH].strip('*')  # remove potential trailing '*'
-            if (not os.listdir(after_dir) and
-                    self.target.file_exists(dev_dir) and
-                    self.target.list_directory(dev_dir)):
+            if (not os.listdir(after_dir)
+                    and self.target.file_exists(dev_dir)
+                    and self.target.list_directory(dev_dir)):
                 self.logger.error('sysfs files were not pulled from the device.')
                 self.device_and_host_paths.remove(paths)  # Path is removed to skip diffing it
-        for _, before_dir, after_dir, diff_dir in self.device_and_host_paths:
+        for dev_dir, before_dir, after_dir, diff_dir in self.device_and_host_paths:
             diff_sysfs_dirs(before_dir, after_dir, diff_dir)
+            context.add_artifact('{} [before]'.format(dev_dir), before_dir,
+                                 kind='data', classifiers={'stage': 'before'})
+            context.add_artifact('{} [after]'.format(dev_dir), after_dir,
+                                 kind='data', classifiers={'stage': 'after'})
+            context.add_artifact('{} [diff]'.format(dev_dir), diff_dir,
+                                 kind='data', classifiers={'stage': 'diff'})

     def teardown(self, context):
         self._one_time_setup_done = []
@@ -238,7 +244,7 @@ class ApkVersion(Instrument):

     def setup(self, context):
         if hasattr(context.workload, 'apk_file'):
-            self.apk_info = ApkInfo(context.workload.apk_file)
+            self.apk_info = get_cacheable_apk_info(context.workload.apk_file)
         else:
             self.apk_info = None

@@ -276,9 +282,15 @@ class InterruptStatsInstrument(Instrument):
             wfh.write(self.target.execute('cat /proc/interrupts'))

     def update_output(self, context):
+        context.add_artifact('interrupts [before]', self.before_file, kind='data',
+                             classifiers={'stage': 'before'})
         # If workload execution failed, the after_file may not have been created.
         if os.path.isfile(self.after_file):
             diff_interrupt_files(self.before_file, self.after_file, _f(self.diff_file))
+            context.add_artifact('interrupts [after]', self.after_file, kind='data',
+                                 classifiers={'stage': 'after'})
+            context.add_artifact('interrupts [diff]', self.diff_file, kind='data',
+                                 classifiers={'stage': 'diff'})


 class DynamicFrequencyInstrument(SysfsExtractor):
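The move from tf.extractall() to safe_extract guards against tar archives whose member names escape the destination directory (the classic '../' path-traversal issue with extractall). The real helper is in wa.utils.misc; a minimal sketch of that guard:

    import os
    import tarfile

    def safe_extract_sketch(tar, dest):
        dest = os.path.realpath(dest)
        for member in tar.getmembers():
            target = os.path.realpath(os.path.join(dest, member.name))
            if not target.startswith(dest + os.sep):
                raise RuntimeError('blocked path traversal: {}'.format(member.name))
        tar.extractall(dest)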
@@ -15,13 +15,14 @@


 # pylint: disable=unused-argument
+import csv
 import os
 import re

-from devlib.trace.perf import PerfCollector
+from devlib.collector.perf import PerfCollector

-from wa import Instrument, Parameter
-from wa.utils.types import list_or_string, list_of_strs
+from wa import Instrument, Parameter, ConfigError
+from wa.utils.types import list_or_string, list_of_strs, numeric

 PERF_COUNT_REGEX = re.compile(r'^(CPU\d+)?\s*(\d+)\s*(.*?)\s*(\[\s*\d+\.\d+%\s*\])?\s*$')
@@ -30,30 +31,41 @@ class PerfInstrument(Instrument):

    name = 'perf'
    description = """
-   Perf is a Linux profiling with performance counters.
+   Perf is a Linux profiling tool with performance counters.
+   Simpleperf is an Android profiling tool with performance counters.
+
+   It is highly recommended to use perf_type = simpleperf when using this instrument
+   on Android devices, since it recognises Android symbols in record mode and is much
+   more stable when reporting record .data files. For more information see the
+   simpleperf documentation at:
+   https://android.googlesource.com/platform/system/extras/+/master/simpleperf/doc/README.md

    Performance counters are CPU hardware registers that count hardware events
    such as instructions executed, cache-misses suffered, or branches
    mispredicted. They form a basis for profiling applications to trace dynamic
    control flow and identify hotspots.

-   pref accepts options and events. If no option is given the default '-a' is
-   used. For events, the default events are migrations and cs. They both can
-   be specified in the config file.
+   perf accepts options and events. If no option is given the default '-a' is
+   used. For events, the default events for perf are migrations and cs. The default
+   events for simpleperf are raw-cpu-cycles, raw-l1-dcache, raw-l1-dcache-refill and
+   raw-instructions-retired. Both can be specified in the config file.

    Events must be provided as a list, and will look like this ::

-       perf_events = ['migrations', 'cs']
+       (for perf_type = perf)       perf_events = ['migrations', 'cs']
+       (for perf_type = simpleperf) perf_events = ['raw-cpu-cycles', 'raw-l1-dcache']

    Events can be obtained by typing the following in the command line on the
    device ::

        perf list
+       simpleperf list

-   Whereas options, they can be provided as a single string as following ::
+   Options, on the other hand, can be provided as a single string, as follows ::

        perf_options = '-a -i'
+       perf_options = '--app com.adobe.reader'

    Options can be obtained by running the following in the command line ::
@@ -61,40 +73,80 @@ class PerfInstrument(Instrument):
     """

     parameters = [
-        Parameter('events', kind=list_of_strs, default=['migrations', 'cs'],
-                  global_alias='perf_events',
-                  constraint=(lambda x: x, 'must not be empty.'),
+        Parameter('perf_type', kind=str, allowed_values=['perf', 'simpleperf'], default='perf',
+                  global_alias='perf_type', description="""Specifies which type of perf binaries
+                  to install. Use simpleperf for collecting perf data on Android systems."""),
+        Parameter('command', kind=str, default='stat', allowed_values=['stat', 'record'],
+                  global_alias='perf_command', description="""Specifies which perf command to use. In record mode
+                  the report command will also be executed and the results pulled from the target along with the
+                  raw data file."""),
+        Parameter('events', kind=list_of_strs, global_alias='perf_events',
+                  description="""Specifies the events to be counted."""),
         Parameter('optionstring', kind=list_or_string, default='-a',
                   global_alias='perf_options',
                   description="""Specifies options to be used for the perf command. This
                   may be a list of option strings, in which case, multiple instances of perf
                   will be kicked off -- one for each option string. This may be used to e.g.
-                  collected different events from different big.LITTLE clusters.
+                  collect different events from different big.LITTLE clusters. In order to
+                  profile a particular application process on Android with simpleperf, use
+                  the --app option, e.g. --app com.adobe.reader.
                   """),
+        Parameter('report_option_string', kind=str, global_alias='perf_report_options', default=None,
+                  description="""Specifies options to be used to gather the report when the record
+                  command is used. It is highly recommended to use perf_type simpleperf when running
+                  on Android devices, as reporting options are unstable with perf."""),
+        Parameter('run_report_sample', kind=bool, default=False, description="""If true, run
+                  'perf/simpleperf report-sample'. It only works with the record command."""),
+        Parameter('report_sample_options', kind=str, default=None,
+                  description="""Specifies options to pass to report-sample when run_report_sample
+                  is true."""),
         Parameter('labels', kind=list_of_strs, default=None,
                   global_alias='perf_labels',
-                  description="""Provides labels for pref output. If specified, the number of
-                  labels must match the number of ``optionstring``\ s.
+                  description="""Provides labels for perf/simpleperf output for each optionstring.
+                  If specified, the number of labels must match the number of ``optionstring``\ s.
                   """),
         Parameter('force_install', kind=bool, default=False,
                   description="""
                   always install perf binary even if perf is already present on the device.
                   """),
+        Parameter('validate_pmu_events', kind=bool, default=True,
+                  description="""
+                  Query the hardware capabilities to verify the specified PMU events.
+                  """),
     ]

     def __init__(self, target, **kwargs):
         super(PerfInstrument, self).__init__(target, **kwargs)
         self.collector = None
         self.outdir = None

+    def validate(self):
+        if self.report_option_string and (self.command != "record"):
+            raise ConfigError("report_option_string only works with perf/simpleperf record. Set command to record or remove report_option_string")
+        if self.report_sample_options and (self.command != "record"):
+            raise ConfigError("report_sample_options only works with perf/simpleperf record. Set command to record or remove report_sample_options")
+        if self.run_report_sample and (self.command != "record"):
+            raise ConfigError("run_report_sample only works with perf/simpleperf record. Set command to record or remove run_report_sample")
+
     def initialize(self, context):
+        if self.report_sample_options:
+            self.run_report_sample = True
+
         self.collector = PerfCollector(self.target,
+                                       self.perf_type,
+                                       self.command,
                                        self.events,
                                        self.optionstring,
+                                       self.report_option_string,
+                                       self.run_report_sample,
+                                       self.report_sample_options,
                                        self.labels,
-                                       self.force_install)
+                                       self.force_install,
+                                       self.validate_pmu_events)

     def setup(self, context):
+        self.outdir = os.path.join(context.output_directory, self.perf_type)
+        self.collector.set_output(self.outdir)
         self.collector.reset()

     def start(self, context):
@@ -105,12 +157,32 @@ class PerfInstrument(Instrument):

     def update_output(self, context):
         self.logger.info('Extracting reports from target...')
-        outdir = os.path.join(context.output_directory, 'perf')
-        self.collector.get_trace(outdir)
+        self.collector.get_data()

-        for host_file in os.listdir(outdir):
+        if self.perf_type == 'perf':
+            self._process_perf_output(context)
+        else:
+            self._process_simpleperf_output(context)
+
+    def teardown(self, context):
+        self.collector.reset()
+
+    def _process_perf_output(self, context):
+        if self.command == 'stat':
+            self._process_perf_stat_output(context)
+        elif self.command == 'record':
+            self._process_perf_record_output(context)
+
+    def _process_simpleperf_output(self, context):
+        if self.command == 'stat':
+            self._process_simpleperf_stat_output(context)
+        elif self.command == 'record':
+            self._process_simpleperf_record_output(context)
+
+    def _process_perf_stat_output(self, context):
+        for host_file in os.listdir(self.outdir):
             label = host_file.split('.out')[0]
-            host_file_path = os.path.join(outdir, host_file)
+            host_file_path = os.path.join(self.outdir, host_file)
             context.add_artifact(label, host_file_path, 'raw')
             with open(host_file_path) as fh:
                 in_results_section = False
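_process_perf_stat_output() ultimately feeds each counter line to PERF_COUNT_REGEX (defined at the top of the module). A sample perf stat line and the groups it yields, with illustrative numbers:

    import re

    PERF_COUNT_REGEX = re.compile(r'^(CPU\d+)?\s*(\d+)\s*(.*?)\s*(\[\s*\d+\.\d+%\s*\])?\s*$')
    m = PERF_COUNT_REGEX.search('CPU0    1894    migrations')
    print(m.group(1), m.group(2), m.group(3))   # CPU0 1894 migrations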
@@ -118,21 +190,158 @@ class PerfInstrument(Instrument):
                     if 'Performance counter stats' in line:
                         in_results_section = True
                         next(fh)  # skip the following blank line
-                    if in_results_section:
-                        if not line.strip():  # blank line
-                            in_results_section = False
-                            break
-                        else:
-                            line = line.split('#')[0]  # comment
-                            match = PERF_COUNT_REGEX.search(line)
-                            if match:
-                                classifiers = {}
-                                cpu = match.group(1)
-                                if cpu is not None:
-                                    classifiers['cpu'] = int(cpu.replace('CPU', ''))
-                                count = int(match.group(2))
-                                metric = '{}_{}'.format(label, match.group(3))
-                                context.add_metric(metric, count, classifiers=classifiers)
+                    if not in_results_section:
+                        continue
+                    if not line.strip():  # blank line
+                        in_results_section = False
+                        break
+                    else:
+                        self._add_perf_stat_metric(line, label, context)

-    def teardown(self, context):
-        self.collector.reset()
+    @staticmethod
+    def _add_perf_stat_metric(line, label, context):
+        line = line.split('#')[0]  # comment
+        match = PERF_COUNT_REGEX.search(line)
+        if not match:
+            return
+        classifiers = {}
+        cpu = match.group(1)
+        if cpu is not None:
+            classifiers['cpu'] = int(cpu.replace('CPU', ''))
+        count = int(match.group(2))
+        metric = '{}_{}'.format(label, match.group(3))
+        context.add_metric(metric, count, classifiers=classifiers)
+
+    def _process_perf_record_output(self, context):
+        for host_file in os.listdir(self.outdir):
+            label, ext = os.path.splitext(host_file)
+            context.add_artifact(label, os.path.join(self.outdir, host_file), 'raw')
+            column_headers = []
+            column_header_indeces = []
+            event_type = ''
+            if ext == '.rpt':
+                with open(os.path.join(self.outdir, host_file)) as fh:
+                    for line in fh:
+                        words = line.split()
+                        if not words:
+                            continue
+                        event_type = self._get_report_event_type(words, event_type)
+                        column_headers = self._get_report_column_headers(column_headers, words, 'perf')
+                        for column_header in column_headers:
+                            column_header_indeces.append(line.find(column_header))
+                        self._add_report_metric(column_headers,
+                                                column_header_indeces,
+                                                line,
+                                                words,
+                                                context,
+                                                event_type,
+                                                label)
+
+    @staticmethod
+    def _get_report_event_type(words, event_type):
+        if words[0] != '#':
+            return event_type
+        if len(words) == 6 and words[4] == 'event':
+            event_type = words[5]
+            event_type = event_type.strip("'")
+        return event_type
+
+    def _process_simpleperf_stat_output(self, context):
+        labels = []
+        for host_file in os.listdir(self.outdir):
+            labels.append(host_file.split('.out')[0])
+        for opts, label in zip(self.optionstring, labels):
+            stat_file = os.path.join(self.outdir, '{}{}'.format(label, '.out'))
+            if '--csv' in opts:
+                self._process_simpleperf_stat_from_csv(stat_file, context, label)
+            else:
+                self._process_simpleperf_stat_from_raw(stat_file, context, label)
+
+    @staticmethod
+    def _process_simpleperf_stat_from_csv(stat_file, context, label):
+        with open(stat_file) as csv_file:
+            readCSV = csv.reader(csv_file, delimiter=',')
+            line_num = 0
+            for row in readCSV:
+                if 'Performance counter statistics' not in row and 'Total test time' not in row:
+                    classifiers = {}
+                    if '%' in row:
+                        classifiers['scaled from(%)'] = row[len(row) - 2].replace('(', '').replace(')', '').replace('%', '')
+                    context.add_metric('{}_{}'.format(label, row[1]), row[0], 'count', classifiers=classifiers)
+                line_num += 1
+
+    @staticmethod
+    def _process_simpleperf_stat_from_raw(stat_file, context, label):
+        with open(stat_file) as fh:
+            for line in fh:
+                if '#' in line and not line.startswith('#'):
+                    units = 'count'
+                    if "(ms)" in line:
+                        line = line.replace("(ms)", "")
+                        units = 'ms'
+                    tmp_line = line.split('#')[0]
+                    tmp_line = line.strip()
+                    count, metric = tmp_line.split(' ')[0], tmp_line.split(' ')[2]
+                    count = float(count) if "." in count else int(count.replace(',', ''))
+                    classifiers = {}
+                    if '%' in line:
+                        scaled_percentage = line.split('(')[1].strip().replace(')', '').replace('%', '')
+                        classifiers['scaled from(%)'] = int(scaled_percentage)
+                    metric = '{}_{}'.format(label, metric)
+                    context.add_metric(metric, count, units, classifiers=classifiers)
+
+    def _process_simpleperf_record_output(self, context):
+        for host_file in os.listdir(self.outdir):
+            label, ext = os.path.splitext(host_file)
+            context.add_artifact(label, os.path.join(self.outdir, host_file), 'raw')
+            if ext != '.rpt':
+                continue
+            column_headers = []
+            column_header_indeces = []
+            event_type = ''
+            with open(os.path.join(self.outdir, host_file)) as fh:
+                for line in fh:
+                    words = line.split()
+                    if not words:
+                        continue
+                    if words[0] == 'Event:':
+                        event_type = words[1]
+                    column_headers = self._get_report_column_headers(column_headers,
+                                                                     words,
+                                                                     'simpleperf')
+                    for column_header in column_headers:
+                        column_header_indeces.append(line.find(column_header))
+                    self._add_report_metric(column_headers,
+                                            column_header_indeces,
+                                            line,
+                                            words,
+                                            context,
+                                            event_type,
+                                            label)
+
+    @staticmethod
+    def _get_report_column_headers(column_headers, words, perf_type):
+        if 'Overhead' not in words:
+            return column_headers
+        if perf_type == 'perf':
+            words.remove('#')
+        column_headers = words
+        # Concatenate the "Shared Objects" header
+        if 'Shared' in column_headers:
+            shared_index = column_headers.index('Shared')
+            column_headers[shared_index:shared_index + 2] = ['{} {}'.format(column_headers[shared_index],
+                                                                            column_headers[shared_index + 1])]
+        return column_headers
+
+    @staticmethod
+    def _add_report_metric(column_headers, column_header_indeces, line, words, context, event_type, label):
+        if '%' not in words[0]:
+            return
+        classifiers = {}
+        for i in range(1, len(column_headers)):
+            classifiers[column_headers[i]] = line[column_header_indeces[i]:column_header_indeces[i + 1]].strip()
+
+        context.add_metric('{}_{}_Overhead'.format(label, event_type),
+                           numeric(words[0].strip('%')),
+                           'percent',
+                           classifiers=classifiers)
Some files were not shown because too many files have changed in this diff.