Mirror of https://github.com/ARM-software/workload-automation.git (synced 2025-04-13 06:10:50 +01:00)
Compare commits
545 Commits
.github/ISSUE_TEMPLATE/bug_report.md (vendored, new file, 16 lines)
@@ -0,0 +1,16 @@
---
name: Bug report
about: Create a report to help resolve an issue.
title: ''
labels: bug
assignees: ''

---

**Describe the issue**
A clear and concise description of what the bug is.

**Run Log**
Please attach your `run.log` detailing the issue.

**Other comments (optional)**
.github/ISSUE_TEMPLATE/feature_request.md (vendored, new file, 17 lines)
@@ -0,0 +1,17 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is.

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Additional context**
Add any other context about the feature request here.
.github/ISSUE_TEMPLATE/question---support-.md (vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
---
name: 'Question / Support '
about: Ask a question or request support
title: ''
labels: question
assignees: ''

---

**
.github/ISSUE_TEMPLATE/question.md (vendored, new file, 11 lines)
@@ -0,0 +1,11 @@
---
name: Question
about: Ask a question
title: ''
labels: question
assignees: ''

---

**Describe your query**
What would you like to know / what are you trying to achieve?
.github/workflows/main.yml (vendored, new file, 92 lines)
@@ -0,0 +1,92 @@
name: WA Test Suite

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]
    types: [opened, synchronize, reopened, ready_for_review]
  schedule:
    - cron: 0 2 * * *
  # Allows running this workflow manually from the Actions tab
  workflow_dispatch:

jobs:
  Run-Linters-and-Tests:
    runs-on: ubuntu-22.04
    steps:
    - uses: actions/checkout@v2
    - name: Set up Python 3.8.18
      uses: actions/setup-python@v2
      with:
        python-version: 3.8.18
    - name: git-bash
      uses: pkg-src/github-action-git-bash@v1.1
    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        cd /tmp && git clone https://github.com/ARM-software/devlib.git && cd devlib && pip install .
        cd $GITHUB_WORKSPACE && pip install .[test]
        python -m pip install pylint==2.6.2 pep8 flake8 mock nose
    - name: Run pylint
      run: |
        cd $GITHUB_WORKSPACE && ./dev_scripts/pylint wa/
    - name: Run PEP8
      run: |
        cd $GITHUB_WORKSPACE && ./dev_scripts/pep8 wa
    - name: Run nose tests
      run: |
        nosetests

  Execute-Test-Workload-and-Process:
    runs-on: ubuntu-22.04
    strategy:
      matrix:
        python-version: [3.7.17, 3.8.18, 3.9.21, 3.10.16, 3.13.2]
    steps:
    - uses: actions/checkout@v2
    - name: Set up Python ${{ matrix.python-version }}
      uses: actions/setup-python@v2
      with:
        python-version: ${{ matrix.python-version }}
    - name: git-bash
      uses: pkg-src/github-action-git-bash@v1.1
    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        cd /tmp && git clone https://github.com/ARM-software/devlib.git && cd devlib && pip install .
        cd $GITHUB_WORKSPACE && pip install .
    - name: Run test workload
      run: |
        cd /tmp && wa run $GITHUB_WORKSPACE/tests/ci/idle_agenda.yaml -v -d idle_workload
    - name: Test Process Command
      run: |
        cd /tmp && wa process -f -p csv idle_workload

  Test-WA-Commands:
    runs-on: ubuntu-22.04
    strategy:
      matrix:
        python-version: [3.7.17, 3.8.18, 3.9.21, 3.10.16, 3.13.2]
    steps:
    - uses: actions/checkout@v2
    - name: Set up Python ${{ matrix.python-version }}
      uses: actions/setup-python@v2
      with:
        python-version: ${{ matrix.python-version }}
    - name: git-bash
      uses: pkg-src/github-action-git-bash@v1.1
    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        cd /tmp && git clone https://github.com/ARM-software/devlib.git && cd devlib && pip install .
        cd $GITHUB_WORKSPACE && pip install .
    - name: Test Show Command
      run: |
        wa show dhrystone && wa show generic_android && wa show trace-cmd && wa show csv
    - name: Test List Command
      run: |
        wa list all
    - name: Test Create Command
      run: |
        wa create agenda dhrystone generic_android csv trace_cmd && wa create package test && wa create workload test
.readthedocs.yml (new file, 28 lines)
@@ -0,0 +1,28 @@
# .readthedocs.yml
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details

# Required
version: 2

# Build documentation in the docs/ directory with Sphinx
sphinx:
  builder: html
  configuration: doc/source/conf.py

# Build the docs in additional formats such as PDF and ePub
formats: all


# Configure the build environment
build:
  os: ubuntu-22.04
  tools:
    python: "3.11"

# Ensure doc dependencies are installed before building
python:
  install:
    - requirements: doc/requirements.txt
    - method: pip
      path: .
.travis.yml (deleted, 31 lines)
@@ -1,31 +0,0 @@
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

language: python

python:
  - "2.7"
  - "3.4"
  - "3.6"

install:
  - pip install nose
  - pip install nose2

script:
  - git clone -v https://github.com/ARM-software/devlib.git /tmp/devlib && cd /tmp/devlib && python setup.py install
  - cd $TRAVIS_BUILD_DIR && python setup.py install
  - nose2 -s $TRAVIS_BUILD_DIR/tests
@@ -1,2 +1,3 @@
recursive-include scripts *
recursive-include doc *
recursive-include wa *
@@ -18,7 +18,7 @@ workloads, instruments or output processing.
Requirements
============

- Python 2.7 or Python 3
- Python 3.5+
- Linux (should work on other Unixes, but untested)
- Latest Android SDK (ANDROID_HOME must be set) for Android devices, or
- SSH for Linux devices
@@ -30,7 +30,11 @@ Installation
To install::

    git clone git@github.com:ARM-software/workload-automation.git workload-automation
    sudo -H pip install ./workload-automation
    sudo -H python setup [install|develop]

Note: A `requirements.txt` is included however this is designed to be used as a
reference for known working versions rather than as part of a standard
installation.

Please refer to the `installation section <http://workload-automation.readthedocs.io/en/latest/user_information.html#install>`_
in the documentation for more details.
@@ -6,7 +6,7 @@ DEFAULT_DIRS=(

EXCLUDE=wa/tests,wa/framework/target/descriptor.py
EXCLUDE_COMMA=
IGNORE=E501,E265,E266,W391,E401,E402,E731,W504,W605,F401
IGNORE=E501,E265,E266,W391,E401,E402,E731,W503,W605,F401

if ! hash flake8 2>/dev/null; then
    echo "flake8 not found in PATH"
@@ -1,6 +1,4 @@
#!/bin/bash
set -e

DEFAULT_DIRS=(
    wa
)
@@ -34,7 +32,18 @@ compare_versions() {
    return 0
}

pylint_version=$(python3 -c 'from pylint.__pkginfo__ import version; print(version)')
pylint_version=$(python -c 'from pylint.__pkginfo__ import version; print(version)' 2>/dev/null)
if [ "x$pylint_version" == "x" ]; then
    pylint_version=$(python3 -c 'from pylint.__pkginfo__ import version; print(version)' 2>/dev/null)
fi
if [ "x$pylint_version" == "x" ]; then
    pylint_version=$(python3 -c 'from pylint import version; print(version)' 2>/dev/null)
fi
if [ "x$pylint_version" == "x" ]; then
    echo "ERROR: no pylint version found; is it installed?"
    exit 1
fi

compare_versions $pylint_version "1.9.2"
result=$?
if [ "$result" == "2" ]; then
@@ -42,12 +51,13 @@ if [ "$result" == "2" ]; then
    exit 1
fi

set -e
THIS_DIR="`dirname \"$0\"`"
CWD=$PWD
pushd $THIS_DIR > /dev/null
if [[ "$target" == "" ]]; then
    for dir in "${DEFAULT_DIRS[@]}"; do
        PYTHONPATH=. pylint --rcfile ../extras/pylintrc --load-plugins pylint_plugins $THIS_DIR/../$dir
        PYTHONPATH=. pylint --rcfile ../extras/pylintrc --load-plugins pylint_plugins ../$dir
    done
else
    PYTHONPATH=. pylint --rcfile ../extras/pylintrc --load-plugins pylint_plugins $CWD/$target
@@ -1,3 +1,5 @@
import sys

from astroid import MANAGER
from astroid import scoped_nodes

@@ -23,18 +25,19 @@ def transform(mod):
    if not text.strip():
        return

    text = text.split('\n')
    text = text.split(b'\n')
    # NOTE: doing it this way because the "correct" approach below does not
    # work. We can get away with this, because in well-formatted WA files,
    # the initial line is the copyright header's blank line.
    if 'pylint:' in text[0]:
    if b'pylint:' in text[0]:
        msg = 'pylint directive found on the first line of {}; please move to below copyright header'
        raise RuntimeError(msg.format(mod.name))
    if text[0].strip() and text[0][0] != '#':
    char = chr(text[0][0])
    if text[0].strip() and char != '#':
        msg = 'first line of {} is not a comment; is the copyright header missing?'
        raise RuntimeError(msg.format(mod.name))
    text[0] = '# pylint: disable={}'.format(','.join(errors))
    mod.file_bytes = '\n'.join(text)
    text[0] = '# pylint: disable={}'.format(','.join(errors)).encode('utf-8')
    mod.file_bytes = b'\n'.join(text)

    # This is what *should* happen, but doesn't work.
    # text.insert(0, '# pylint: disable=attribute-defined-outside-init')
@@ -1,5 +1,5 @@
#!/usr/bin/env python
# Copyright 2015-2015 ARM Limited
# Copyright 2015-2019 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -26,10 +26,11 @@ OUTPUT_TEMPLATE_FILE = os.path.join(os.path.dirname(__file__), 'source', 'instr


def generate_instrument_method_map(outfile):
    signal_table = format_simple_table([(k, v) for k, v in SIGNAL_MAP.iteritems()],
    signal_table = format_simple_table([(k, v) for k, v in SIGNAL_MAP.items()],
                                       headers=['method name', 'signal'], align='<<')
    priority_table = format_simple_table(zip(CallbackPriority.names, CallbackPriority.values),
                                         headers=['decorator', 'priority'], align='<>')
    decorator_names = map(lambda x: x.replace('high', 'fast').replace('low', 'slow'), CallbackPriority.names)
    priority_table = format_simple_table(zip(decorator_names, CallbackPriority.names, CallbackPriority.values),
                                         headers=['decorator', 'CallbackPriority name', 'CallbackPriority value'], align='<>')
    with open(OUTPUT_TEMPLATE_FILE) as fh:
        template = string.Template(fh.read())
    with open(outfile, 'w') as wfh:
@@ -37,4 +38,4 @@ def generate_instrument_method_map(outfile):


if __name__ == '__main__':
    generate_instrumentation_method_map(sys.argv[1])
    generate_instrument_method_map(sys.argv[1])
@@ -1,5 +1,5 @@
#!/usr/bin/env python
# Copyright 2014-2015 ARM Limited
# Copyright 2014-2019 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -25,7 +25,12 @@ from wa.utils.doc import (strip_inlined_text, get_rst_from_plugin,
                          get_params_rst, underline, line_break)
from wa.utils.misc import capitalize

GENERATE_FOR_PACKAGES = ['wa.workloads', 'wa.instruments', 'wa.output_processors']
GENERATE_FOR_PACKAGES = [
    'wa.workloads',
    'wa.instruments',
    'wa.output_processors',
]


def insert_contents_table(title='', depth=1):
    """
@@ -41,6 +46,7 @@ def insert_contents_table(title='', depth=1):


def generate_plugin_documentation(source_dir, outdir, ignore_paths):
    # pylint: disable=unused-argument
    pluginloader.clear()
    pluginloader.update(packages=GENERATE_FOR_PACKAGES)
    if not os.path.exists(outdir):
@@ -57,7 +63,7 @@ def generate_plugin_documentation(source_dir, outdir, ignore_paths):
        exts = pluginloader.list_plugins(ext_type)
        sorted_exts = iter(sorted(exts, key=lambda x: x.name))
        try:
            wfh.write(get_rst_from_plugin(sorted_exts.next()))
            wfh.write(get_rst_from_plugin(next(sorted_exts)))
        except StopIteration:
            return
        for ext in sorted_exts:
@@ -73,9 +79,11 @@ def generate_target_documentation(outdir):
                    'juno_linux',
                    'juno_android']

    intro = '\nThis is a list of commonly used targets and their device '\
            'parameters, to see a complete for a complete reference please use the '\
            'WA :ref:`list command <list-command>`.\n\n\n'
    intro = (
        '\nThis is a list of commonly used targets and their device '
        'parameters, to see a complete for a complete reference please use the'
        ' WA :ref:`list command <list-command>`.\n\n\n'
    )

    pluginloader.clear()
    pluginloader.update(packages=['wa.framework.target.descriptor'])
@@ -112,7 +120,8 @@ def generate_config_documentation(config, outdir):
    if not os.path.exists(outdir):
        os.mkdir(outdir)

    outfile = os.path.join(outdir, '{}.rst'.format('_'.join(config.name.split())))
    config_name = '_'.join(config.name.split())
    outfile = os.path.join(outdir, '{}.rst'.format(config_name))
    with open(outfile, 'w') as wfh:
        wfh.write(get_params_rst(config.config_points))
@@ -1,4 +1,7 @@
nose
numpy
pandas
sphinx_rtd_theme>=0.3.1
sphinx_rtd_theme==1.0.0
sphinx==4.2
docutils<0.18
devlib @ git+https://github.com/ARM-software/devlib@master
doc/source/WA-logo-white.svg (new file, 78 lines)
@@ -0,0 +1,78 @@
[SVG image data omitted: white variant of the Workload Automation logo, Inkscape-generated, 232 x 128.6 px. After: Width | Height | Size: 5.7 KiB]
@ -23,6 +23,23 @@ iterating over all WA output directories found.
|
||||
:param path: must be the path to the top-level output directory (the one
|
||||
containing ``__meta`` subdirectory and ``run.log``).
|
||||
|
||||
WA output stored in a Postgres database by the ``Postgres`` output processor
|
||||
can be accessed via a :class:`RunDatabaseOutput` which can be initialized as follows:
|
||||
|
||||
.. class:: RunDatabaseOutput(password, host='localhost', user='postgres', port='5432', dbname='wa', run_uuid=None, list_runs=False)
|
||||
|
||||
The main interface into Postgres database containing WA results.
|
||||
|
||||
:param password: The password used to authenticate with
|
||||
:param host: The database host address. Defaults to ``'localhost'``
|
||||
:param user: The user name used to authenticate with. Defaults to ``'postgres'``
|
||||
:param port: The database connection port number. Defaults to ``'5432'``
|
||||
:param dbname: The database name. Defaults to ``'wa'``
|
||||
:param run_uuid: The ``run_uuid`` to identify the selected run
|
||||
:param list_runs: Will connect to the database and will print out the available runs
|
||||
with their corresponding run_uuids. Defaults to ``False``
|
||||
|
||||
|
||||
Example
|
||||
-------
|
||||
|
||||
@ -39,6 +56,32 @@ called ``wa_output`` in the current working directory we can initialize a
|
||||
...: output_directory = 'wa_output'
|
||||
...: run_output = RunOutput(output_directory)
|
||||
|
||||
Alternatively if the results have been stored in a Postgres database we can
initialize a ``RunDatabaseOutput`` as follows:

.. code-block:: python

    In [1]: from wa import RunDatabaseOutput
       ...:
       ...: db_settings = {
       ...:     'host': 'localhost',
       ...:     'port': '5432',
       ...:     'dbname': 'wa',
       ...:     'user': 'postgres',
       ...:     'password': 'wa'
       ...: }
       ...:
       ...: RunDatabaseOutput(list_runs=True, **db_settings)
    Available runs are:
    ========= ============ ============= =================== =================== ====================================
    Run Name  Project      Project Stage Start Time          End Time            run_uuid
    ========= ============ ============= =================== =================== ====================================
    Test Run  my_project   None          2018-11-29 14:53:08 2018-11-29 14:53:24 aa3077eb-241a-41d3-9610-245fd4e552a9
    run_1     my_project   None          2018-11-29 14:53:34 2018-11-29 14:53:37 4c2885c9-2f4a-49a1-bbc5-b010f8d6b12a
    ========= ============ ============= =================== =================== ====================================

    In [2]: run_uuid = '4c2885c9-2f4a-49a1-bbc5-b010f8d6b12a'
       ...: run_output = RunDatabaseOutput(run_uuid=run_uuid, **db_settings)
|
||||
|
||||
|
||||
From here we can retrieve various information about the run. For example if we
|
||||
@ -65,7 +108,7 @@ parameters and the metrics recorded from the first job was we can do the followi
|
||||
Out[5]: u'dhrystone'
|
||||
|
||||
# Print out all the runtime parameters and their values for this job
|
||||
In [6]: for k, v in job_1.spec.runtime_parameters.iteritems():
|
||||
In [6]: for k, v in job_1.spec.runtime_parameters.items():
|
||||
...: print (k, v)
|
||||
(u'airplane_mode': False)
|
||||
(u'brightness': 100)
|
||||
@ -73,7 +116,7 @@ parameters and the metrics recorded from the first job was we can do the followi
|
||||
(u'big_frequency': 1700000)
|
||||
(u'little_frequency': 1400000)
|
||||
|
||||
# Print out all the metrics avalible for this job
|
||||
# Print out all the metrics available for this job
|
||||
In [7]: job_1.metrics
|
||||
Out[7]:
|
||||
[<thread 0 score: 14423105 (+)>,
|
||||
@ -92,6 +135,15 @@ parameters and the metrics recorded from the first job was we can do the followi
|
||||
<total DMIPS: 52793 (+)>,
|
||||
<total score: 92758402 (+)>]
|
||||
|
||||
# Load the run results csv file into pandas
|
||||
In [7]: pd.read_csv(run_output.get_artifact_path('run_result_csv'))
|
||||
Out[7]:
|
||||
id workload iteration metric value units
|
||||
0 450000-wk1 dhrystone 1 thread 0 score 1.442310e+07 NaN
|
||||
1 450000-wk1 dhrystone 1 thread 0 DMIPS 8.209700e+04 NaN
|
||||
2 450000-wk1 dhrystone 1 thread 1 score 1.442310e+07 NaN
|
||||
3 450000-wk1 dhrystone 1 thread 1 DMIPS 8.720900e+04 NaN
|
||||
...
|
||||
|
||||
|
||||
We can also retrieve information about the target that the run was performed on
|
||||
@@ -214,7 +266,7 @@ methods
   Return the :class:`Metric` associated with the run (not the individual jobs)
   with the specified `name`.

   :return: The :class`Metric` object for the metric with the specified name.
   :return: The :class:`Metric` object for the metric with the specified name.
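
For illustration only (not part of the diff), a minimal sketch of looking up a
run-level metric; the output directory path and the metric name are assumptions:

.. code-block:: python

    from wa import RunOutput

    run_output = RunOutput('wa_output')                # assumed path to an existing WA output directory
    metric = run_output.get_metric('execution_time')   # assumed metric name
    if metric is not None:
        print(metric.name, metric.value, metric.units)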
|
||||
|
||||
|
||||
.. method:: RunOutput.get_job_spec(spec_id)
|
||||
@ -232,6 +284,56 @@ methods
|
||||
:return: A list of `str` labels of workloads that were part of this run.
|
||||
|
||||
|
||||
.. method:: RunOutput.add_classifier(name, value, overwrite=False)

   Add a classifier to the run as a whole. If a classifier with the specified
   ``name`` already exists, a ``ValueError`` will be raised, unless
   `overwrite=True` is specified.
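
As an illustrative sketch only (the classifier name and value below are made up,
not taken from the diff):

.. code-block:: python

    from wa import RunOutput

    run_output = RunOutput('wa_output')            # assumed path to an existing WA output directory
    run_output.add_classifier('board', 'juno-r2')  # tag the run as a whole
    # A second call with the same name raises ValueError unless overwrite=True:
    run_output.add_classifier('board', 'juno-r2-rev1', overwrite=True)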
|
||||
|
||||
|
||||
:class:`RunDatabaseOutput`
---------------------------

:class:`RunDatabaseOutput` provides access to the output of a WA :term:`run`,
including metrics, artifacts, metadata, and configuration stored in a Postgres database.
The majority of attributes and methods are the same as :class:`RunOutput`, however the
notable differences are:
|
||||
|
||||
``jobs``
|
||||
A list of :class:`JobDatabaseOutput` objects for each job that was executed
|
||||
during the run.
|
||||
|
||||
``basepath``
|
||||
A representation of the current database and host information backing this object.
|
||||
|
||||
methods
|
||||
~~~~~~~
|
||||
|
||||
.. method:: RunDatabaseOutput.get_artifact(name)

   Return the :class:`Artifact` specified by ``name``. This will only look
   at the run artifacts; this will not search the artifacts of the individual
   jobs. The `path` attribute of the :class:`Artifact` will be set to the Database OID of the object.

   :param name: The name of the artifact to retrieve.
   :return: The :class:`Artifact` with that name
   :raises HostError: If the artifact with the specified name does not exist.


.. method:: RunDatabaseOutput.get_artifact_path(name)

   If the artifact is a file this method returns a `StringIO` object containing
   the contents of the artifact specified by ``name``. If the artifact is a
   directory, the method returns a path to a locally extracted version of the
   directory which is left to the user to remove after use. This will only look
   at the run artifacts; this will not search the artifacts of the individual
   jobs.

   :param name: The name of the artifact whose path to retrieve.
   :return: A `StringIO` object with the contents of the artifact
   :raises HostError: If the artifact with the specified name does not exist.
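
For illustration only, a hedged sketch of consuming the returned file-like
object; the connection settings and ``run_uuid`` follow the example earlier in
this document, and ``'run_result_csv'`` is the artifact name used there:

.. code-block:: python

    import pandas as pd
    from wa import RunDatabaseOutput

    ro = RunDatabaseOutput(password='wa', run_uuid='4c2885c9-2f4a-49a1-bbc5-b010f8d6b12a')
    buf = ro.get_artifact_path('run_result_csv')  # StringIO-like object for file artifacts
    df = pd.read_csv(buf)                         # read it without touching the filesystem
    print(df.head())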
|
||||
|
||||
|
||||
:class:`JobOutput`
|
||||
------------------
|
||||
|
||||
@ -307,16 +409,15 @@ artifacts, metadata, and configuration. It has the following attributes:
|
||||
methods
|
||||
~~~~~~~
|
||||
|
||||
.. method:: RunOutput.get_artifact(name)
|
||||
.. method:: JobOutput.get_artifact(name)
|
||||
|
||||
Return the :class:`Artifact` specified by ``name`` associated with this job.
|
||||
|
||||
:param name: The name of the artifact who's path to retrieve.
|
||||
:param name: The name of the artifact to retrieve.
|
||||
:return: The :class:`Artifact` with that name
|
||||
:raises HostError: If the artifact with the specified name does not exist.
|
||||
|
||||
|
||||
.. method:: RunOutput.get_artifact_path(name)
|
||||
.. method:: JobOutput.get_artifact_path(name)
|
||||
|
||||
Return the path to the file backing the artifact specified by ``name``,
|
||||
associated with this job.
|
||||
@ -325,13 +426,58 @@ methods
|
||||
:return: The path to the artifact
|
||||
:raises HostError: If the artifact with the specified name does not exist.
|
||||
|
||||
|
||||
.. method:: RunOutput.get_metric(name)
|
||||
.. method:: JobOutput.get_metric(name)
|
||||
|
||||
Return the :class:`Metric` associated with this job with the specified
|
||||
`name`.
|
||||
|
||||
:return: The :class`Metric` object for the metric with the specified name.
|
||||
:return: The :class:`Metric` object for the metric with the specified name.
|
||||
|
||||
.. method:: JobOutput.add_classifier(name, value, overwrite=False)
|
||||
|
||||
Add a classifier to the job. The classifier will be propagated to all
|
||||
existing artifacts and metrics, as well as those added afterwards. If a
|
||||
classifier with the specified ``name`` already exists, a ``ValueError`` will
|
||||
be raised, unless `overwrite=True` is specified.
|
||||
|
||||
|
||||
:class:`JobDatabaseOutput`
---------------------------

:class:`JobDatabaseOutput` provides access to the output of a single :term:`job`
executed during a WA :term:`run`, including metrics, artifacts, metadata, and
configuration stored in a Postgres database.
The majority of attributes and methods are the same as :class:`JobOutput`, however the
notable differences are:
|
||||
|
||||
``basepath``
|
||||
A representation of the current database and host information backing this object.
|
||||
|
||||
|
||||
methods
|
||||
~~~~~~~
|
||||
|
||||
.. method:: JobDatabaseOutput.get_artifact(name)
|
||||
|
||||
Return the :class:`Artifact` specified by ``name`` associated with this job.
|
||||
The `path` attribute of the :class:`Artifact` will be set to the Database
|
||||
OID of the object.
|
||||
|
||||
:param name: The name of the artifact to retrieve.
|
||||
:return: The :class:`Artifact` with that name
|
||||
:raises HostError: If the artifact with the specified name does not exist.
|
||||
|
||||
.. method:: JobDatabaseOutput.get_artifact_path(name)

   If the artifact is a file this method returns a `StringIO` object containing
   the contents of the artifact specified by ``name`` associated with this job.
   If the artifact is a directory, the method returns a path to a locally
   extracted version of the directory which is left to the user to remove after
   use.

   :param name: The name of the artifact whose path to retrieve.
   :return: A `StringIO` object with the contents of the artifact
   :raises HostError: If the artifact with the specified name does not exist.
|
||||
|
||||
|
||||
:class:`Metric`
|
||||
@ -371,6 +517,11 @@ A :class:`Metric` has the following attributes:
|
||||
or they may have been added by the workload to help distinguish between
|
||||
otherwise identical metrics.
|
||||
|
||||
``label``
    This is a string constructed from the name and classifiers, to provide a
    more unique identifier, e.g. for grouping values across iterations. The
    format is in the form ``name/classifier1=value1/classifier2=value2/...``.
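
For example (an assumed metric, shown only to make the label format concrete):

.. code-block:: python

    # Illustrative only: how a label is composed from a metric's name and classifiers.
    name = 'score'
    classifiers = {'workload': 'dhrystone', 'iteration': 2}
    label = name + ''.join('/{}={}'.format(k, v) for k, v in classifiers.items())
    # label == 'score/workload=dhrystone/iteration=2'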
|
||||
|
||||
|
||||
:class:`Artifact`
|
||||
-----------------
|
||||
@ -420,7 +571,7 @@ An :class:`Artifact` has the following attributes:
|
||||
it is the opposite of ``export``, but in general may also be
|
||||
discarded.
|
||||
|
||||
.. note:: whether a file is marked as ``log``/``data`` or ``raw``
|
||||
.. note:: Whether a file is marked as ``log``/``data`` or ``raw``
|
||||
depends on how important it is to preserve this file,
|
||||
e.g. when archiving, vs how much space it takes up.
|
||||
Unlike ``export`` artifacts which are (almost) always
|
||||
@@ -471,6 +622,12 @@ The available attributes of the class are as follows:
    The name of the target class that was used to interact with the device
    during the run, e.g. ``"AndroidTarget"``, ``"LinuxTarget"`` etc.

``modules``
    A list of names of modules that have been loaded by the target. Modules
    provide additional functionality, such as access to ``cpufreq``, and which
    modules are installed may impact how much of the ``TargetInfo`` has been
    populated.

``cpus``
    A list of :class:`CpuInfo` objects describing the capabilities of each CPU.
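
An illustrative sketch of inspecting this information from a run's output; the
``target_info`` accessor and the printed fields are assumptions based on the
attributes listed above:

.. code-block:: python

    from wa import RunOutput

    run_output = RunOutput('wa_output')   # assumed path to an existing WA output directory
    info = run_output.target_info
    print(info.target)                    # e.g. "AndroidTarget"
    print(info.modules)                   # names of devlib modules loaded on the target
    print(len(info.cpus))                 # one CpuInfo entry per CPU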
|
||||
|
||||
|
@@ -178,6 +178,16 @@ methods.
    locations) and device will be searched for an application with a matching
    package name.

``supported_versions``
    This attribute should be a list of apk versions that are suitable for this
    workload; if a specific apk version is not specified then any available
    supported version may be chosen.

``activity``
    This attribute can be optionally set to override the default activity that
    will be extracted from the selected APK file which will be used when
    launching the APK.

``view``
    This is the "view" associated with the application. This is used by
    instruments like ``fps`` to monitor the current framerate being generated by
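
A minimal, hypothetical sketch of an APK-based workload setting these
attributes (the class, package, activity and view values are made up; only the
attribute names come from the text above):

.. code-block:: python

    from wa import ApkWorkload

    class MyBenchmark(ApkWorkload):

        name = 'mybenchmark'
        package_names = ['com.example.mybenchmark']   # hypothetical package
        supported_versions = ['1.2.0', '1.3.1']       # APK versions WA may pick when none is requested
        activity = '.MainActivity'                    # override the activity auto-detected from the APK
        # View used by instruments such as ``fps`` to find the surface to monitor.
        view = 'SurfaceView - com.example.mybenchmark/.MainActivity'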
@ -2,9 +2,427 @@
|
||||
What's New in Workload Automation
|
||||
=================================
|
||||
|
||||
-------------
*************
Version 3.3.1
*************
|
||||
|
||||
.. warning:: This is the last release supporting Python 3.5 and Python 3.6.
|
||||
Subsequent releases will support Python 3.7+.
|
||||
|
||||
New Features:
|
||||
==============
|
||||
|
||||
Commands:
|
||||
---------
|
||||
|
||||
Instruments:
|
||||
------------
|
||||
- ``perf``: Add support for ``report-sample``.
|
||||
|
||||
Workloads:
|
||||
----------------
|
||||
- ``PCMark``: Add support for PCMark 3.0.
|
||||
- ``Antutu``: Add support for 9.1.6.
|
||||
- ``Geekbench``: Add support for Geekbench5.
|
||||
- ``gfxbench``: Support the non corporate version.
|
||||
|
||||
Fixes/Improvements
|
||||
==================
|
||||
|
||||
Framework:
|
||||
----------
|
||||
- Fix installation on systems without git installed.
|
||||
- Avoid querying online cpus if hotplug is disabled.
|
||||
|
||||
Dockerfile:
|
||||
-----------
|
||||
- Update base image to Ubuntu 20.04.
|
||||
|
||||
Instruments:
|
||||
------------
|
||||
- ``perf``: Fix parsing csv when using interval-only-values.
|
||||
- ``perf``: Improve error reporting of an invalid agenda.
|
||||
|
||||
Output Processors:
|
||||
------------------
|
||||
- ``postgres``: Fixed SQL command when creating a new event.
|
||||
|
||||
Workloads:
|
||||
----------
|
||||
- ``speedometer``: Fix adb reverse when rebooting a device.
|
||||
- ``googleplaybook``: Support newer apk version.
|
||||
- ``googlephotos``: Support newer apk version.
|
||||
- ``gmail``: Support newer apk version.
|
||||
|
||||
Other:
|
||||
------
|
||||
- Upgrade Android Gradle to 7.2 and Gradle plugin to 4.2.
|
||||
|
||||
***********
|
||||
Version 3.3
|
||||
***********
|
||||
|
||||
New Features:
|
||||
==============
|
||||
|
||||
Commands:
|
||||
---------
|
||||
- Add ``report`` command to provide a summary of a run.
|
||||
|
||||
Instruments:
|
||||
------------
|
||||
- Add ``proc_stat`` instrument to monitor CPU load using data from ``/proc/stat``.
|
||||
|
||||
Framework:
|
||||
----------
|
||||
- Add support for simulating atomic writes to prevent race conditions when running concurrent instances of WA.
|
||||
- Add support for file transfer over SSH connections via SFTP, falling back to the SCP implementation.
|
||||
- Support detection of logcat buffer overflow and present a warning if this occurs.
|
||||
- Allow skipping all remaining jobs if a job had exhausted all of its retries.
|
||||
- Add polling mechanism for file transfers rather than relying on timeouts.
|
||||
- Add `run_completed` reboot policy to enable rebooting a target after a run has been completed.
|
||||
|
||||
|
||||
Android Devices:
|
||||
----------------
|
||||
- Enable configuration of whether to keep the screen on while the device is plugged in.
|
||||
|
||||
Output Processors:
|
||||
------------------
|
||||
- Enable the use of cascading deletion in Postgres databases to clean up after deletion of a run entry.
|
||||
|
||||
|
||||
Fixes/Improvements
|
||||
==================
|
||||
|
||||
Framework:
|
||||
----------
|
||||
- Improvements to the ``process`` command to correctly handle skipped and in process jobs.
|
||||
- Add support for deprecated parameters allowing for a warning to be raised when providing
|
||||
a parameter that will no longer have an effect.
|
||||
- Switch implementation of SSH connections to use Paramiko for greater stability.
|
||||
- By default use sftp for file transfers with SSH connections, allow falling back to scp
|
||||
by setting ``use_scp``.
|
||||
- Fix callbacks not being disconnected correctly when requested.
|
||||
- ``ApkInfo`` objects are now cached to reduce re-parsing of APK files.
|
||||
- Speed up discovery of wa output directories.
|
||||
- Fix merge handling of parameters from multiple files.
|
||||
|
||||
Dockerfile:
|
||||
-----------
|
||||
- Install additional instruments for use in the docker environment.
|
||||
- Fix environment variables not being defined in non interactive environments.
|
||||
|
||||
Instruments:
|
||||
------------
|
||||
- ``trace_cmd`` additional fixes for python 3 support.
|
||||
|
||||
Output Processors:
|
||||
------------------
|
||||
- ``postgres``: Fixed SQL command when creating a new event.
|
||||
|
||||
Workloads:
|
||||
----------
|
||||
- ``aitutu``: Improve reliability of results extraction.
|
||||
- ``androbench``: Enabling dismissing of additional popups on some devices.
|
||||
- ``antutu``: Now supports major version 8 in addition to version 7.X.
|
||||
- ``exoplayer``: Add support for Android 10.
|
||||
- ``googlephotos``: Support newer apk version.
|
||||
- ``gfxbench``: Allow user configuration for which tests should be run.
|
||||
- ``gfxbench``: Improved score detection for a wider range of devices.
|
||||
- ``gfxbench``: Moved results extraction out of run stage.
|
||||
- ``jankbench``: Support newer versions of Pandas for processing.
|
||||
- ``pcmark``: Add support for handling additional popups and installation flows.
|
||||
- ``pcmark``: No longer clear and re-download test data before each execution.
|
||||
- ``speedometer``: Enable the workload to run offline and drop the requirement for
  UiAutomator. To support this, root access is now required to run the workload.
|
||||
- ``youtube``: Update to support later versions of the apk.
|
||||
|
||||
Other:
|
||||
------
|
||||
- ``cpustates``: Improved name handling for unknown idle states.
|
||||
|
||||
|
||||
***********
|
||||
Version 3.2
|
||||
***********
|
||||
|
||||
.. warning:: This release only supports Python 3.5+. Python 2 support has now
|
||||
been dropped.
|
||||
|
||||
Fixes/Improvements
|
||||
==================
|
||||
|
||||
Framework:
|
||||
----------
|
||||
- ``TargetInfo`` now tracks installed modules and will ensure the cache is
|
||||
also updated on module change.
|
||||
- Migrated the build scripts for uiauto based workloads to Python 3.
|
||||
- Uiauto applications now target SDK version 28 to prevent PlayProtect
|
||||
blocking the installation of the automation apks on some devices.
|
||||
- The workload metadata now includes the apk package name if applicable.
|
||||
|
||||
Instruments:
|
||||
------------
|
||||
- ``energy_instruments`` will now have their ``teardown`` method called
|
||||
correctly.
|
||||
- ``energy_instruments``: Added a ``keep_raw`` parameter to control whether
|
||||
raw files generated during execution should be deleted upon teardown.
|
||||
- Update relevant instruments to make use of the new devlib collector
|
||||
interface, for more information please see the
|
||||
`devlib documentation <https://devlib.readthedocs.io/en/latest/collectors.html>`_.
|
||||
|
||||
Output Processors:
|
||||
------------------
|
||||
- ``postgres``: If initialisation fails then the output processor will no
|
||||
longer attempt to reconnect at a later point during the run.
|
||||
- ``postgres``: Will now ensure that the connection to the database is
  re-established if it is dropped, e.g. due to a long-running workload.
|
||||
- ``postgres``: Change the type of the ``hostid`` field to ``Bigint`` to
|
||||
allow a larger range of ids.
|
||||
- ``postgres``: Bump schema version to 1.5.
|
||||
- ``perf``: Added support for the ``simpleperf`` profiling tool for android
|
||||
devices.
|
||||
- ``perf``: Added support for the perf ``record`` command.
|
||||
- ``cpustates``: Improve handling of situations where cpufreq and/or cpuinfo
|
||||
data is unavailable.
|
||||
|
||||
Workloads:
|
||||
----------
|
||||
- ``adobereader``: Now supports apk version 19.7.1.10709.
|
||||
- ``antutu``: Supports dismissing of popup asking to create a shortcut on
|
||||
the homescreen.
|
||||
- ``gmail``: Now supports apk version 2019.05.26.252424914.
|
||||
- ``googlemaps``: Now supports apk version 10.19.1.
|
||||
- ``googlephotos``: Now supports apk version 4.28.0.
|
||||
- ``geekbench``: Added support for versions 4.3.4, 4.4.0 and 4.4.2.
|
||||
- ``geekbench-corporate``: Added support for versions 5.0.1 and 5.0.3.
|
||||
- ``pcmark``: Now locks device orientation to portrait to increase
|
||||
compatibility.
|
||||
- ``pcmark``: Supports dismissing new Android 10 permission warnings.
|
||||
|
||||
Other:
|
||||
------
|
||||
- Improve documentation to help debugging module installation errors.
|
||||
|
||||
*************
|
||||
Version 3.1.4
|
||||
*************
|
||||
|
||||
.. warning:: This is the last release that supports Python 2. Subsequent versions
             will support Python 3.5+ only.
|
||||
|
||||
New Features:
|
||||
==============
|
||||
|
||||
Framework:
|
||||
----------
|
||||
- ``ApkWorkload``: Allow specifying a maximum and minimum version of an APK
|
||||
instead of requiring a specific version.
|
||||
- ``TestPackageHandler``: Added to support running android applications that
|
||||
are invoked via ``am instrument``.
|
||||
- Directories can now be added as ``Artifacts``.
|
||||
|
||||
Workloads:
|
||||
----------
|
||||
- ``aitutu``: Executes the Aitutu Image Speed/Accuracy and Object
|
||||
Speed/Accuracy tests.
|
||||
- ``uibench``: Run a configurable activity of the UIBench workload suite.
|
||||
- ``uibenchjanktests``: Run an automated and instrument version of the
|
||||
UIBench JankTests.
|
||||
- ``motionmark``: Run a browser graphical benchmark.
|
||||
|
||||
Other:
|
||||
------
|
||||
- Added ``requirements.txt`` as a reference for known working package versions.
|
||||
|
||||
Fixes/Improvements
|
||||
==================
|
||||
|
||||
Framework:
|
||||
----------
|
||||
- ``JobOutput``: Added an ``augmentation`` attribute to allow listing of
|
||||
enabled augmentations for individual jobs.
|
||||
- Better error handling for misconfigured job selection.
|
||||
- All ``Workload`` classes now have an ``uninstall`` parameter to control whether
|
||||
any binaries installed to the target should be uninstalled again once the
|
||||
run has completed.
|
||||
- The ``cleanup_assets`` parameter is now more consistently utilized across
|
||||
workloads.
|
||||
- ``ApkWorkload``: Added an ``activity`` attribute to allow for overriding the
  automatically detected activity from the APK.
|
||||
- ``ApkWorkload`` Added support for providing an implicit activity path.
|
||||
- Fixed retrieving job level artifacts from a database backend.
|
||||
|
||||
Output Processors:
|
||||
------------------
|
||||
- ``SysfsExtractor``: Ensure that the extracted directories are added as
|
||||
``Artifacts``.
|
||||
- ``InterruptStatsInstrument``: Ensure that the output files are added as
|
||||
``Artifacts``.
|
||||
- ``Postgres``: Fix missing ``system_id`` field from ``TargetInfo``.
|
||||
- ``Postgres``: Support uploading directory ``Artifacts``.
|
||||
- ``Postgres``: Bump the schema version to v1.3.
|
||||
|
||||
Workloads:
|
||||
----------
|
||||
- ``geekbench``: Improved apk version handling.
|
||||
- ``geekbench``: Now supports apk version 4.3.2.
|
||||
|
||||
Other:
|
||||
------
|
||||
- ``Dockerfile``: Now installs all optional extras for use with WA.
|
||||
- Fixed support for YAML anchors.
|
||||
- Fixed building of documentation with Python 3.
|
||||
- Changed shorthand of installing all of WA extras to `all` as per
|
||||
the documentation.
|
||||
- Upgraded the Dockerfile to use Ubuntu 18.10 and Python 3.
|
||||
- Restricted maximum versions of ``numpy`` and ``pandas`` for Python 2.7.
|
||||
|
||||
|
||||
*************
|
||||
Version 3.1.3
|
||||
*************
|
||||
|
||||
Fixes/Improvements
|
||||
==================
|
||||
|
||||
Other:
|
||||
------
|
||||
- Security update for PyYAML to attempt prevention of arbitrary code execution
|
||||
during parsing.
|
||||
|
||||
*************
|
||||
Version 3.1.2
|
||||
*************
|
||||
|
||||
Fixes/Improvements
|
||||
==================
|
||||
|
||||
Framework:
|
||||
----------
|
||||
- Implement an explicit check for Devlib versions to ensure that versions
|
||||
are kept in sync with each other.
|
||||
- Added a ``View`` parameter to ApkWorkloads for use with certain instruments
|
||||
for example ``fps``.
|
||||
- Added ``"supported_versions"`` attribute to workloads to allow specifying a
|
||||
list of supported version for a particular workload.
|
||||
- Change default behaviour to run any available version of a workload if a
|
||||
specific version is not specified.
|
||||
|
||||
Output Processors:
|
||||
------------------
|
||||
- ``Postgres``: Fix handling of ``screen_resoultion`` during processing.
|
||||
|
||||
Other
|
||||
-----
|
||||
- Added additional information to documentation
|
||||
- Added fix for Devlib's ``KernelConfig`` refactor
|
||||
- Added a ``"label"`` property to ``Metrics``
|
||||
|
||||
*************
|
||||
Version 3.1.1
|
||||
*************
|
||||
|
||||
Fixes/Improvements
|
||||
==================
|
||||
|
||||
Other
|
||||
-----
|
||||
- Improve formatting when displaying metrics
|
||||
- Update revent binaries to include latest fixes
|
||||
- Update DockerImage to use new released version of WA and Devlib
|
||||
- Fix broken package on PyPi
|
||||
|
||||
*************
Version 3.1.0
*************

New Features:
==============

Commands
---------
- ``create database``: Added a ``database`` sub-command to the
  :ref:`create command <create-command>` in order to initialize a PostgreSQL
  database to allow for storing WA output with the Postgres output processor.

Output Processors:
------------------
- ``Postgres``: Added output processor which can be used to populate a
  Postgres database with the output generated from a WA run.
- ``logcat-regex``: Add new output processor to extract arbitrary "key"
  "value" pairs from logcat.

Configuration:
--------------
- :ref:`Configuration Includes <config-include>`: Add support for including
  other YAML files inside agendas and config files using ``"include#:"``
  entries.
- :ref:`Section groups <section-groups>`: This allows for a ``group`` entry
  to be specified for each section and will automatically cross-product the
  relevant sections with sections from other groups, adding the relevant
  classifiers.

Framework:
----------
- Added support for using the :ref:`OutputAPI <output_processing_api>` with a
  Postgres database backend. Used to retrieve and
  :ref:`process <processing_output>` run data uploaded by the ``Postgres``
  output processor.

Workloads:
----------
- ``gfxbench-corporate``: Execute a set of on- and offscreen graphical benchmarks from
  GFXBench including Car Chase and Manhattan.
- ``glbench``: Measures the graphics performance of Android devices by
  testing the underlying OpenGL (ES) implementation.


Fixes/Improvements
==================

Framework:
----------
- Remove quotes from ``sudo_cmd`` parameter default value due to changes in
  devlib.
- Various Python 3 related fixes.
- Ensure plugin names are converted to identifiers internally to act more
  consistently when dealing with names containing ``-``'s etc.
- Now correctly updates RunInfo with project and run name information.
- Add versioning support for POD structures with the ability to
  automatically update data structures / formats to new versions.

Commands:
---------
- Fix revent target initialization.
- Fix revent argument validation.

Workloads:
----------
- ``Speedometer``: Close open tabs upon workload completion.
- ``jankbench``: Ensure that the logcat monitor thread is terminated
  correctly to prevent left-over adb processes.
- UiAutomator workloads are now able to dismiss the Android warning that a
  workload has not been designed for the latest version of Android.

Other:
------
- Report additional metadata about the target, including ``system_id`` and
  ``page_size_kb``.
- Uses cache directory to reduce target calls, e.g. will now use a cached
  version of TargetInfo if a local copy is found.
- Update recommended :ref:`installation <github>` commands when installing from
  GitHub due to pip not following dependency links correctly.
- Fix incorrect parameter names in runtime parameter documentation.

--------------------------------------------------


*************
Version 3.0.0
*************

WA3 is a more or less from-scratch re-write of WA2. We have attempted to
maintain configuration-level compatibility wherever possible (so WA2 agendas
...
believe to be no longer useful.
do the port yourselves :-) ).

New Features
============

- Python 3 support. WA now runs on both Python 2 and Python 3.

...

.. _devlib: https://github.com/ARM-software/devlib

Changes
=======

- Configuration files ``config.py`` are now specified in YAML format in
  ``config.yaml``. WA3 has support for automatic conversion of the default
@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2018 ARM Limited
|
||||
# Copyright 2023 ARM Limited
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
@ -68,7 +68,7 @@ master_doc = 'index'
|
||||
|
||||
# General information about the project.
|
||||
project = u'wa'
|
||||
copyright = u'2018, ARM Limited'
|
||||
copyright = u'2023, ARM Limited'
|
||||
author = u'ARM Limited'
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
@ -135,7 +135,9 @@ html_theme = 'sphinx_rtd_theme'
|
||||
# Theme options are theme-specific and customize the look and feel of a theme
|
||||
# further. For a list of options available for each theme, see the
|
||||
# documentation.
|
||||
#html_theme_options = {}
|
||||
html_theme_options = {
|
||||
'logo_only': True
|
||||
}
|
||||
|
||||
# Add any paths that contain custom themes here, relative to this directory.
|
||||
#html_theme_path = []
|
||||
@ -149,7 +151,7 @@ html_theme = 'sphinx_rtd_theme'
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top
|
||||
# of the sidebar.
|
||||
#html_logo = None
|
||||
html_logo = 'WA-logo-white.svg'
|
||||
|
||||
# The name of an image file (within the static path) to use as favicon of the
|
||||
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
|
||||
|
@ -343,7 +343,7 @@ see the
|
||||
|
||||
A list of additional :class:`Parameters` the output processor can take.
|
||||
|
||||
:initialize():
|
||||
:initialize(context):
|
||||
|
||||
This method will only be called once during the workload run
|
||||
therefore operations that only need to be performed initially should
|
||||
@ -373,7 +373,7 @@ see the
|
||||
existing data collected/generated for the run as a whole. E.g.
|
||||
uploading them to a database etc.
|
||||
|
||||
:finalize():
|
||||
:finalize(context):
|
||||
|
||||
This method is the complement to the initialize method and will also
|
||||
only be called once.
|
||||
|
@ -47,6 +47,10 @@ submitting a pull request:
|
||||
- If significant additions have been made to the framework, unit
|
||||
tests should be added to cover the new functionality.
|
||||
|
||||
- If modifications have been made to the UI Automation source of a workload, the
|
||||
corresponding APK should be rebuilt and submitted as part of the same pull
|
||||
request. This can be done via the ``build.sh`` script in the relevant
|
||||
``uiauto`` subdirectory.
|
||||
- If modifications have been made to documentation (this includes description
|
||||
attributes for Parameters and Extensions), documentation should be built to
|
||||
make sure no errors or warning during build process, and a visual inspection
|
||||
|
@ -37,8 +37,8 @@ This section contains reference information common to plugins of all types.
|
||||
The Context
|
||||
~~~~~~~~~~~
|
||||
|
||||
.. note:: For clarification on the meaning of "workload specification" ("spec"), "job"
|
||||
and "workload" and the distiction between them, please see the :ref:`glossary <glossary>`.
|
||||
.. note:: For clarification on the meaning of "workload specification" "spec", "job"
|
||||
and "workload" and the distinction between them, please see the :ref:`glossary <glossary>`.
|
||||
|
||||
The majority of methods in plugins accept a context argument. This is an
|
||||
instance of :class:`wa.framework.execution.ExecutionContext`. It contains
|
||||
@ -119,7 +119,7 @@ context.output_directory
|
||||
This is the output directory for the current iteration. This will be an
iteration-specific subdirectory under the main results location. If
there is no current iteration (e.g. when processing overall run results)
this will point to the same location as ``run_output_directory``.
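
For example, a workload's ``extract_results`` method might use it roughly as
follows (a sketch mirroring the workload example elsewhere in this
documentation; the file names are purely illustrative):

.. code-block:: python

    def extract_results(self, context):
        # Pull a file produced on the target into this iteration's output directory.
        self.host_outfile = os.path.join(context.output_directory, 'timing_results')
        self.target.pull(self.target_outfile, self.host_outfile)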
|
||||
|
||||
|
||||
Additionally, the global ``wa.settings`` object exposes one other location:
|
||||
@ -158,7 +158,7 @@ irrespective of the host's path notation. For example:
|
||||
.. note:: Output processors, unlike workloads and instruments, do not have their
|
||||
own target attribute as they are designed to be able to be run offline.
|
||||
|
||||
.. _plugin-parmeters:
|
||||
.. _plugin-parameters:
|
||||
|
||||
Parameters
|
||||
~~~~~~~~~~~
|
||||
|
@ -5,10 +5,12 @@ Convention for Naming revent Files for Revent Workloads
|
||||
-------------------------------------------------------------------------------
|
||||
|
||||
There is a convention for naming revent files which you should follow if you
|
||||
want to record your own revent files. Each revent file must start with the
|
||||
device name(case sensitive) then followed by a dot '.' then the stage name
|
||||
then '.revent'. All your custom revent files should reside at
|
||||
``'~/.workload_automation/dependencies/WORKLOAD NAME/'``. These are the current
|
||||
want to record your own revent files. Each revent file must be called (case sensitive)
|
||||
``<device name>.<stage>.revent``,
|
||||
where ``<device name>`` is the name of your device (as defined by the model
|
||||
name of your device which can be retrieved with
|
||||
``adb shell getprop ro.product.model`` or by the ``name`` attribute of your
|
||||
customized device class), and ``<stage>`` is one of the following currently
|
||||
supported stages:
|
||||
|
||||
:setup: This stage is where the application is loaded (if present). It is
|
||||
@ -26,10 +28,12 @@ Only the run stage is mandatory, the remaining stages will be replayed if a
|
||||
recording is present otherwise no actions will be performed for that particular
|
||||
stage.
|
||||
|
||||
For instance, to add a custom revent files for a device named "mydevice" and
|
||||
a workload name "myworkload", you need to add the revent files to the directory
|
||||
``/home/$WA_USER_HOME/dependencies/myworkload/revent_files`` creating it if
|
||||
necessary. ::
|
||||
All your custom revent files should reside at
|
||||
``'$WA_USER_DIRECTORY/dependencies/WORKLOAD NAME/'``. So
|
||||
typically to add a custom revent files for a device named "mydevice" and a
|
||||
workload name "myworkload", you would need to add the revent files to the
|
||||
directory ``~/.workload_automation/dependencies/myworkload/revent_files``
|
||||
creating the directory structure if necessary. ::
|
||||
|
||||
mydevice.setup.revent
|
||||
mydevice.run.revent
|
||||
recordings in scripts. Here is an example:

.. code-block:: python

    from wa.utils.revent import ReventRecording

    with ReventRecording('/path/to/recording.revent') as recording:
        print("Recording: {}".format(recording.filepath))
        print("There are {} input events".format(recording.num_events))
        print("Over a total of {} seconds".format(recording.duration))
@ -58,22 +58,28 @@ will automatically generate a workload in the your ``WA_CONFIG_DIR/plugins``. If
|
||||
you wish to specify a custom location this can be provided with ``-p
|
||||
<path>``
|
||||
|
||||
A typical invocation of the :ref:`create <create-command>` command would be in
|
||||
the form::
|
||||
|
||||
wa create workload -k <workload_kind> <workload_name>
|
||||
|
||||
|
||||
.. _adding-a-basic-workload-example:
|
||||
|
||||
Adding a Basic Workload
|
||||
-----------------------
|
||||
|
||||
To add a basic workload you can simply use the command::
|
||||
To add a ``basic`` workload template for our example workload we can simply use the
|
||||
command::
|
||||
|
||||
wa create workload basic
|
||||
wa create workload -k basic ziptest
|
||||
|
||||
This will generate a very basic workload with dummy methods for the workload
|
||||
interface and it is left to the developer to add any required functionality to
|
||||
the workload.
|
||||
This will generate a very basic workload with dummy methods for the each method in
|
||||
the workload interface and it is left to the developer to add any required functionality.
|
||||
|
||||
Not all the methods are required to be implemented, this example shows how a
|
||||
subset might be used to implement a simple workload that times how long it takes
|
||||
to compress a file of a particular size on the device.
|
||||
Not all the methods from the interface are required to be implemented, this
|
||||
example shows how a subset might be used to implement a simple workload that
|
||||
times how long it takes to compress a file of a particular size on the device.
|
||||
|
||||
|
||||
.. note:: This is intended as an example of how to implement the Workload
|
||||
@ -87,14 +93,15 @@ in this example we are implementing a very simple workload and do not
|
||||
require any additional feature so shall inherit directly from the the base
|
||||
:class:`Workload` class. We then need to provide a ``name`` for our workload
|
||||
which is what will be used to identify your workload for example in an
|
||||
agenda or via the show command.
|
||||
agenda or via the show command, if you used the `create` command this will
|
||||
already be populated for you.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import os
|
||||
from wa import Workload, Parameter
|
||||
|
||||
class ZipTestWorkload(Workload):
|
||||
class ZipTest(Workload):
|
||||
|
||||
name = 'ziptest'
|
||||
|
||||
@ -113,7 +120,7 @@ separated by a new line.
|
||||
'''
|
||||
|
||||
In order to allow for additional configuration of the workload from a user a
|
||||
list of :ref:`parameters <plugin-parmeters>` can be supplied. These can be
|
||||
list of :ref:`parameters <plugin-parameters>` can be supplied. These can be
|
||||
configured in a variety of different ways. For example here we are ensuring that
|
||||
the value of the parameter is an integer and larger than 0 using the ``kind``
|
||||
and ``constraint`` options, also if no value is provided we are providing a
|
||||
@ -176,7 +183,7 @@ allow it to decide whether to keep the file or not.
|
||||
# Pull the results file to the host
|
||||
self.host_outfile = os.path.join(context.output_directory, 'timing_results')
|
||||
self.target.pull(self.target_outfile, self.host_outfile)
|
||||
context.add_artifact('ziptest-results', host_output_file, kind='raw')
|
||||
context.add_artifact('ziptest-results', self.host_outfile, kind='raw')
|
||||
|
||||
The ``update_output`` method we can do any generation of metrics that we wish to
|
||||
for our workload. In this case we are going to simply convert the times reported
|
||||
@ -252,7 +259,7 @@ The full implementation of this workload would look something like:
|
||||
# Pull the results file to the host
|
||||
self.host_outfile = os.path.join(context.output_directory, 'timing_results')
|
||||
self.target.pull(self.target_outfile, self.host_outfile)
|
||||
context.add_artifact('ziptest-results', host_output_file, kind='raw')
|
||||
context.add_artifact('ziptest-results', self.host_outfile, kind='raw')
|
||||
|
||||
def update_output(self, context):
|
||||
super(ZipTestWorkload, self).update_output(context)
|
||||
@ -485,9 +492,10 @@ Adding an Instrument
|
||||
====================
|
||||
This is an example of how we would create an instrument which will trace device
errors using a custom "trace" binary file. For more detailed information please see the
:ref:`Instrument Reference <instrument-reference>`. The first thing to do is to create
a new file under ``$WA_USER_DIRECTORY/plugins/`` and subclass
:class:`Instrument`. Make sure to overwrite the variable name with what we want our instrument
to be called and then locate our binary for the instrument.

::

    class TraceErrorsInstrument(Instrument):

        name = 'trace-errors'

        def __init__(self, target, **kwargs):
            super(TraceErrorsInstrument, self).__init__(target, **kwargs)
            self.binary_name = 'trace'
            self.binary_file = os.path.join(os.path.dirname(__file__), self.binary_name)
            self.trace_on_target = None
@ -533,21 +541,20 @@ again decorated the method. ::
|
||||
Once we have generated our result data we need to retrieve it from the device
|
||||
for further processing or adding directly to WA's output for that job. For
|
||||
example for trace data we will want to pull it to the device and add it as a
|
||||
:ref:`artifact <artifact>` to WA's :ref:`context <context>` as shown below::
|
||||
|
||||
def extract_results(self, context):
|
||||
# pull the trace file from the target
|
||||
self.result = os.path.join(self.target.working_directory, 'trace.txt')
|
||||
self.target.pull(self.result, context.working_directory)
|
||||
context.add_artifact('error_trace', self.result, kind='export')
|
||||
|
||||
Once we have retrieved the data we can now do any further processing and add any
|
||||
relevant :ref:`Metrics <metrics>` to the :ref:`context <context>`. For this we
|
||||
will use the the ``add_metric`` method to add the results to the final output
|
||||
for that workload. The method can be passed 4 params, which are the metric
|
||||
`key`, `value`, `unit` and `lower_is_better`. ::
|
||||
:ref:`artifact <artifact>` to WA's :ref:`context <context>`. Once we have
|
||||
retrieved the data, we can now do any further processing and add any relevant
|
||||
:ref:`Metrics <metrics>` to the :ref:`context <context>`. For this we will use
|
||||
the the ``add_metric`` method to add the results to the final output for that
|
||||
workload. The method can be passed 4 params, which are the metric `key`,
|
||||
`value`, `unit` and `lower_is_better`. ::
|
||||
|
||||
def update_output(self, context):
|
||||
# pull the trace file from the target
|
||||
self.result = os.path.join(self.target.working_directory, 'trace.txt')
|
||||
self.outfile = os.path.join(context.output_directory, 'trace.txt')
|
||||
self.target.pull(self.result, self.outfile)
|
||||
context.add_artifact('error_trace', self.outfile, kind='export')
|
||||
|
||||
# parse the file if needs to be parsed, or add result directly to
|
||||
# context.
|
||||
|
||||
@ -567,12 +574,14 @@ At the very end of the run we would want to uninstall the binary we deployed ear
|
||||
|
||||
So the full example would look something like::
|
||||
|
||||
from wa import Instrument
|
||||
|
||||
class TraceErrorsInstrument(Instrument):
|
||||
|
||||
name = 'trace-errors'
|
||||
|
||||
def __init__(self, target):
|
||||
super(TraceErrorsInstrument, self).__init__(target)
|
||||
def __init__(self, target, **kwargs):
|
||||
super(TraceErrorsInstrument, self).__init__(target, **kwargs)
|
||||
self.binary_name = 'trace'
|
||||
self.binary_file = os.path.join(os.path.dirname(__file__), self.binary_name)
|
||||
self.trace_on_target = None
|
||||
@ -588,12 +597,12 @@ So the full example would look something like::
|
||||
def stop(self, context):
|
||||
self.target.execute('{} stop'.format(self.trace_on_target))
|
||||
|
||||
def extract_results(self, context):
|
||||
self.result = os.path.join(self.target.working_directory, 'trace.txt')
|
||||
self.target.pull(self.result, context.working_directory)
|
||||
context.add_artifact('error_trace', self.result, kind='export')
|
||||
|
||||
def update_output(self, context):
|
||||
self.result = os.path.join(self.target.working_directory, 'trace.txt')
|
||||
self.outfile = os.path.join(context.output_directory, 'trace.txt')
|
||||
self.target.pull(self.result, self.outfile)
|
||||
context.add_artifact('error_trace', self.outfile, kind='export')
|
||||
|
||||
metric = # ..
|
||||
context.add_metric('number_of_errors', metric, lower_is_better=True)
|
||||
|
||||
@ -609,8 +618,9 @@ Adding an Output Processor
|
||||
==========================
|
||||
|
||||
This is an example of how we would create an output processor which will format
the run metrics as a column-aligned table. The first thing to do is to create
a new file under ``$WA_USER_DIRECTORY/plugins/`` and subclass
:class:`OutputProcessor`. Make sure to overwrite the variable name with what we want our
processor to be called and provide a short description.
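
As a rough sketch, the start of such a file might look like the following
(the ``table`` name and description are illustrative only, and it is assumed
here that ``OutputProcessor`` can be imported from the top-level ``wa``
package):

.. code-block:: python

    from wa import OutputProcessor

    class Table(OutputProcessor):

        name = 'table'
        description = 'Formats the run metrics as a column-aligned table.'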
|
||||
|
||||
Next we need to implement any relevant methods (please see
|
||||
|
@ -26,7 +26,8 @@ CPU frequency fixed to max, and once with CPU frequency fixed to min.
|
||||
Classifiers are used to indicate the configuration in the output.
|
||||
|
||||
First, create the :class:`RunOutput` object, which is the main interface for
|
||||
interacting with WA outputs.
|
||||
interacting with WA outputs. Or alternatively a :class:`RunDatabaseOutput`
|
||||
if storing your results in a postgres database.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
@ -151,10 +152,6 @@ For the purposes of this report, they will be used to augment the metric's name.
|
||||
|
||||
scores[workload][name][freq] = metric
|
||||
|
||||
rows = []
|
||||
for workload in sorted(scores.keys()):
|
||||
wldata = scores[workload]
|
||||
|
||||
Once the metrics have been sorted, generate the report showing the delta
|
||||
between the two configurations (indicated by the "frequency" classifier) and
|
||||
highlight any unexpected deltas (based on the ``lower_is_better`` attribute of
|
||||
@ -164,23 +161,27 @@ statically significant deltas.)
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
for name in sorted(wldata.keys()):
|
||||
min_score = wldata[name]['min'].value
|
||||
max_score = wldata[name]['max'].value
|
||||
delta = max_score - min_score
|
||||
units = wldata[name]['min'].units or ''
|
||||
lib = wldata[name]['min'].lower_is_better
|
||||
rows = []
|
||||
for workload in sorted(scores.keys()):
|
||||
wldata = scores[workload]
|
||||
|
||||
warn = ''
|
||||
if (lib and delta > 0) or (not lib and delta < 0):
|
||||
warn = '!!!'
|
||||
for name in sorted(wldata.keys()):
|
||||
min_score = wldata[name]['min'].value
|
||||
max_score = wldata[name]['max'].value
|
||||
delta = max_score - min_score
|
||||
units = wldata[name]['min'].units or ''
|
||||
lib = wldata[name]['min'].lower_is_better
|
||||
|
||||
rows.append([workload, name,
|
||||
'{:.3f}'.format(min_score), '{:.3f}'.format(max_score),
|
||||
'{:.3f}'.format(delta), units, warn])
|
||||
warn = ''
|
||||
if (lib and delta > 0) or (not lib and delta < 0):
|
||||
warn = '!!!'
|
||||
|
||||
# separate workloads with a blank row
|
||||
rows.append(['', '', '', '', '', '', ''])
|
||||
rows.append([workload, name,
|
||||
'{:.3f}'.format(min_score), '{:.3f}'.format(max_score),
|
||||
'{:.3f}'.format(delta), units, warn])
|
||||
|
||||
# separate workloads with a blank row
|
||||
rows.append(['', '', '', '', '', '', ''])
|
||||
|
||||
|
||||
write_table(rows, sys.stdout, align='<<>>><<',
|
||||
@ -275,23 +276,23 @@ Below is the complete example code, and a report it generated for a sample run.
|
||||
for workload in sorted(scores.keys()):
|
||||
wldata = scores[workload]
|
||||
|
||||
for name in sorted(wldata.keys()):
|
||||
min_score = wldata[name]['min'].value
|
||||
max_score = wldata[name]['max'].value
|
||||
delta = max_score - min_score
|
||||
units = wldata[name]['min'].units or ''
|
||||
lib = wldata[name]['min'].lower_is_better
|
||||
for name in sorted(wldata.keys()):
|
||||
min_score = wldata[name]['min'].value
|
||||
max_score = wldata[name]['max'].value
|
||||
delta = max_score - min_score
|
||||
units = wldata[name]['min'].units or ''
|
||||
lib = wldata[name]['min'].lower_is_better
|
||||
|
||||
warn = ''
|
||||
if (lib and delta > 0) or (not lib and delta < 0):
|
||||
warn = '!!!'
|
||||
warn = ''
|
||||
if (lib and delta > 0) or (not lib and delta < 0):
|
||||
warn = '!!!'
|
||||
|
||||
rows.append([workload, name,
|
||||
'{:.3f}'.format(min_score), '{:.3f}'.format(max_score),
|
||||
'{:.3f}'.format(delta), units, warn])
|
||||
rows.append([workload, name,
|
||||
'{:.3f}'.format(min_score), '{:.3f}'.format(max_score),
|
||||
'{:.3f}'.format(delta), units, warn])
|
||||
|
||||
# separate workloads with a blank row
|
||||
rows.append(['', '', '', '', '', '', ''])
|
||||
# separate workloads with a blank row
|
||||
rows.append(['', '', '', '', '', '', ''])
|
||||
|
||||
|
||||
write_table(rows, sys.stdout, align='<<>>><<',
|
||||
|
**Q:** My Juno board keeps resetting upon starting WA even if it hasn't crashed.
--------------------------------------------------------------------------------
**A:** Please ensure that you do not have any other terminals (e.g. ``screen``
sessions) connected to the board's UART. When WA attempts to open the connection
for its own use this can cause the board to reset if a connection is already
present.

**Q:** I'm using the FPS instrument but I do not get any/correct results for my workload
-----------------------------------------------------------------------------------------

**A:** If your device is running Android 6.0+ then the default utility for
collecting fps metrics will be ``gfxinfo``, however this does not seem to be able
to extract any meaningful information for some workloads. In this case please
try setting the ``force_surfaceflinger`` parameter for the ``fps`` augmentation
to ``True``. This will attempt to guess the "View" for the workload
automatically, however this is device specific and therefore may need
customizing. If this is required please open the application and execute
``dumpsys SurfaceFlinger --list`` on the device via adb. This will provide a
list of all views available for measuring.

As an example, when trying to find the view for the AngryBirds Rio workload you
may get something like:

.. code-block:: none

    ...
    AppWindowToken{41dfe54 token=Token{77819a7 ActivityRecord{a151266 u0 com.rovio.angrybirdsrio/com.rovio.fusion.App t506}}}#0
    a3d001c com.rovio.angrybirdsrio/com.rovio.fusion.App#0
    Background for -SurfaceView - com.rovio.angrybirdsrio/com.rovio.fusion.App#0
    SurfaceView - com.rovio.angrybirdsrio/com.rovio.fusion.App#0
    com.rovio.angrybirdsrio/com.rovio.fusion.App#0
    boostedAnimationLayer#0
    mAboveAppWindowsContainers#0
    ...

From this output, ``"SurfaceView - com.rovio.angrybirdsrio/com.rovio.fusion.App#0"`` is
most likely the View that needs to be set as the ``view`` workload
parameter and will be picked up by the ``fps`` augmentation.
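
As a rough sketch, the corresponding agenda entries might then look something
like the following (the workload name and view string are purely illustrative
and will differ for your device and application):

.. code-block:: yaml

    config:
        augmentations:
            - fps
        fps:
            force_surfaceflinger: true
    workloads:
        - name: angrybirds_rio
          workload_parameters:
              view: "SurfaceView - com.rovio.angrybirdsrio/com.rovio.fusion.App#0"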
|
||||
|
||||
|
||||
**Q:** I am getting an error which looks similar to ``'CONFIG_SND_BT87X is not exposed in kernel config'...``
|
||||
-------------------------------------------------------------------------------------------------------------
|
||||
**A:** If you are receiving this under normal operation this can be caused by a
|
||||
mismatch of your WA and devlib versions. Please update both to their latest
|
||||
versions and delete your ``$USER_HOME/.workload_automation/cache/targets.json``
|
||||
(or equivalent) file.
|
||||
|
||||
**Q:** I get an error which looks similar to ``UnicodeDecodeError('ascii' codec can't decode byte...``
|
||||
------------------------------------------------------------------------------------------------------
|
||||
**A:** If you receive this error or a similar warning about your environment,
|
||||
please ensure that you configure your environment to use a locale which supports
|
||||
UTF-8. Otherwise this can cause issues when attempting to parse files containing
non-ASCII characters.
|
||||
|
||||
**Q:** I get the error ``Module "X" failed to install on target``
|
||||
------------------------------------------------------------------------------------------------------
|
||||
**A:** By default a set of devlib modules will be automatically loaded onto the
|
||||
target designed to add additional functionality. If the functionality provided
|
||||
by the module is not required then the module can be safely disabled by setting
|
||||
``load_default_modules`` to ``False`` in the ``device_config`` entry of the
|
||||
:ref:`agenda <config-agenda-entry>` and then re-enabling any specific modules
|
||||
that are still required. An example agenda snippet is shown below:
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
config:
|
||||
device: generic_android
|
||||
device_config:
|
||||
load_default_modules: False
|
||||
modules: ['list', 'of', 'modules', 'to', 'enable']
|
||||
|
@ -13,10 +13,11 @@ these signals are dispatched during execution please see the
|
||||
$signal_names
|
||||
|
||||
The methods above may be decorated with on the listed decorators to set the
|
||||
priority of the Instrument method relative to other callbacks registered for the
|
||||
signal (within the same priority level, callbacks are invoked in the order they
|
||||
were registered). The table below shows the mapping of the decorator to the
|
||||
corresponding priority:
|
||||
priority (a value in the ``wa.framework.signal.CallbackPriority`` enum) of the
|
||||
Instrument method relative to other callbacks registered for the signal (within
|
||||
the same priority level, callbacks are invoked in the order they were
|
||||
registered). The table below shows the mapping of the decorator to the
|
||||
corresponding priority name and level:
|
||||
|
||||
$priority_prefixes
|
||||
|
||||
|
@ -16,7 +16,7 @@ Configuration
|
||||
Default configuration file change
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Instead of the standard ``config.py`` file located at
|
||||
``$WA_USER_HOME/config.py`` WA now uses a ``confg.yaml`` file (at the same
|
||||
``$WA_USER_DIRECTORY/config.py`` WA now uses a ``confg.yaml`` file (at the same
|
||||
location) which is written in the YAML format instead of python. Additionally
|
||||
upon first invocation WA3 will automatically try and detect whether a WA2 config
|
||||
file is present and convert it to use the new WA3 format. During this process
|
||||
|
@ -489,6 +489,75 @@ Note that the ``config`` section still applies to every spec in the agenda. So
|
||||
the precedence order is -- spec settings override section settings, which in
|
||||
turn override global settings.
|
||||
|
||||
|
||||
.. _section-groups:
|
||||
|
||||
Section Groups
|
||||
---------------
|
||||
|
||||
Section groups are a way of grouping sections together and are used to produce a
|
||||
cross product of each of the different groups. This can be useful when you want
|
||||
to run a set of experiments with all the available combinations without having
|
||||
to specify each combination manually.
|
||||
|
||||
For example if we want to investigate the differences between running the
|
||||
maximum and minimum frequency with both the maximum and minimum number of cpus
|
||||
online, we can create an agenda as follows:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
sections:
|
||||
- id: min_freq
|
||||
runtime_parameters:
|
||||
freq: min
|
||||
group: frequency
|
||||
- id: max_freq
|
||||
runtime_parameters:
|
||||
freq: max
|
||||
group: frequency
|
||||
|
||||
- id: min_cpus
|
||||
runtime_parameters:
|
||||
cpus: 1
|
||||
group: cpus
|
||||
- id: max_cpus
|
||||
runtime_parameters:
|
||||
cpus: 8
|
||||
group: cpus
|
||||
|
||||
workloads:
|
||||
- dhrystone
|
||||
|
||||
This will results in 8 jobs being generated for each of the possible combinations.
|
||||
|
||||
::
|
||||
|
||||
min_freq-min_cpus-wk1 (dhrystone)
|
||||
min_freq-max_cpus-wk1 (dhrystone)
|
||||
max_freq-min_cpus-wk1 (dhrystone)
|
||||
max_freq-max_cpus-wk1 (dhrystone)
|
||||
min_freq-min_cpus-wk1 (dhrystone)
|
||||
min_freq-max_cpus-wk1 (dhrystone)
|
||||
max_freq-min_cpus-wk1 (dhrystone)
|
||||
max_freq-max_cpus-wk1 (dhrystone)
|
||||
|
||||
Each of the generated jobs will have :ref:`classifiers <classifiers>` for
|
||||
each group and the associated id automatically added.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# ...
|
||||
print('Job ID: {}'.format(job.id))
|
||||
print('Classifiers:')
|
||||
for k, v in job.classifiers.items():
|
||||
print(' {}: {}'.format(k, v))
|
||||
|
||||
Job ID: min_freq-min_cpus-no_idle-wk1
|
||||
Classifiers:
|
||||
frequency: min_freq
|
||||
cpus: min_cpus
|
||||
|
||||
|
||||
.. _augmentations:
|
||||
|
||||
Augmentations
|
||||
@ -621,7 +690,7 @@ Workload-specific augmentation
|
||||
It is possible to enable or disable (but not configure) augmentations at
|
||||
workload or section level, as well as in the global config, in which case, the
|
||||
augmentations would only be enabled/disabled for that workload/section. If the
|
||||
same augmentation is enabled at one level and disabled at another, as with all
|
||||
WA configuration, the more specific settings will take precedence over the less
|
||||
specific ones (i.e. workloads override sections that, in turn, override global
|
||||
config).
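
For example, an agenda might disable an instrument for just one workload along
these lines (a sketch only; ``trace-cmd`` is used purely as an example
augmentation):

.. code-block:: yaml

    config:
        augmentations:
            - trace-cmd
    workloads:
        - dhrystone
        - name: memcpy
          augmentations:
              - ~trace-cmd    # disabled only for this workload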
|
||||
|
@ -17,6 +17,8 @@ further configuration will be required.
|
||||
Android
|
||||
-------
|
||||
|
||||
.. _android-general-device-setup:
|
||||
|
||||
General Device Setup
|
||||
^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
@ -44,12 +46,15 @@ common parameters you might want to change are outlined below.
|
||||
Android builds. If this is not the case for your device, you will need to
|
||||
specify an alternative working directory (e.g. under ``/data/local``).
|
||||
|
||||
:load_default_modules: A number of "default" modules (e.g. for cpufreq
|
||||
subsystem) are loaded automatically, unless explicitly disabled. If you
|
||||
encounter an issue with one of the modules then this setting can be set to
|
||||
``False`` and any specific modules that you require can be request via the
|
||||
``modules`` entry.
|
||||
|
||||
:modules: A list of additional modules to be installed for the target. Devlib
|
||||
implements functionality for particular subsystems as modules. A number of
|
||||
"default" modules (e.g. for cpufreq subsystem) are loaded automatically,
|
||||
unless explicitly disabled. If additional modules need to be loaded, they
|
||||
may be specified using this parameter.
|
||||
implements functionality for particular subsystems as modules. If additional
|
||||
modules need to be loaded, they may be specified using this parameter.
|
||||
|
||||
Please see the `devlib documentation <http://devlib.readthedocs.io/en/latest/modules.html>`_
|
||||
for information on the available modules.
|
||||
@ -76,13 +81,14 @@ A typical ``device_config`` inside ``config.yaml`` may look something like
|
||||
# ...
|
||||
|
||||
|
||||
or a more specific config could be:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
device_config:
|
||||
device: 0123456789ABCDEF
|
||||
working_directory: '/sdcard/wa-working'
|
||||
load_default_modules: True
|
||||
modules: ['hotplug', 'cpufreq']
|
||||
core_names : ['a7', 'a7', 'a7', 'a15', 'a15']
|
||||
# ...
|
||||
|
@ -14,9 +14,9 @@ Using revent with workloads
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Some workloads (pretty much all games) rely on recorded revents for their
|
||||
execution. ReventWorkloads require between 1 and 4 revent files to be run.
There is one mandatory recording, ``run``, for performing the actual execution of
the workload and the remaining stages are optional. ``setup`` can be used to perform
|
||||
the initial setup (navigating menus, selecting game modes, etc).
|
||||
``extract_results`` can be used to perform any actions after the main stage of
|
||||
the workload for example to navigate a results or summary screen of the app. And
|
||||
@ -26,17 +26,21 @@ exiting the app.
|
||||
Because revents are very device-specific\ [*]_, these files would need to
|
||||
be recorded for each device.
|
||||
|
||||
The files must be called ``<device name>.(setup|run|extract_results|teardown).revent``
|
||||
, where ``<device name>`` is the name of your device (as defined by the ``name``
|
||||
attribute of your device's class). WA will look for these files in two
|
||||
places: ``<install dir>/wa/workloads/<workload name>/revent_files``
|
||||
and ``~/.workload_automation/dependencies/<workload name>``. The first
|
||||
location is primarily intended for revent files that come with WA (and if
|
||||
The files must be called ``<device name>.(setup|run|extract_results|teardown).revent``,
|
||||
where ``<device name>`` is the name of your device (as defined by the model
|
||||
name of your device which can be retrieved with
|
||||
``adb shell getprop ro.product.model`` or by the ``name`` attribute of your
|
||||
customized device class).
|
||||
|
||||
WA will look for these files in two places:
|
||||
``<installdir>/wa/workloads/<workload name>/revent_files`` and
|
||||
``$WA_USER_DIRECTORY/dependencies/<workload name>``. The
|
||||
first location is primarily intended for revent files that come with WA (and if
|
||||
you did a system-wide install, you'll need sudo to add files there), so it's
|
||||
probably easier to use the second location for the files you record. Also,
|
||||
if revent files for a workload exist in both locations, the files under
|
||||
``~/.workload_automation/dependencies`` will be used in favour of those
|
||||
installed with WA.
|
||||
probably easier to use the second location for the files you record. Also, if
|
||||
revent files for a workload exist in both locations, the files under
|
||||
``$WA_USER_DIRECTORY/dependencies`` will be used in favour
|
||||
of those installed with WA.
|
||||
|
||||
.. [*] It's not just about screen resolution -- the event codes may be different
|
||||
even if devices use the same screen.
|
||||
|
@ -12,8 +12,9 @@ Installation
|
||||
.. module:: wa
|
||||
|
||||
This page describes the 3 methods of installing Workload Automation 3. The first
|
||||
option is to use :ref:`pip` which
|
||||
will install the latest release of WA, the latest development version from :ref:`github <github>` or via a :ref:`dockerfile`.
|
||||
option is to use :ref:`pip` which will install the latest release of WA, the
|
||||
latest development version from :ref:`github <github>` or via a
|
||||
:ref:`dockerfile`.
|
||||
|
||||
|
||||
Prerequisites
|
||||
@ -22,11 +23,11 @@ Prerequisites
|
||||
Operating System
|
||||
----------------
|
||||
|
||||
WA runs on a native Linux install. It was tested with Ubuntu 14.04,
|
||||
but any recent Linux distribution should work. It should run on either
|
||||
32-bit or 64-bit OS, provided the correct version of Android (see below)
|
||||
was installed. Officially, **other environments are not supported**. WA
|
||||
has been known to run on Linux Virtual machines and in Cygwin environments,
|
||||
WA runs on a native Linux install. It has been tested on recent Ubuntu releases,
|
||||
but other recent Linux distributions should work as well. It should run on
|
||||
either 32-bit or 64-bit OS, provided the correct version of dependencies (see
|
||||
below) are installed. Officially, **other environments are not supported**.
|
||||
WA has been known to run on Linux Virtual machines and in Cygwin environments,
|
||||
though additional configuration may be required in both cases (known issues
|
||||
include makings sure USB/serial connections are passed to the VM, and wrong
|
||||
python/pip binaries being picked up in Cygwin). WA *should* work on other
|
||||
@ -45,7 +46,8 @@ possible to get limited functionality with minimal porting effort).
|
||||
Android SDK
|
||||
-----------
|
||||
|
||||
You need to have the Android SDK with at least one platform installed.
|
||||
To interact with Android devices you will need to have the Android SDK
|
||||
with at least one platform installed.
|
||||
To install it, download the ADT Bundle from here_. Extract it
|
||||
and add ``<path_to_android_sdk>/sdk/platform-tools`` and ``<path_to_android_sdk>/sdk/tools``
|
||||
to your ``PATH``. To test that you've installed it properly, run ``adb
|
||||
@ -72,7 +74,11 @@ the install location of the SDK (i.e. ``<path_to_android_sdk>/sdk``).
|
||||
Python
|
||||
------
|
||||
|
||||
Workload Automation 3 currently supports Python 3.5+.
|
||||
|
||||
.. note:: If your system's default python version is still Python 2, please
|
||||
replace the commands listed here with their Python3 equivalent
|
||||
(e.g. python3, pip3 etc.)
|
||||
|
||||
.. _pip:
|
||||
|
||||
@ -94,11 +100,11 @@ similar distributions, this may be done with APT::
|
||||
sudo -H pip install --upgrade pip
|
||||
sudo -H pip install --upgrade setuptools
|
||||
|
||||
If you do run into this issue after already installing some packages,
|
||||
If you do run into this issue after already installing some packages,
|
||||
you can resolve it by running ::
|
||||
|
||||
sudo chmod -R a+r /usr/local/lib/python2.7/dist-packagessudo
|
||||
find /usr/local/lib/python2.7/dist-packages -type d -exec chmod a+x {} \;
|
||||
sudo chmod -R a+r /usr/local/lib/python3.X/dist-packages
|
||||
sudo find /usr/local/lib/python3.X/dist-packages -type d -exec chmod a+x {} \;
|
||||
|
||||
(The paths above will work for Ubuntu; they may need to be adjusted
|
||||
for other distros).
|
||||
@ -171,9 +177,11 @@ install them upfront (e.g. if you're planning to use WA to an environment that
|
||||
may not always have Internet access).
|
||||
|
||||
* nose
|
||||
* PyDAQmx
|
||||
* pymongo
|
||||
* jinja2
|
||||
* mock
|
||||
* daqpower
|
||||
* sphinx
|
||||
* sphinx_rtd_theme
|
||||
* psycopg2-binary
|
||||
|
||||
|
||||
|
||||
@ -184,20 +192,33 @@ Installing
|
||||
|
||||
Installing the latest released version from PyPI (Python Package Index)::

    sudo -H pip install wlauto

This will install WA along with its mandatory dependencies. If you would like to
install all optional dependencies at the same time, do the following instead::

    sudo -H pip install wlauto[all]


Alternatively, you can also install the latest development version from GitHub
(you will need git installed for this to work)::

    git clone git@github.com:ARM-software/workload-automation.git workload-automation
    cd workload-automation
    sudo -H python setup.py install

.. note:: Please note that if using pip to install from GitHub this will most
          likely result in an older and incompatible version of devlib being
          installed alongside WA. If you wish to use pip please also manually
          install the latest version of
          `devlib <https://github.com/ARM-software/devlib>`_.

.. note:: Please note that while a `requirements.txt` is included, this is
          designed to be a reference of known working packages rather than to
          be used as part of a standard installation. The version restrictions
          in place as part of `setup.py` should automatically ensure the correct
          packages are installed, however if you encounter issues please try
          updating/downgrading to the package versions listed within.
|
||||
|
||||
|
||||
If the above succeeds, try ::
|
||||
@ -221,7 +242,7 @@ image in a container.
|
||||
|
||||
The Dockerfile can be found in the "extras" directory or online at
|
||||
`<https://github.com/ARM-software /workload- automation/blob/next/extras/Dockerfile>`_
|
||||
which contains addional information about how to build and to use the file.
|
||||
which contains additional information about how to build and to use the file.
|
||||
|
||||
|
||||
(Optional) Post Installation
|
||||
|
@ -20,7 +20,7 @@ Install
|
||||
.. note:: This is a quick summary. For more detailed instructions, please see
|
||||
the :ref:`installation` section.
|
||||
|
||||
Make sure you have Python 2.7 or Python 3 and a recent Android SDK with API
|
||||
Make sure you have Python 3.5+ and a recent Android SDK with API
|
||||
level 18 or above installed on your system. A complete install of the Android
|
||||
SDK is required, as WA uses a number of its utilities, not just adb. For the
|
||||
SDK, make sure that either ``ANDROID_HOME`` environment variable is set, or that
|
||||
@ -125,7 +125,7 @@ There are multiple options for configuring your device depending on your
|
||||
particular use case.
|
||||
|
||||
You can either add your configuration to the default configuration file
|
||||
``config.yaml``, under the ``$WA_USER_DIRECTORY/`` directory or you can specify it in
|
||||
the ``config`` section of your agenda directly.
|
||||
|
||||
Alternatively if you are using multiple devices, you may want to create separate
|
||||
@ -318,7 +318,7 @@ like this:
|
||||
config:
|
||||
augmentations:
|
||||
- ~execution_time
|
||||
- json
|
||||
- targz
|
||||
iterations: 2
|
||||
workloads:
|
||||
- memcpy
|
||||
@ -332,7 +332,7 @@ This agenda:
|
||||
- Specifies two workloads: memcpy and dhrystone.
|
||||
- Specifies that dhrystone should run in one thread and execute five million loops.
|
||||
- Specifies that each of the two workloads should be run twice.
|
||||
- Enables the targz output processor, in addition to the output processors enabled in
|
||||
the config.yaml.
|
||||
- Disables execution_time instrument, if it is enabled in the config.yaml
|
||||
|
||||
@ -352,13 +352,13 @@ in-depth information please see the :ref:`Create Command <create-command>` docum
|
||||
|
||||
In order to populate the agenda with relevant information you can supply all of
|
||||
the plugins you wish to use as arguments to the command, for example if we want
|
||||
to create an agenda file for running ``dhrystone`` on a `generic_android` device and we
|
||||
want to enable the ``execution_time`` and ``trace-cmd`` instruments and display the
|
||||
metrics using the ``csv`` output processor. We would use the following command::
|
||||
|
||||
wa create agenda generic_android dhrystone execution_time trace-cmd csv -o my_agenda.yaml
|
||||
|
||||
This will produce a ``my_agenda.yaml`` file containing all the relevant
|
||||
configuration for the specified plugins along with their default values as shown
|
||||
below:
|
||||
|
||||
@ -373,6 +373,7 @@ below:
|
||||
device: generic_android
|
||||
device_config:
|
||||
adb_server: null
|
||||
adb_port: null
|
||||
big_core: null
|
||||
core_clusters: null
|
||||
core_names: null
|
||||
@ -399,6 +400,7 @@ below:
|
||||
no_install: false
|
||||
report: true
|
||||
report_on_target: false
|
||||
mode: write-to-memory
|
||||
csv:
|
||||
extra_columns: null
|
||||
use_all_classifiers: false
|
||||
@ -483,14 +485,14 @@ that parses the contents of the output directory:
|
||||
>>> ro = RunOutput('./wa_output')
|
||||
>>> for job in ro.jobs:
|
||||
... if job.status != 'OK':
|
||||
... print 'Job "{}" did not complete successfully: {}'.format(job, job.status)
|
||||
... print('Job "{}" did not complete successfully: {}'.format(job, job.status))
|
||||
... continue
|
||||
... print 'Job "{}":'.format(job)
|
||||
... print('Job "{}":'.format(job))
|
||||
... for metric in job.metrics:
|
||||
... if metric.units:
|
||||
... print '\t{}: {} {}'.format(metric.name, metric.value, metric.units)
|
||||
... print('\t{}: {} {}'.format(metric.name, metric.value, metric.units))
|
||||
... else:
|
||||
... print '\t{}: {}'.format(metric.name, metric.value)
|
||||
... print('\t{}: {}'.format(metric.name, metric.value))
|
||||
...
|
||||
Job "wk1-dhrystone-1":
|
||||
thread 0 score: 20833333
|
||||
|
@ -18,6 +18,3 @@ User Reference
|
||||
-------------------
|
||||
|
||||
.. include:: user_information/user_reference/output_directory.rst
|
||||
|
||||
|
||||
|
||||
|
@ -30,7 +30,7 @@ An example agenda can be seen here:
|
||||
|
||||
device: generic_android
|
||||
device_config:
|
||||
device: R32C801B8XY # The adb name of our device we want to run on
|
||||
disable_selinux: true
|
||||
load_default_modules: true
|
||||
package_data_directory: /data/data
|
||||
@ -45,6 +45,7 @@ An example agenda can be seen here:
|
||||
no_install: false
|
||||
report: true
|
||||
report_on_target: false
|
||||
mode: write-to-disk
|
||||
csv: # Provide config for the csv augmentation
|
||||
use_all_classifiers: true
|
||||
|
||||
@ -116,7 +117,9 @@ whole will behave. The most common options that that you may want to specify are
|
||||
to connect to (e.g. ``host`` for an SSH connection or
|
||||
``device`` to specific an ADB name) as well as configure other
|
||||
options for the device for example the ``working_directory``
|
||||
or the list of ``modules`` to be loaded onto the device.
|
||||
or the list of ``modules`` to be loaded onto the device. (For
|
||||
more information please see
|
||||
:ref:`here <android-general-device-setup>`)
|
||||
:execution_order: Defines the order in which the agenda spec will be executed.
|
||||
:reboot_policy: Defines when during execution of a run a Device will be rebooted.
|
||||
:max_retries: The maximum number of times failed jobs will be retried before giving up.
|
||||
@ -124,7 +127,7 @@ whole will behave. The most common options that that you may want to specify are
|
||||
|
||||
For more information and a full list of these configuration options please see
|
||||
:ref:`Run Configuration <run-configuration>` and
|
||||
:ref:`"Meta Configuration" <meta-configuration>`.
|
||||
:ref:`Meta Configuration <meta-configuration>`.
|
||||
|
||||
|
||||
Plugins
|
||||
|
@ -102,6 +102,91 @@ remove the high level configuration.
|
||||
|
||||
Dependent on specificity, configuration parameters from different sources will
|
||||
have different inherent priorities. Within an agenda, the configuration in
|
||||
"workload" entries wil be more specific than "sections" entries, which in turn
|
||||
"workload" entries will be more specific than "sections" entries, which in turn
|
||||
are more specific than parameters in the "config" entry.
|
||||
|
||||
.. _config-include:
|
||||
|
||||
Configuration Includes
|
||||
----------------------
|
||||
|
||||
It is possible to include other files in your config files and agendas. This is
|
||||
done by specifying ``include#`` (note the trailing hash) as a key in one of the
|
||||
mappings, with the value being the path to the file to be included. The path
|
||||
must be either absolute, or relative to the location of the file it is being
|
||||
included from (*not* to the current working directory). The path may also
|
||||
include ``~`` to indicate current user's home directory.
|
||||
|
||||
The include is performed by removing the ``include#`` loading the contents of
|
||||
the specified into the mapping that contained it. In cases where the mapping
|
||||
already contains the key to be loaded, values will be merged using the usual
|
||||
merge method (for overwrites, values in the mapping take precedence over those
|
||||
from the included files).
|
||||
|
||||
Below is an example of an agenda that includes other files. The assumption is
|
||||
that all of those files are in one directory
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# agenda.yaml
|
||||
config:
|
||||
augmentations: [trace-cmd]
|
||||
include#: ./my-config.yaml
|
||||
sections:
|
||||
- include#: ./section1.yaml
|
||||
- include#: ./section2.yaml
|
||||
include#: ./workloads.yaml
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# my-config.yaml
|
||||
augmentations: [cpufreq]
|
||||
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# section1.yaml
|
||||
runtime_parameters:
|
||||
frequency: max
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# section2.yaml
|
||||
runtime_parameters:
|
||||
frequency: min
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# workloads.yaml
|
||||
workloads:
|
||||
- dhrystone
|
||||
- memcpy
|
||||
|
||||
The above is equivalent to having a single file like this:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# agenda.yaml
|
||||
config:
|
||||
augmentations: [cpufreq, trace-cmd]
|
||||
sections:
|
||||
- runtime_parameters:
|
||||
frequency: max
|
||||
- runtime_parameters:
|
||||
frequency: min
|
||||
workloads:
|
||||
- dhrystone
|
||||
- memcpy
|
||||
|
||||
Some additional details about the implementation and its limitations:
|
||||
|
||||
- The ``include#`` *must* be a key in a mapping, and the contents of the
|
||||
included file *must* be a mapping as well; it is not possible to include a
|
||||
list (e.g. in the examples above ``workload:`` part *must* be in the included
|
||||
file.
|
||||
- Being a key in a mapping, there can only be one ``include#`` entry per block.
|
||||
- The included file *must* have a ``.yaml`` extension.
|
||||
- Nested inclusions *are* allowed. I.e. included files may themselves include
|
||||
files; in such cases the included paths must be relative to *that* file, and
|
||||
not the "main" file.
|
||||
|
||||
|
@ -40,7 +40,7 @@ Will display help for this subcommand that will look something like this:
        AGENDA                Agenda for this workload automation run. This defines
                              which workloads will be executed, how many times, with
                              which tunables, etc. See example agendas in
                              /usr/local/lib/python2.7/dist-packages/wa for an
                              /usr/local/lib/python3.X/dist-packages/wa for an
                              example of how this file should be structured.

        optional arguments:
@ -238,6 +238,33 @@ Which will produce something like::
This will be populated with default values which can then be customised for the
particular use case.

Additionally, the create command can be used to initialize (and update) a
Postgres database which can be used by the ``postgres`` output processor.

Most of the database connection parameters have default values; however, they
can be overridden via command-line arguments. When initializing the database,
WA will also save the supplied parameters into the default user config file so
that they do not need to be specified each time the output processor is used.

As an example, if we had a database server running at 10.0.0.2 using the
standard port, we could use the following command to initialize a database for
use with WA::

        wa create database -a 10.0.0.2 -u my_username -p Pa55w0rd

This will log into the database server with the supplied credentials and create
a database (defaulting to 'wa') and will save the configuration to the
``~/.workload_automation/config.yaml`` file.

With updates to WA there may be changes to the database schema used. In this
case the create command can also be used with the ``-U`` flag to update the
database to use the new schema as follows::

        wa create database -a 10.0.0.2 -u my_username -p Pa55w0rd -U

This will upgrade the database sequentially until the database schema is using
the latest version.
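For reference, the schema version shipped with the installed WA (i.e. the
version that ``-U`` will upgrade the database towards) can be displayed with
the ``-x``/``--schema-version`` flag, without connecting to the database::

        wa create database -x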

.. _process-command:

Process
@ -87,6 +87,7 @@ __failed
    the failed attempts.

.. _job_execution_subd:

job execution output subdirectory
    Each subdirectory will be named ``<job id>_<workload label>_<iteration
    number>``, and will, at minimum, contain a ``result.json`` (see above).
@ -33,6 +33,7 @@ states.
              iterations: 1
              runtime_parameters:
                screen_on: false
                unlock_screen: 'vertical'
            - name: benchmarkpi
              iterations: 1
        sections:
@ -98,7 +99,7 @@ CPUFreq

:governor: A ``string`` that can be used to specify the governor for all cores if there are common governors available.

:governor_tunable: A ``dict`` that can be used to specify governor
:gov_tunables: A ``dict`` that can be used to specify governor
    tunables for all cores; unlike the other common parameters, these are not
    validated at the beginning of the run, therefore incorrect values will cause
    an error during runtime.
@ -113,7 +114,7 @@ CPUFreq

:<core_name>_governor: A ``string`` that can be used to specify the governor for cores of a particular type e.g. 'A72'.

:<core_name>_governor_tunable: A ``dict`` that can be used to specify governor
:<core_name>_gov_tunables: A ``dict`` that can be used to specify governor
    tunables for cores of a particular type e.g. 'A72'; these are not
    validated at the beginning of the run, therefore incorrect values will cause
    an error during runtime.
@ -129,7 +130,7 @@ CPUFreq

:cpu<no>_governor: A ``string`` that can be used to specify the governor for a particular core e.g. 'cpu0'.

:cpu<no>_governor_tunable: A ``dict`` that can be used to specify governor
:cpu<no>_gov_tunables: A ``dict`` that can be used to specify governor
    tunables for a particular core e.g. 'cpu0'; these are not
    validated at the beginning of the run, therefore incorrect values will cause
    an error during runtime.
@ -147,7 +148,7 @@ If big.LITTLE is detected for the device an additional set of parameters are ava

:big_governor: A ``string`` that can be used to specify the governor for the big cores.

:big_governor_tunable: A ``dict`` that can be used to specify governor
:big_gov_tunables: A ``dict`` that can be used to specify governor
    tunables for the big cores; these are not
    validated at the beginning of the run, therefore incorrect values will cause
    an error during runtime.
@ -162,7 +163,7 @@ If big.LITTLE is detected for the device an additional set of parameters are ava

:little_governor: A ``string`` that can be used to specify the governor for the little cores.

:little_governor_tunable: A ``dict`` that can be used to specify governor
:little_gov_tunables: A ``dict`` that can be used to specify governor
    tunables for the little cores; these are not
    validated at the beginning of the run, therefore incorrect values will cause
    an error during runtime.
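As a quick sketch of how the renamed parameters above might be used in an
agenda (the governor and tunable values here are illustrative assumptions, not
recommendations):

.. code-block:: yaml

        sections:
           - runtime_parameters:
                governor: ondemand
                gov_tunables:
                   sampling_rate: 20000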
@ -208,6 +209,13 @@ Android Specific Runtime Parameters
:screen_on: A ``boolean`` to specify whether the device's screen should be
    turned on. Defaults to ``True``.

:unlock_screen: A ``String`` to specify how the device's screen should be
    unlocked. Unlocking the screen is disabled by default. ``vertical``, ``diagonal``
    and ``horizontal`` are the supported values (see :meth:`devlib.AndroidTarget.swipe_to_unlock`).
    Note that unlocking only succeeds when no passcode is set. Since unlocking the screen
    requires turning on the screen, this option overrides the value of the ``screen_on``
    option.

.. _setting-sysfiles:

Setting Sysfiles
@ -6,7 +6,7 @@
|
||||
#
|
||||
# docker build -t wa .
|
||||
#
|
||||
# This will create an image called wadocker, which is preconfigured to
|
||||
# This will create an image called wa, which is preconfigured to
|
||||
# run WA and devlib. Please note that the build process automatically
|
||||
# accepts the licenses for the Android SDK, so please be sure that you
|
||||
# are willing to accept these prior to building and running the image
|
||||
@ -17,6 +17,13 @@
|
||||
#
|
||||
# docker run -it --privileged -v /dev/bus/usb:/dev/bus/usb --volume ${PWD}:/workspace --workdir /workspace wa
|
||||
#
|
||||
# If using selinux you may need to add the `z` option when mounting
|
||||
# volumes e.g.:
|
||||
# --volume ${PWD}:/workspace:z
|
||||
# Warning: Please ensure you do not use this option when mounting
|
||||
# system directores. For more information please see:
|
||||
# https://docs.docker.com/storage/bind-mounts/#configure-the-selinux-label
|
||||
#
|
||||
# The above command starts the container in privileged mode, with
|
||||
# access to USB devices. The current directory is mounted into the
|
||||
# image, allowing you to work from there. Any files written to this
|
||||
@ -32,27 +39,80 @@
|
||||
#
|
||||
# When you are finished, please run `exit` to leave the container.
|
||||
#
|
||||
# The relevant environment variables are stored in a separate
|
||||
# file which is automatically sourced in an interactive shell.
|
||||
# If running from a non-interactive environment this can
|
||||
# be manually sourced with `source /home/wa/.wa_environment`
|
||||
#
|
||||
# NOTE: Please make sure that the ADB server is NOT running on the
|
||||
# host. If in doubt, run `adb kill-server` before running the docker
|
||||
# container.
|
||||
#
|
||||
|
||||
# We want to make sure to base this on a recent ubuntu release
|
||||
FROM ubuntu:17.10
|
||||
FROM ubuntu:20.04
|
||||
|
||||
# Please update the references below to use different versions of
|
||||
# devlib, WA or the Android SDK
|
||||
ARG DEVLIB_REF=v1.0.0
|
||||
ARG WA_REF=v3.0.0
|
||||
ARG DEVLIB_REF=v1.3.4
|
||||
ARG WA_REF=v3.3.1
|
||||
ARG ANDROID_SDK_URL=https://dl.google.com/android/repository/sdk-tools-linux-3859397.zip
|
||||
|
||||
RUN apt-get update
|
||||
RUN apt-get install -y python-pip git wget zip openjdk-8-jre-headless vim emacs nano curl sshpass ssh usbutils
|
||||
RUN pip install pandas
|
||||
# Set a default timezone to use
|
||||
ENV TZ=Europe/London
|
||||
|
||||
ARG DEBIAN_FRONTEND=noninteractive
|
||||
RUN apt-get update && apt-get install -y \
|
||||
apache2-utils \
|
||||
bison \
|
||||
cmake \
|
||||
curl \
|
||||
emacs \
|
||||
flex \
|
||||
git \
|
||||
libcdk5-dev \
|
||||
libiio-dev \
|
||||
libxml2 \
|
||||
libxml2-dev \
|
||||
locales \
|
||||
nano \
|
||||
openjdk-8-jre-headless \
|
||||
python3 \
|
||||
python3-pip \
|
||||
ssh \
|
||||
sshpass \
|
||||
sudo \
|
||||
trace-cmd \
|
||||
usbutils \
|
||||
vim \
|
||||
wget \
|
||||
zip
|
||||
|
||||
# Clone and download iio-capture
|
||||
RUN git clone -v https://github.com/BayLibre/iio-capture.git /tmp/iio-capture && \
|
||||
cd /tmp/iio-capture && \
|
||||
make && \
|
||||
make install
|
||||
|
||||
RUN pip3 install pandas
|
||||
|
||||
# Ensure we're using utf-8 as our default encoding
|
||||
RUN locale-gen en_US.UTF-8
|
||||
ENV LANG en_US.UTF-8
|
||||
ENV LANGUAGE en_US:en
|
||||
ENV LC_ALL en_US.UTF-8
|
||||
|
||||
# Let's get the two repos we need, and install them
|
||||
RUN git clone -v https://github.com/ARM-software/devlib.git /tmp/devlib && cd /tmp/devlib && git checkout $DEVLIB_REF && python setup.py install
|
||||
RUN git clone -v https://github.com/ARM-software/workload-automation.git /tmp/wa && cd /tmp/wa && git checkout $WA_REF && python setup.py install
|
||||
RUN git clone -v https://github.com/ARM-software/devlib.git /tmp/devlib && \
|
||||
cd /tmp/devlib && \
|
||||
git checkout $DEVLIB_REF && \
|
||||
python3 setup.py install && \
|
||||
pip3 install .[full]
|
||||
RUN git clone -v https://github.com/ARM-software/workload-automation.git /tmp/wa && \
|
||||
cd /tmp/wa && \
|
||||
git checkout $WA_REF && \
|
||||
python3 setup.py install && \
|
||||
pip3 install .[all]
|
||||
|
||||
# Clean-up
|
||||
RUN rm -R /tmp/devlib /tmp/wa
|
||||
@ -66,10 +126,19 @@ RUN mkdir -p /home/wa/.android
|
||||
RUN mkdir -p /home/wa/AndroidSDK && cd /home/wa/AndroidSDK && wget $ANDROID_SDK_URL -O sdk.zip && unzip sdk.zip
|
||||
RUN cd /home/wa/AndroidSDK/tools/bin && yes | ./sdkmanager --licenses && ./sdkmanager platform-tools && ./sdkmanager 'build-tools;27.0.3'
|
||||
|
||||
# Update the path
|
||||
RUN echo 'export PATH=/home/wa/AndroidSDK/platform-tools:${PATH}' >> /home/wa/.bashrc
|
||||
RUN echo 'export PATH=/home/wa/AndroidSDK/build-tools:${PATH}' >> /home/wa/.bashrc
|
||||
RUN echo 'export ANDROID_HOME=/home/wa/AndroidSDK' >> /home/wa/.bashrc
|
||||
# Download Monsoon
|
||||
RUN mkdir -p /home/wa/monsoon
|
||||
RUN curl https://android.googlesource.com/platform/cts/+/master/tools/utils/monsoon.py\?format\=TEXT | base64 --decode > /home/wa/monsoon/monsoon.py
|
||||
RUN chmod +x /home/wa/monsoon/monsoon.py
|
||||
|
||||
# Update WA's required environment variables.
|
||||
RUN echo 'export PATH=/home/wa/monsoon:${PATH}' >> /home/wa/.wa_environment
|
||||
RUN echo 'export PATH=/home/wa/AndroidSDK/platform-tools:${PATH}' >> /home/wa/.wa_environment
|
||||
RUN echo 'export PATH=/home/wa/AndroidSDK/build-tools:${PATH}' >> /home/wa/.wa_environment
|
||||
RUN echo 'export ANDROID_HOME=/home/wa/AndroidSDK' >> /home/wa/.wa_environment
|
||||
|
||||
# Source WA environment variables in an interactive environment
|
||||
RUN echo 'source /home/wa/.wa_environment' >> /home/wa/.bashrc
|
||||
|
||||
# Generate some ADB keys. These will change each time the image is build but will otherwise persist.
|
||||
RUN /home/wa/AndroidSDK/platform-tools/adb keygen /home/wa/.android/adbkey
|
||||
|
@ -43,7 +43,7 @@ ignore=external
|
||||
# https://bitbucket.org/logilab/pylint/issue/232/wrong-hanging-indentation-false-positive
|
||||
# TODO: disabling no-value-for-parameter and logging-format-interpolation, as they appear to be broken
|
||||
# in version 1.4.1 and return a lot of false postives; should be re-enabled once fixed.
|
||||
disable=C0301,C0103,C0111,W0142,R0903,R0904,R0922,W0511,W0141,I0011,R0921,W1401,C0330,no-value-for-parameter,logging-format-interpolation,no-else-return,inconsistent-return-statements,keyword-arg-before-vararg,consider-using-enumerate,no-member
|
||||
disable=C0301,C0103,C0111,W0142,R0903,R0904,R0922,W0511,W0141,I0011,R0921,W1401,C0330,no-value-for-parameter,logging-format-interpolation,no-else-return,inconsistent-return-statements,keyword-arg-before-vararg,consider-using-enumerate,no-member,super-with-arguments,useless-object-inheritance,raise-missing-from,no-else-raise,no-else-break,no-else-continue
|
||||
|
||||
[FORMAT]
|
||||
max-module-lines=4000
|
||||
|
3
pytest.ini
Normal file
@ -0,0 +1,3 @@
|
||||
[pytest]
|
||||
filterwarnings=
|
||||
ignore::DeprecationWarning:past[.*]
|
30
requirements.txt
Normal file
@ -0,0 +1,30 @@
|
||||
bcrypt==4.0.1
|
||||
certifi==2024.7.4
|
||||
cffi==1.15.1
|
||||
charset-normalizer==3.1.0
|
||||
colorama==0.4.6
|
||||
cryptography==43.0.1
|
||||
devlib==1.3.4
|
||||
future==0.18.3
|
||||
idna==3.7
|
||||
Louie-latest==1.3.1
|
||||
lxml==4.9.2
|
||||
nose==1.3.7
|
||||
numpy==1.24.3
|
||||
pandas==2.0.1
|
||||
paramiko==3.4.0
|
||||
pexpect==4.8.0
|
||||
ptyprocess==0.7.0
|
||||
pycparser==2.21
|
||||
PyNaCl==1.5.0
|
||||
pyserial==3.5
|
||||
python-dateutil==2.8.2
|
||||
pytz==2023.3
|
||||
PyYAML==6.0
|
||||
requests==2.32.0
|
||||
scp==0.14.5
|
||||
six==1.16.0
|
||||
tzdata==2023.3
|
||||
urllib3==1.26.19
|
||||
wlauto==3.3.1
|
||||
wrapt==1.15.0
|
40
setup.py
Normal file → Executable file
@ -29,9 +29,10 @@ except ImportError:
|
||||
wa_dir = os.path.join(os.path.dirname(__file__), 'wa')
|
||||
|
||||
sys.path.insert(0, os.path.join(wa_dir, 'framework'))
|
||||
from version import get_wa_version, get_wa_version_with_commit
|
||||
from version import (get_wa_version, get_wa_version_with_commit,
|
||||
format_version, required_devlib_version)
|
||||
|
||||
# happends if falling back to distutils
|
||||
# happens if falling back to distutils
|
||||
warnings.filterwarnings('ignore', "Unknown distribution option: 'install_requires'")
|
||||
warnings.filterwarnings('ignore', "Unknown distribution option: 'extras_require'")
|
||||
|
||||
@ -41,7 +42,7 @@ except OSError:
|
||||
pass
|
||||
|
||||
packages = []
|
||||
data_files = {}
|
||||
data_files = {'': [os.path.join(wa_dir, 'commands', 'postgres_schema.sql')]}
|
||||
source_dir = os.path.dirname(__file__)
|
||||
for root, dirs, files in os.walk(wa_dir):
|
||||
rel_dir = os.path.relpath(root, source_dir)
|
||||
@ -61,54 +62,62 @@ for root, dirs, files in os.walk(wa_dir):
|
||||
|
||||
scripts = [os.path.join('scripts', s) for s in os.listdir('scripts')]
|
||||
|
||||
with open("README.rst", "r") as fh:
|
||||
long_description = fh.read()
|
||||
|
||||
devlib_version = format_version(required_devlib_version)
|
||||
params = dict(
|
||||
name='wlauto',
|
||||
description='A framework for automating workload execution and measurement collection on ARM devices.',
|
||||
long_description=long_description,
|
||||
version=get_wa_version_with_commit(),
|
||||
packages=packages,
|
||||
package_data=data_files,
|
||||
include_package_data=True,
|
||||
scripts=scripts,
|
||||
url='https://github.com/ARM-software/workload-automation',
|
||||
license='Apache v2',
|
||||
maintainer='ARM Architecture & Technology Device Lab',
|
||||
maintainer_email='workload-automation@arm.com',
|
||||
python_requires='>= 3.7',
|
||||
setup_requires=[
|
||||
'numpy'
|
||||
'numpy<=1.16.4; python_version<"3"',
|
||||
'numpy; python_version>="3"',
|
||||
],
|
||||
install_requires=[
|
||||
'python-dateutil', # converting between UTC and local time.
|
||||
'pexpect>=3.3', # Send/receive to/from device
|
||||
'pyserial', # Serial port interface
|
||||
'colorama', # Printing with colors
|
||||
'pyYAML', # YAML-formatted agenda parsing
|
||||
'pyYAML>=5.1b3', # YAML-formatted agenda parsing
|
||||
'requests', # Fetch assets over HTTP
|
||||
'devlib>=0.0.4', # Interacting with devices
|
||||
'devlib>={}'.format(devlib_version), # Interacting with devices
|
||||
'louie-latest', # callbacks dispatch
|
||||
'wrapt', # better decorators
|
||||
'pandas>=0.23.0', # Data analysis and manipulation
|
||||
'pandas>=0.23.0,<=0.24.2; python_version<"3.5.3"', # Data analysis and manipulation
|
||||
'pandas>=0.23.0; python_version>="3.5.3"', # Data analysis and manipulation
|
||||
'future', # Python 2-3 compatiblity
|
||||
],
|
||||
dependency_links=['https://github.com/ARM-software/devlib/tarball/master#egg=devlib-0.0.4'],
|
||||
|
||||
dependency_links=['https://github.com/ARM-software/devlib/tarball/master#egg=devlib-{}'.format(devlib_version)],
|
||||
extras_require={
|
||||
'other': ['jinja2'],
|
||||
'test': ['nose', 'mock'],
|
||||
'mongodb': ['pymongo'],
|
||||
'notify': ['notify2'],
|
||||
'doc': ['sphinx'],
|
||||
'doc': ['sphinx', 'sphinx_rtd_theme'],
|
||||
'postgres': ['psycopg2-binary'],
|
||||
'daq': ['daqpower'],
|
||||
},
|
||||
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
|
||||
classifiers=[
|
||||
'Development Status :: 4 - Beta',
|
||||
'Development Status :: 5 - Production/Stable',
|
||||
'Environment :: Console',
|
||||
'License :: OSI Approved :: Apache Software License',
|
||||
'Operating System :: POSIX :: Linux',
|
||||
'Programming Language :: Python :: 2.7',
|
||||
'Programming Language :: Python :: 3',
|
||||
],
|
||||
)
|
||||
|
||||
all_extras = list(chain(iter(params['extras_require'].values())))
|
||||
params['extras_require']['everything'] = all_extras
|
||||
params['extras_require']['all'] = all_extras
|
||||
|
||||
|
||||
class sdist(orig_sdist):
|
||||
@ -122,7 +131,6 @@ class sdist(orig_sdist):
|
||||
orig_sdist.initialize_options(self)
|
||||
self.strip_commit = False
|
||||
|
||||
|
||||
def run(self):
|
||||
if self.strip_commit:
|
||||
self.distribution.get_version = get_wa_version
|
||||
|
23
tests/ci/idle_agenda.yaml
Normal file
@ -0,0 +1,23 @@
|
||||
config:
|
||||
iterations: 1
|
||||
augmentations:
|
||||
- ~~
|
||||
- status
|
||||
device: generic_local
|
||||
device_config:
|
||||
big_core: null
|
||||
core_clusters: null
|
||||
core_names: null
|
||||
executables_directory: null
|
||||
keep_password: true
|
||||
load_default_modules: false
|
||||
model: null
|
||||
modules: null
|
||||
password: null
|
||||
shell_prompt: !<tag:wa:regex> '40:^.*(shell|root|juno)@?.*:[/~]\S* *[#$] '
|
||||
unrooted: True
|
||||
working_directory: null
|
||||
workloads:
|
||||
- name: idle
|
||||
params:
|
||||
duration: 1
|
@ -17,7 +17,7 @@
|
||||
from wa import Plugin
|
||||
|
||||
|
||||
class TestDevice(Plugin):
|
||||
class MockDevice(Plugin):
|
||||
|
||||
name = 'test-device'
|
||||
kind = 'device'
|
||||
|
7
tests/data/includes/agenda.yaml
Normal file
@ -0,0 +1,7 @@
|
||||
config:
|
||||
augmentations: [~execution_time]
|
||||
include#: configs/test.yaml
|
||||
sections:
|
||||
- include#: sections/section1.yaml
|
||||
- include#: sections/section2.yaml
|
||||
include#: workloads.yaml
|
1
tests/data/includes/configs/test.yaml
Normal file
@ -0,0 +1 @@
|
||||
augmentations: [cpufreq, trace-cmd]
|
2
tests/data/includes/section-include.yaml
Normal file
@ -0,0 +1,2 @@
|
||||
classifiers:
|
||||
included: true
|
1
tests/data/includes/sections/section1.yaml
Normal file
@ -0,0 +1 @@
|
||||
classifiers: {'section': 'one'}
|
2
tests/data/includes/sections/section2.yaml
Normal file
@ -0,0 +1,2 @@
|
||||
classifiers: {'section': 'two'}
|
||||
include#: ../section-include.yaml
|
2
tests/data/includes/user/config.yaml
Normal file
@ -0,0 +1,2 @@
|
||||
augmentations: [execution_time]
|
||||
|
5
tests/data/includes/workloads.yaml
Normal file
@ -0,0 +1,5 @@
|
||||
workloads:
|
||||
- dhrystone
|
||||
- name: memcpy
|
||||
classifiers:
|
||||
memcpy: True
|
@ -17,19 +17,26 @@
|
||||
# pylint: disable=E0611
|
||||
# pylint: disable=R0201
|
||||
import os
|
||||
import yaml
|
||||
import sys
|
||||
from collections import defaultdict
|
||||
from unittest import TestCase
|
||||
|
||||
from nose.tools import assert_equal, assert_in, raises, assert_true
|
||||
|
||||
|
||||
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
|
||||
os.environ['WA_USER_DIRECTORY'] = os.path.join(DATA_DIR, 'includes')
|
||||
|
||||
from wa.framework.configuration.execution import ConfigManager
|
||||
from wa.framework.configuration.parsers import AgendaParser
|
||||
from wa.framework.exception import ConfigError
|
||||
from wa.utils.serializer import yaml
|
||||
from wa.utils.types import reset_all_counters
|
||||
|
||||
YAML_TEST_FILE = os.path.join(os.path.dirname(__file__), 'data', 'test-agenda.yaml')
|
||||
YAML_BAD_SYNTAX_FILE = os.path.join(os.path.dirname(__file__), 'data', 'bad-syntax-agenda.yaml')
|
||||
|
||||
YAML_TEST_FILE = os.path.join(DATA_DIR, 'test-agenda.yaml')
|
||||
YAML_BAD_SYNTAX_FILE = os.path.join(DATA_DIR, 'bad-syntax-agenda.yaml')
|
||||
INCLUDES_TEST_FILE = os.path.join(DATA_DIR, 'includes', 'agenda.yaml')
|
||||
|
||||
invalid_agenda_text = """
|
||||
workloads:
|
||||
@ -37,8 +44,6 @@ workloads:
|
||||
workload_parameters:
|
||||
test: 1
|
||||
"""
|
||||
invalid_agenda = yaml.load(invalid_agenda_text)
|
||||
invalid_agenda.name = 'invalid1'
|
||||
|
||||
duplicate_agenda_text = """
|
||||
global:
|
||||
@ -51,14 +56,10 @@ workloads:
|
||||
- id: "1"
|
||||
workload_name: benchmarkpi
|
||||
"""
|
||||
duplicate_agenda = yaml.load(duplicate_agenda_text)
|
||||
duplicate_agenda.name = 'invalid2'
|
||||
|
||||
short_agenda_text = """
|
||||
workloads: [antutu, dhrystone, benchmarkpi]
|
||||
"""
|
||||
short_agenda = yaml.load(short_agenda_text)
|
||||
short_agenda.name = 'short'
|
||||
|
||||
default_ids_agenda_text = """
|
||||
workloads:
|
||||
@ -71,8 +72,6 @@ workloads:
|
||||
cpus: 1
|
||||
- vellamo
|
||||
"""
|
||||
default_ids_agenda = yaml.load(default_ids_agenda_text)
|
||||
default_ids_agenda.name = 'default_ids'
|
||||
|
||||
sectioned_agenda_text = """
|
||||
sections:
|
||||
@ -95,8 +94,6 @@ sections:
|
||||
workloads:
|
||||
- memcpy
|
||||
"""
|
||||
sectioned_agenda = yaml.load(sectioned_agenda_text)
|
||||
sectioned_agenda.name = 'sectioned'
|
||||
|
||||
dup_sectioned_agenda_text = """
|
||||
sections:
|
||||
@ -109,8 +106,22 @@ sections:
|
||||
workloads:
|
||||
- memcpy
|
||||
"""
|
||||
dup_sectioned_agenda = yaml.load(dup_sectioned_agenda_text)
|
||||
dup_sectioned_agenda.name = 'dup-sectioned'
|
||||
|
||||
yaml_anchors_agenda_text = """
|
||||
workloads:
|
||||
- name: dhrystone
|
||||
params: &dhrystone_single_params
|
||||
cleanup_assets: true
|
||||
cpus: 0
|
||||
delay: 3
|
||||
duration: 0
|
||||
mloops: 10
|
||||
threads: 1
|
||||
- name: dhrystone
|
||||
params:
|
||||
<<: *dhrystone_single_params
|
||||
threads: 4
|
||||
"""
|
||||
|
||||
|
||||
class AgendaTest(TestCase):
|
||||
@ -125,6 +136,8 @@ class AgendaTest(TestCase):
|
||||
assert_equal(len(self.config.jobs_config.root_node.workload_entries), 4)
|
||||
|
||||
def test_duplicate_id(self):
|
||||
duplicate_agenda = yaml.load(duplicate_agenda_text)
|
||||
|
||||
try:
|
||||
self.parser.load(self.config, duplicate_agenda, 'test')
|
||||
except ConfigError as e:
|
||||
@ -133,6 +146,8 @@ class AgendaTest(TestCase):
|
||||
raise Exception('ConfigError was not raised for an agenda with duplicate ids.')
|
||||
|
||||
def test_yaml_missing_field(self):
|
||||
invalid_agenda = yaml.load(invalid_agenda_text)
|
||||
|
||||
try:
|
||||
self.parser.load(self.config, invalid_agenda, 'test')
|
||||
except ConfigError as e:
|
||||
@ -141,20 +156,26 @@ class AgendaTest(TestCase):
|
||||
raise Exception('ConfigError was not raised for an invalid agenda.')
|
||||
|
||||
def test_defaults(self):
|
||||
short_agenda = yaml.load(short_agenda_text)
|
||||
self.parser.load(self.config, short_agenda, 'test')
|
||||
|
||||
workload_entries = self.config.jobs_config.root_node.workload_entries
|
||||
assert_equal(len(workload_entries), 3)
|
||||
assert_equal(workload_entries[0].config['workload_name'], 'antutu')
|
||||
assert_equal(workload_entries[0].id, 'wk1')
|
||||
|
||||
def test_default_id_assignment(self):
|
||||
default_ids_agenda = yaml.load(default_ids_agenda_text)
|
||||
|
||||
self.parser.load(self.config, default_ids_agenda, 'test2')
|
||||
workload_entries = self.config.jobs_config.root_node.workload_entries
|
||||
assert_equal(workload_entries[0].id, 'wk2')
|
||||
assert_equal(workload_entries[3].id, 'wk3')
|
||||
|
||||
def test_sections(self):
|
||||
sectioned_agenda = yaml.load(sectioned_agenda_text)
|
||||
self.parser.load(self.config, sectioned_agenda, 'test')
|
||||
|
||||
root_node_workload_entries = self.config.jobs_config.root_node.workload_entries
|
||||
leaves = list(self.config.jobs_config.root_node.leaves())
|
||||
section1_workload_entries = leaves[0].workload_entries
|
||||
@ -164,10 +185,58 @@ class AgendaTest(TestCase):
|
||||
assert_true(section1_workload_entries[0].config['workload_parameters']['markers_enabled'])
|
||||
assert_equal(section2_workload_entries[0].config['workload_name'], 'antutu')
|
||||
|
||||
def test_yaml_anchors(self):
|
||||
yaml_anchors_agenda = yaml.load(yaml_anchors_agenda_text)
|
||||
self.parser.load(self.config, yaml_anchors_agenda, 'test')
|
||||
|
||||
workload_entries = self.config.jobs_config.root_node.workload_entries
|
||||
assert_equal(len(workload_entries), 2)
|
||||
assert_equal(workload_entries[0].config['workload_name'], 'dhrystone')
|
||||
assert_equal(workload_entries[0].config['workload_parameters']['threads'], 1)
|
||||
assert_equal(workload_entries[0].config['workload_parameters']['delay'], 3)
|
||||
assert_equal(workload_entries[1].config['workload_name'], 'dhrystone')
|
||||
assert_equal(workload_entries[1].config['workload_parameters']['threads'], 4)
|
||||
assert_equal(workload_entries[1].config['workload_parameters']['delay'], 3)
|
||||
|
||||
@raises(ConfigError)
|
||||
def test_dup_sections(self):
|
||||
dup_sectioned_agenda = yaml.load(dup_sectioned_agenda_text)
|
||||
self.parser.load(self.config, dup_sectioned_agenda, 'test')
|
||||
|
||||
@raises(ConfigError)
|
||||
def test_bad_syntax(self):
|
||||
self.parser.load_from_path(self.config, YAML_BAD_SYNTAX_FILE)
|
||||
|
||||
|
||||
class FakeTargetManager:
|
||||
|
||||
def merge_runtime_parameters(self, params):
|
||||
return params
|
||||
|
||||
def validate_runtime_parameters(self, params):
|
||||
pass
|
||||
|
||||
|
||||
class IncludesTest(TestCase):
|
||||
|
||||
def test_includes(self):
|
||||
from pprint import pprint
|
||||
parser = AgendaParser()
|
||||
cm = ConfigManager()
|
||||
tm = FakeTargetManager()
|
||||
|
||||
includes = parser.load_from_path(cm, INCLUDES_TEST_FILE)
|
||||
include_set = set([os.path.basename(i) for i in includes])
|
||||
assert_equal(include_set,
|
||||
set(['test.yaml', 'section1.yaml', 'section2.yaml',
|
||||
'section-include.yaml', 'workloads.yaml']))
|
||||
|
||||
job_classifiers = {j.id: j.classifiers
|
||||
for j in cm.jobs_config.generate_job_specs(tm)}
|
||||
assert_equal(job_classifiers,
|
||||
{
|
||||
's1-wk1': {'section': 'one'},
|
||||
's2-wk1': {'section': 'two', 'included': True},
|
||||
's1-wk2': {'section': 'one', 'memcpy': True},
|
||||
's2-wk2': {'section': 'two', 'included': True, 'memcpy': True},
|
||||
})
|
||||
|
@ -16,6 +16,7 @@
|
||||
import unittest
|
||||
from nose.tools import assert_equal
|
||||
|
||||
from wa.framework.configuration.execution import ConfigManager
|
||||
from wa.utils.misc import merge_config_values
|
||||
|
||||
|
||||
@ -38,3 +39,21 @@ class TestConfigUtils(unittest.TestCase):
|
||||
if v2 is not None:
|
||||
assert_equal(type(result), type(v2))
|
||||
|
||||
|
||||
|
||||
class TestConfigParser(unittest.TestCase):
|
||||
|
||||
def test_param_merge(self):
|
||||
config = ConfigManager()
|
||||
|
||||
config.load_config({'workload_params': {'one': 1, 'three': {'ex': 'x'}}, 'runtime_params': {'aye': 'a'}}, 'file_one')
|
||||
config.load_config({'workload_params': {'two': 2, 'three': {'why': 'y'}}, 'runtime_params': {'bee': 'b'}}, 'file_two')
|
||||
|
||||
assert_equal(
|
||||
config.jobs_config.job_spec_template['workload_parameters'],
|
||||
{'one': 1, 'two': 2, 'three': {'why': 'y'}},
|
||||
)
|
||||
assert_equal(
|
||||
config.jobs_config.job_spec_template['runtime_parameters'],
|
||||
{'aye': 'a', 'bee': 'b'},
|
||||
)
|
||||
|
@ -21,9 +21,10 @@ from nose.tools import assert_equal, assert_raises
|
||||
|
||||
from wa.utils.exec_control import (init_environment, reset_environment,
|
||||
activate_environment, once,
|
||||
once_per_class, once_per_instance)
|
||||
once_per_class, once_per_instance,
|
||||
once_per_attribute_value)
|
||||
|
||||
class TestClass(object):
|
||||
class MockClass(object):
|
||||
|
||||
called = 0
|
||||
|
||||
@ -32,7 +33,7 @@ class TestClass(object):
|
||||
|
||||
@once
|
||||
def called_once(self):
|
||||
TestClass.called += 1
|
||||
MockClass.called += 1
|
||||
|
||||
@once
|
||||
def initilize_once(self):
|
||||
@ -50,7 +51,7 @@ class TestClass(object):
|
||||
return '{}: Called={}'.format(self.__class__.__name__, self.called)
|
||||
|
||||
|
||||
class SubClass(TestClass):
|
||||
class SubClass(MockClass):
|
||||
|
||||
def __init__(self):
|
||||
super(SubClass, self).__init__()
|
||||
@ -110,7 +111,19 @@ class AnotherClass(object):
|
||||
self.count += 1
|
||||
|
||||
|
||||
class AnotherSubClass(TestClass):
|
||||
class NamedClass:
|
||||
|
||||
count = 0
|
||||
|
||||
def __init__(self, name):
|
||||
self.name = name
|
||||
|
||||
@once_per_attribute_value('name')
|
||||
def initilize(self):
|
||||
NamedClass.count += 1
|
||||
|
||||
|
||||
class AnotherSubClass(MockClass):
|
||||
|
||||
def __init__(self):
|
||||
super(AnotherSubClass, self).__init__()
|
||||
@ -142,7 +155,7 @@ class EnvironmentManagementTest(TestCase):
|
||||
|
||||
def test_reset_current_environment(self):
|
||||
activate_environment('CURRENT_ENVIRONMENT')
|
||||
t1 = TestClass()
|
||||
t1 = MockClass()
|
||||
t1.initilize_once()
|
||||
assert_equal(t1.count, 1)
|
||||
|
||||
@ -152,7 +165,7 @@ class EnvironmentManagementTest(TestCase):
|
||||
|
||||
def test_switch_environment(self):
|
||||
activate_environment('ENVIRONMENT1')
|
||||
t1 = TestClass()
|
||||
t1 = MockClass()
|
||||
t1.initilize_once()
|
||||
assert_equal(t1.count, 1)
|
||||
|
||||
@ -166,7 +179,7 @@ class EnvironmentManagementTest(TestCase):
|
||||
|
||||
def test_reset_environment_name(self):
|
||||
activate_environment('ENVIRONMENT')
|
||||
t1 = TestClass()
|
||||
t1 = MockClass()
|
||||
t1.initilize_once()
|
||||
assert_equal(t1.count, 1)
|
||||
|
||||
@ -195,7 +208,7 @@ class OnlyOnceEnvironmentTest(TestCase):
|
||||
reset_environment('TEST_ENVIRONMENT')
|
||||
|
||||
def test_single_instance(self):
|
||||
t1 = TestClass()
|
||||
t1 = MockClass()
|
||||
ac = AnotherClass()
|
||||
|
||||
t1.initilize_once()
|
||||
@ -209,8 +222,8 @@ class OnlyOnceEnvironmentTest(TestCase):
|
||||
|
||||
|
||||
def test_mulitple_instances(self):
|
||||
t1 = TestClass()
|
||||
t2 = TestClass()
|
||||
t1 = MockClass()
|
||||
t2 = MockClass()
|
||||
|
||||
t1.initilize_once()
|
||||
assert_equal(t1.count, 1)
|
||||
@ -220,7 +233,7 @@ class OnlyOnceEnvironmentTest(TestCase):
|
||||
|
||||
|
||||
def test_sub_classes(self):
|
||||
t1 = TestClass()
|
||||
t1 = MockClass()
|
||||
sc = SubClass()
|
||||
ss = SubSubClass()
|
||||
asc = AnotherSubClass()
|
||||
@ -250,7 +263,7 @@ class OncePerClassEnvironmentTest(TestCase):
|
||||
reset_environment('TEST_ENVIRONMENT')
|
||||
|
||||
def test_single_instance(self):
|
||||
t1 = TestClass()
|
||||
t1 = MockClass()
|
||||
ac = AnotherClass()
|
||||
|
||||
t1.initilize_once_per_class()
|
||||
@ -264,8 +277,8 @@ class OncePerClassEnvironmentTest(TestCase):
|
||||
|
||||
|
||||
def test_mulitple_instances(self):
|
||||
t1 = TestClass()
|
||||
t2 = TestClass()
|
||||
t1 = MockClass()
|
||||
t2 = MockClass()
|
||||
|
||||
t1.initilize_once_per_class()
|
||||
assert_equal(t1.count, 1)
|
||||
@ -275,7 +288,7 @@ class OncePerClassEnvironmentTest(TestCase):
|
||||
|
||||
|
||||
def test_sub_classes(self):
|
||||
t1 = TestClass()
|
||||
t1 = MockClass()
|
||||
sc1 = SubClass()
|
||||
sc2 = SubClass()
|
||||
ss1 = SubSubClass()
|
||||
@ -308,7 +321,7 @@ class OncePerInstanceEnvironmentTest(TestCase):
|
||||
reset_environment('TEST_ENVIRONMENT')
|
||||
|
||||
def test_single_instance(self):
|
||||
t1 = TestClass()
|
||||
t1 = MockClass()
|
||||
ac = AnotherClass()
|
||||
|
||||
t1.initilize_once_per_instance()
|
||||
@ -322,8 +335,8 @@ class OncePerInstanceEnvironmentTest(TestCase):
|
||||
|
||||
|
||||
def test_mulitple_instances(self):
|
||||
t1 = TestClass()
|
||||
t2 = TestClass()
|
||||
t1 = MockClass()
|
||||
t2 = MockClass()
|
||||
|
||||
t1.initilize_once_per_instance()
|
||||
assert_equal(t1.count, 1)
|
||||
@ -333,7 +346,7 @@ class OncePerInstanceEnvironmentTest(TestCase):
|
||||
|
||||
|
||||
def test_sub_classes(self):
|
||||
t1 = TestClass()
|
||||
t1 = MockClass()
|
||||
sc = SubClass()
|
||||
ss = SubSubClass()
|
||||
asc = AnotherSubClass()
|
||||
@ -352,3 +365,30 @@ class OncePerInstanceEnvironmentTest(TestCase):
|
||||
asc.initilize_once_per_instance()
|
||||
asc.initilize_once_per_instance()
|
||||
assert_equal(asc.count, 2)
|
||||
|
||||
|
||||
class OncePerAttributeValueTest(TestCase):
|
||||
|
||||
def setUp(self):
|
||||
activate_environment('TEST_ENVIRONMENT')
|
||||
|
||||
def tearDown(self):
|
||||
reset_environment('TEST_ENVIRONMENT')
|
||||
|
||||
def test_once_attribute_value(self):
|
||||
classes = [
|
||||
NamedClass('Rick'),
|
||||
NamedClass('Morty'),
|
||||
NamedClass('Rick'),
|
||||
NamedClass('Morty'),
|
||||
NamedClass('Morty'),
|
||||
NamedClass('Summer'),
|
||||
]
|
||||
|
||||
for c in classes:
|
||||
c.initilize()
|
||||
|
||||
for c in classes:
|
||||
c.initilize()
|
||||
|
||||
assert_equal(NamedClass.count, 3)
|
||||
|
315
tests/test_execution.py
Normal file
@ -0,0 +1,315 @@
|
||||
# Copyright 2020 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import os
|
||||
import tempfile
|
||||
from unittest import TestCase
|
||||
|
||||
from mock.mock import Mock
|
||||
from nose.tools import assert_equal
|
||||
from datetime import datetime
|
||||
|
||||
from wa.framework.configuration import RunConfiguration
|
||||
from wa.framework.configuration.core import JobSpec, Status
|
||||
from wa.framework.execution import ExecutionContext, Runner
|
||||
from wa.framework.job import Job
|
||||
from wa.framework.output import RunOutput, init_run_output
|
||||
from wa.framework.output_processor import ProcessorManager
|
||||
import wa.framework.signal as signal
|
||||
from wa.framework.run import JobState
|
||||
from wa.framework.exception import ExecutionError
|
||||
|
||||
|
||||
class MockConfigManager(Mock):
|
||||
|
||||
@property
|
||||
def jobs(self):
|
||||
return self._joblist
|
||||
|
||||
@property
|
||||
def loaded_config_sources(self):
|
||||
return []
|
||||
|
||||
@property
|
||||
def plugin_cache(self):
|
||||
return MockPluginCache()
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(MockConfigManager, self).__init__(*args, **kwargs)
|
||||
self._joblist = None
|
||||
self.run_config = RunConfiguration()
|
||||
|
||||
def to_pod(self):
|
||||
return {}
|
||||
|
||||
|
||||
class MockPluginCache(Mock):
|
||||
|
||||
def list_plugins(self, kind=None):
|
||||
return []
|
||||
|
||||
|
||||
class MockProcessorManager(Mock):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(MockProcessorManager, self).__init__(*args, **kwargs)
|
||||
|
||||
def get_enabled(self):
|
||||
return []
|
||||
|
||||
|
||||
class JobState_force_retry(JobState):
|
||||
|
||||
@property
|
||||
def status(self):
|
||||
return self._status
|
||||
|
||||
@status.setter
|
||||
def status(self, value):
|
||||
if(self.retries != self.times_to_retry) and (value == Status.RUNNING):
|
||||
self._status = Status.FAILED
|
||||
if self.output:
|
||||
self.output.status = Status.FAILED
|
||||
else:
|
||||
self._status = value
|
||||
if self.output:
|
||||
self.output.status = value
|
||||
|
||||
def __init__(self, to_retry, *args, **kwargs):
|
||||
self.retries = 0
|
||||
self._status = Status.NEW
|
||||
self.times_to_retry = to_retry
|
||||
self.output = None
|
||||
super(JobState_force_retry, self).__init__(*args, **kwargs)
|
||||
|
||||
|
||||
class Job_force_retry(Job):
|
||||
'''This class imitates a job that retries as many times as specified by
|
||||
``retries`` in its constructor'''
|
||||
|
||||
def __init__(self, to_retry, *args, **kwargs):
|
||||
super(Job_force_retry, self).__init__(*args, **kwargs)
|
||||
self.state = JobState_force_retry(to_retry, self.id, self.label, self.iteration, Status.NEW)
|
||||
self.initialized = False
|
||||
self.finalized = False
|
||||
|
||||
def initialize(self, context):
|
||||
self.initialized = True
|
||||
return super().initialize(context)
|
||||
|
||||
def finalize(self, context):
|
||||
self.finalized = True
|
||||
return super().finalize(context)
|
||||
|
||||
|
||||
class TestRunState(TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.path = tempfile.mkstemp()[1]
|
||||
os.remove(self.path)
|
||||
self.initialise_signals()
|
||||
self.context = get_context(self.path)
|
||||
self.job_spec = get_jobspec()
|
||||
|
||||
def tearDown(self):
|
||||
signal.disconnect(self._verify_serialized_state, signal.RUN_INITIALIZED)
|
||||
signal.disconnect(self._verify_serialized_state, signal.JOB_STARTED)
|
||||
signal.disconnect(self._verify_serialized_state, signal.JOB_RESTARTED)
|
||||
signal.disconnect(self._verify_serialized_state, signal.JOB_COMPLETED)
|
||||
signal.disconnect(self._verify_serialized_state, signal.JOB_FAILED)
|
||||
signal.disconnect(self._verify_serialized_state, signal.JOB_ABORTED)
|
||||
signal.disconnect(self._verify_serialized_state, signal.RUN_FINALIZED)
|
||||
|
||||
def test_job_state_transitions_pass(self):
|
||||
'''Tests state equality when the job passes first try'''
|
||||
job = Job(self.job_spec, 1, self.context)
|
||||
job.workload = Mock()
|
||||
|
||||
self.context.cm._joblist = [job]
|
||||
self.context.run_state.add_job(job)
|
||||
|
||||
runner = Runner(self.context, MockProcessorManager())
|
||||
runner.run()
|
||||
|
||||
def test_job_state_transitions_fail(self):
|
||||
'''Tests state equality when job fails completely'''
|
||||
job = Job_force_retry(3, self.job_spec, 1, self.context)
|
||||
job.workload = Mock()
|
||||
|
||||
self.context.cm._joblist = [job]
|
||||
self.context.run_state.add_job(job)
|
||||
|
||||
runner = Runner(self.context, MockProcessorManager())
|
||||
runner.run()
|
||||
|
||||
def test_job_state_transitions_retry(self):
|
||||
'''Tests state equality when job fails initially'''
|
||||
job = Job_force_retry(1, self.job_spec, 1, self.context)
|
||||
job.workload = Mock()
|
||||
|
||||
self.context.cm._joblist = [job]
|
||||
self.context.run_state.add_job(job)
|
||||
|
||||
runner = Runner(self.context, MockProcessorManager())
|
||||
runner.run()
|
||||
|
||||
def initialise_signals(self):
|
||||
signal.connect(self._verify_serialized_state, signal.RUN_INITIALIZED)
|
||||
signal.connect(self._verify_serialized_state, signal.JOB_STARTED)
|
||||
signal.connect(self._verify_serialized_state, signal.JOB_RESTARTED)
|
||||
signal.connect(self._verify_serialized_state, signal.JOB_COMPLETED)
|
||||
signal.connect(self._verify_serialized_state, signal.JOB_FAILED)
|
||||
signal.connect(self._verify_serialized_state, signal.JOB_ABORTED)
|
||||
signal.connect(self._verify_serialized_state, signal.RUN_FINALIZED)
|
||||
|
||||
def _verify_serialized_state(self, _):
|
||||
fs_state = RunOutput(self.path).state
|
||||
ex_state = self.context.run_output.state
|
||||
|
||||
assert_equal(fs_state.status, ex_state.status)
|
||||
fs_js_zip = zip(
|
||||
[value for key, value in fs_state.jobs.items()],
|
||||
[value for key, value in ex_state.jobs.items()]
|
||||
)
|
||||
for fs_jobstate, ex_jobstate in fs_js_zip:
|
||||
assert_equal(fs_jobstate.iteration, ex_jobstate.iteration)
|
||||
assert_equal(fs_jobstate.retries, ex_jobstate.retries)
|
||||
assert_equal(fs_jobstate.status, ex_jobstate.status)
|
||||
|
||||
|
||||
class TestJobState(TestCase):
|
||||
|
||||
def test_job_retry_status(self):
|
||||
job_spec = get_jobspec()
|
||||
context = get_context()
|
||||
|
||||
job = Job_force_retry(2, job_spec, 1, context)
|
||||
job.workload = Mock()
|
||||
|
||||
context.cm._joblist = [job]
|
||||
context.run_state.add_job(job)
|
||||
|
||||
verifier = lambda _: assert_equal(job.status, Status.PENDING)
|
||||
signal.connect(verifier, signal.JOB_RESTARTED)
|
||||
|
||||
runner = Runner(context, MockProcessorManager())
|
||||
runner.run()
|
||||
signal.disconnect(verifier, signal.JOB_RESTARTED)
|
||||
|
||||
def test_skipped_job_state(self):
|
||||
# Test, if the first job fails and the bail parameter set,
|
||||
# that the remaining jobs have status: SKIPPED
|
||||
job_spec = get_jobspec()
|
||||
context = get_context()
|
||||
|
||||
context.cm.run_config.bail_on_job_failure = True
|
||||
|
||||
job1 = Job_force_retry(3, job_spec, 1, context)
|
||||
job2 = Job(job_spec, 1, context)
|
||||
job1.workload = Mock()
|
||||
job2.workload = Mock()
|
||||
|
||||
context.cm._joblist = [job1, job2]
|
||||
context.run_state.add_job(job1)
|
||||
context.run_state.add_job(job2)
|
||||
|
||||
runner = Runner(context, MockProcessorManager())
|
||||
try:
|
||||
runner.run()
|
||||
except ExecutionError:
|
||||
assert_equal(job2.status, Status.SKIPPED)
|
||||
else:
|
||||
assert False, "ExecutionError not raised"
|
||||
|
||||
def test_normal_job_finalized(self):
|
||||
# Test that a job is initialized then finalized normally
|
||||
job_spec = get_jobspec()
|
||||
context = get_context()
|
||||
|
||||
job = Job_force_retry(0, job_spec, 1, context)
|
||||
job.workload = Mock()
|
||||
|
||||
context.cm._joblist = [job]
|
||||
context.run_state.add_job(job)
|
||||
|
||||
runner = Runner(context, MockProcessorManager())
|
||||
runner.run()
|
||||
|
||||
assert_equal(job.initialized, True)
|
||||
assert_equal(job.finalized, True)
|
||||
|
||||
def test_skipped_job_finalized(self):
|
||||
# Test that a skipped job has been finalized
|
||||
job_spec = get_jobspec()
|
||||
context = get_context()
|
||||
|
||||
context.cm.run_config.bail_on_job_failure = True
|
||||
|
||||
job1 = Job_force_retry(3, job_spec, 1, context)
|
||||
job2 = Job_force_retry(0, job_spec, 1, context)
|
||||
job1.workload = Mock()
|
||||
job2.workload = Mock()
|
||||
|
||||
context.cm._joblist = [job1, job2]
|
||||
context.run_state.add_job(job1)
|
||||
context.run_state.add_job(job2)
|
||||
|
||||
runner = Runner(context, MockProcessorManager())
|
||||
try:
|
||||
runner.run()
|
||||
except ExecutionError:
|
||||
assert_equal(job2.finalized, True)
|
||||
else:
|
||||
assert False, "ExecutionError not raised"
|
||||
|
||||
def test_failed_job_finalized(self):
|
||||
# Test that a failed job, while the bail parameter is set,
|
||||
# is finalized
|
||||
job_spec = get_jobspec()
|
||||
context = get_context()
|
||||
|
||||
context.cm.run_config.bail_on_job_failure = True
|
||||
|
||||
job1 = Job_force_retry(3, job_spec, 1, context)
|
||||
job1.workload = Mock()
|
||||
|
||||
context.cm._joblist = [job1]
|
||||
context.run_state.add_job(job1)
|
||||
|
||||
runner = Runner(context, MockProcessorManager())
|
||||
try:
|
||||
runner.run()
|
||||
except ExecutionError:
|
||||
assert_equal(job1.finalized, True)
|
||||
else:
|
||||
assert False, "ExecutionError not raised"
|
||||
|
||||
|
||||
def get_context(path=None):
|
||||
if not path:
|
||||
path = tempfile.mkstemp()[1]
|
||||
os.remove(path)
|
||||
|
||||
config = MockConfigManager()
|
||||
output = init_run_output(path, config)
|
||||
|
||||
return ExecutionContext(config, Mock(), output)
|
||||
|
||||
|
||||
def get_jobspec():
|
||||
job_spec = JobSpec()
|
||||
job_spec.augmentations = {}
|
||||
job_spec.finalize()
|
||||
return job_spec
|
@ -30,6 +30,27 @@ class Callable(object):
|
||||
return self.val
|
||||
|
||||
|
||||
class TestSignalDisconnect(unittest.TestCase):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.callback_ctr = 0
|
||||
|
||||
def setUp(self):
|
||||
signal.connect(self._call_me_once, 'first')
|
||||
signal.connect(self._call_me_once, 'second')
|
||||
|
||||
def test_handler_disconnected(self):
|
||||
signal.send('first')
|
||||
signal.send('second')
|
||||
|
||||
def _call_me_once(self):
|
||||
assert_equal(self.callback_ctr, 0)
|
||||
self.callback_ctr += 1
|
||||
signal.disconnect(self._call_me_once, 'first')
|
||||
signal.disconnect(self._call_me_once, 'second')
|
||||
|
||||
|
||||
class TestPriorityDispatcher(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
@ -61,12 +82,16 @@ class TestPriorityDispatcher(unittest.TestCase):
|
||||
|
||||
def test_wrap_propagate(self):
|
||||
d = {'before': False, 'after': False, 'success': False}
|
||||
|
||||
def before():
|
||||
d['before'] = True
|
||||
|
||||
def after():
|
||||
d['after'] = True
|
||||
|
||||
def success():
|
||||
d['success'] = True
|
||||
|
||||
signal.connect(before, signal.BEFORE_WORKLOAD_SETUP)
|
||||
signal.connect(after, signal.AFTER_WORKLOAD_SETUP)
|
||||
signal.connect(success, signal.SUCCESSFUL_WORKLOAD_SETUP)
|
||||
@ -76,7 +101,7 @@ class TestPriorityDispatcher(unittest.TestCase):
|
||||
with signal.wrap('WORKLOAD_SETUP'):
|
||||
raise RuntimeError()
|
||||
except RuntimeError:
|
||||
caught=True
|
||||
caught = True
|
||||
|
||||
assert_true(d['before'])
|
||||
assert_true(d['after'])
|
||||
|
@ -21,7 +21,7 @@ from nose.tools import raises, assert_equal, assert_not_equal, assert_in, assert
|
||||
from nose.tools import assert_true, assert_false, assert_raises, assert_is, assert_list_equal
|
||||
|
||||
from wa.utils.types import (list_or_integer, list_or_bool, caseless_string,
|
||||
arguments, prioritylist, enum, level)
|
||||
arguments, prioritylist, enum, level, toggle_set)
|
||||
|
||||
|
||||
|
||||
@ -149,3 +149,51 @@ class TestEnumLevel(TestCase):
|
||||
s = e.one.to_pod()
|
||||
l = e.from_pod(s)
|
||||
assert_equal(l, e.one)
|
||||
|
||||
|
||||
class TestToggleSet(TestCase):
|
||||
|
||||
def test_equality(self):
|
||||
ts1 = toggle_set(['one', 'two',])
|
||||
ts2 = toggle_set(['one', 'two', '~three'])
|
||||
|
||||
assert_not_equal(ts1, ts2)
|
||||
assert_equal(ts1.values(), ts2.values())
|
||||
assert_equal(ts2, toggle_set(['two', '~three', 'one']))
|
||||
|
||||
def test_merge(self):
|
||||
ts1 = toggle_set(['one', 'two', 'three', '~four', '~five'])
|
||||
ts2 = toggle_set(['two', '~three', 'four', '~five'])
|
||||
|
||||
ts3 = ts1.merge_with(ts2)
|
||||
assert_equal(ts1, toggle_set(['one', 'two', 'three', '~four', '~five']))
|
||||
assert_equal(ts2, toggle_set(['two', '~three', 'four', '~five']))
|
||||
assert_equal(ts3, toggle_set(['one', 'two', '~three', 'four', '~five']))
|
||||
assert_equal(ts3.values(), set(['one', 'two','four']))
|
||||
|
||||
ts4 = ts1.merge_into(ts2)
|
||||
assert_equal(ts1, toggle_set(['one', 'two', 'three', '~four', '~five']))
|
||||
assert_equal(ts2, toggle_set(['two', '~three', 'four', '~five']))
|
||||
assert_equal(ts4, toggle_set(['one', 'two', 'three', '~four', '~five']))
|
||||
assert_equal(ts4.values(), set(['one', 'two', 'three']))
|
||||
|
||||
def test_drop_all_previous(self):
|
||||
ts1 = toggle_set(['one', 'two', 'three'])
|
||||
ts2 = toggle_set(['four', '~~', 'five'])
|
||||
ts3 = toggle_set(['six', 'seven', '~three'])
|
||||
|
||||
ts4 = ts1.merge_with(ts2).merge_with(ts3)
|
||||
assert_equal(ts4, toggle_set(['four', 'five', 'six', 'seven', '~three', '~~']))
|
||||
|
||||
ts5 = ts2.merge_into(ts3).merge_into(ts1)
|
||||
assert_equal(ts5, toggle_set(['four', 'five', '~~']))
|
||||
|
||||
ts6 = ts2.merge_into(ts3).merge_with(ts1)
|
||||
assert_equal(ts6, toggle_set(['one', 'two', 'three', 'four', 'five', '~~']))
|
||||
|
||||
def test_order_on_create(self):
|
||||
ts1 = toggle_set(['one', 'two', 'three', '~one'])
|
||||
assert_equal(ts1, toggle_set(['~one', 'two', 'three']))
|
||||
|
||||
ts1 = toggle_set(['~one', 'two', 'three', 'one'])
|
||||
assert_equal(ts1, toggle_set(['one', 'two', 'three']))
|
||||
|
@ -17,7 +17,7 @@ from wa.framework import pluginloader, signal
|
||||
from wa.framework.command import Command, ComplexCommand, SubCommand
|
||||
from wa.framework.configuration import settings
|
||||
from wa.framework.configuration.core import Status
|
||||
from wa.framework.exception import (CommandError, ConfigError, HostError, InstrumentError,
|
||||
from wa.framework.exception import (CommandError, ConfigError, HostError, InstrumentError, # pylint: disable=redefined-builtin
|
||||
JobError, NotFoundError, OutputProcessorError,
|
||||
PluginLoaderError, ResourceError, TargetError,
|
||||
TargetNotRespondingError, TimeoutError, ToolError,
|
||||
@ -33,7 +33,7 @@ from wa.framework.target.descriptor import (TargetDescriptor, TargetDescription,
|
||||
create_target_description, add_description_for_target)
|
||||
from wa.framework.workload import (Workload, ApkWorkload, ApkUiautoWorkload,
|
||||
ApkReventWorkload, UIWorkload, UiautoWorkload,
|
||||
ReventWorkload)
|
||||
PackageHandler, ReventWorkload, TestPackageHandler)
|
||||
|
||||
|
||||
from wa.framework.version import get_wa_version, get_wa_version_with_commit
|
||||
|
Binary file not shown.
Binary file not shown.
@ -13,28 +13,331 @@
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
import os
|
||||
import sys
|
||||
import stat
|
||||
import shutil
|
||||
import string
|
||||
import re
|
||||
import uuid
|
||||
import getpass
|
||||
from collections import OrderedDict
|
||||
from distutils.dir_util import copy_tree
|
||||
|
||||
from devlib.utils.types import identifier
|
||||
try:
|
||||
import psycopg2
|
||||
from psycopg2 import connect, OperationalError, extras
|
||||
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
|
||||
except ImportError as e:
|
||||
psycopg2 = None
|
||||
import_error_msg = e.args[0] if e.args else str(e)
|
||||
|
||||
from wa import ComplexCommand, SubCommand, pluginloader, settings
|
||||
from wa.framework.target.descriptor import list_target_descriptions
|
||||
from wa.framework.exception import ConfigError, CommandError
|
||||
from wa.instruments.energy_measurement import EnergyInstrumentBackend
|
||||
from wa.utils.misc import (ensure_directory_exists as _d, capitalize,
|
||||
ensure_file_directory_exists as _f)
|
||||
from wa.utils.postgres import get_schema, POSTGRES_SCHEMA_DIR
|
||||
from wa.utils.serializer import yaml
|
||||
|
||||
from devlib.utils.types import identifier
|
||||
if sys.version_info >= (3, 8):
|
||||
def copy_tree(src, dst):
|
||||
from shutil import copy, copytree # pylint: disable=import-outside-toplevel
|
||||
copytree(
|
||||
src,
|
||||
dst,
|
||||
# dirs_exist_ok=True only exists in Python >= 3.8
|
||||
dirs_exist_ok=True,
|
||||
# Align with devlib and only copy the content without metadata
|
||||
copy_function=copy
|
||||
)
|
||||
else:
|
||||
def copy_tree(src, dst):
|
||||
# pylint: disable=import-outside-toplevel, redefined-outer-name
|
||||
from distutils.dir_util import copy_tree
|
||||
# Align with devlib and only copy the content without metadata
|
||||
copy_tree(src, dst, preserve_mode=False, preserve_times=False)
|
||||
|
||||
|
||||
TEMPLATES_DIR = os.path.join(os.path.dirname(__file__), 'templates')
|
||||
|
||||
|
||||
class CreateDatabaseSubcommand(SubCommand):
|
||||
|
||||
name = 'database'
|
||||
description = """
|
||||
Create a Postgresql database which is compatible with the WA Postgres
|
||||
output processor.
|
||||
"""
|
||||
|
||||
schemafilepath = os.path.join(POSTGRES_SCHEMA_DIR, 'postgres_schema.sql')
|
||||
schemaupdatefilepath = os.path.join(POSTGRES_SCHEMA_DIR, 'postgres_schema_update_v{}.{}.sql')
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(CreateDatabaseSubcommand, self).__init__(*args, **kwargs)
|
||||
self.sql_commands = None
|
||||
self.schema_major = None
|
||||
self.schema_minor = None
|
||||
self.postgres_host = None
|
||||
self.postgres_port = None
|
||||
self.username = None
|
||||
self.password = None
|
||||
self.dbname = None
|
||||
self.config_file = None
|
||||
self.force = None
|
||||
|
||||
def initialize(self, context):
|
||||
self.parser.add_argument(
|
||||
'-a', '--postgres-host', default='localhost',
|
||||
help='The host on which to create the database.')
|
||||
self.parser.add_argument(
|
||||
'-k', '--postgres-port', default='5432',
|
||||
help='The port on which the PostgreSQL server is running.')
|
||||
self.parser.add_argument(
|
||||
'-u', '--username', default='postgres',
|
||||
help='The username with which to connect to the server.')
|
||||
self.parser.add_argument(
|
||||
'-p', '--password',
|
||||
help='The password for the user account.')
|
||||
self.parser.add_argument(
|
||||
'-d', '--dbname', default='wa',
|
||||
help='The name of the database to create.')
|
||||
self.parser.add_argument(
|
||||
'-f', '--force', action='store_true',
|
||||
help='Force overwrite the existing database if one exists.')
|
||||
self.parser.add_argument(
|
||||
'-F', '--force-update-config', action='store_true',
|
||||
help='Force update the config file if an entry exists.')
|
||||
self.parser.add_argument(
|
||||
'-r', '--config-file', default=settings.user_config_file,
|
||||
help='Path to the config file to be updated.')
|
||||
self.parser.add_argument(
|
||||
'-x', '--schema-version', action='store_true',
|
||||
help='Display the current schema version.')
|
||||
self.parser.add_argument(
|
||||
'-U', '--upgrade', action='store_true',
|
||||
help='Upgrade the database to use the latest schema version.')
|
||||
|
||||
def execute(self, state, args): # pylint: disable=too-many-branches
|
||||
if not psycopg2:
|
||||
raise CommandError(
|
||||
'The module psycopg2 is required for the wa '
|
||||
+ 'create database command.')
|
||||
|
||||
if args.dbname == 'postgres':
|
||||
raise ValueError('Databasename to create cannot be postgres.')
|
||||
|
||||
self._parse_args(args)
|
||||
self.schema_major, self.schema_minor, self.sql_commands = get_schema(self.schemafilepath)
|
||||
|
||||
# Display the version if needed and exit
|
||||
if args.schema_version:
|
||||
self.logger.info(
|
||||
'The current schema version is {}.{}'.format(self.schema_major,
|
||||
self.schema_minor))
|
||||
return
|
||||
|
||||
if args.upgrade:
|
||||
self.update_schema()
|
||||
return
|
||||
|
||||
# Open user configuration
|
||||
with open(self.config_file, 'r') as config_file:
|
||||
config = yaml.load(config_file)
|
||||
if 'postgres' in config and not args.force_update_config:
|
||||
raise CommandError(
|
||||
"The entry 'postgres' already exists in the config file. "
|
||||
+ "Please specify the -F flag to force an update.")
|
||||
|
||||
possible_connection_errors = [
|
||||
(
|
||||
re.compile('FATAL: role ".*" does not exist'),
|
||||
'Username does not exist or password is incorrect'
|
||||
),
|
||||
(
|
||||
re.compile('FATAL: password authentication failed for user'),
|
||||
'Password was incorrect'
|
||||
),
|
||||
(
|
||||
re.compile('fe_sendauth: no password supplied'),
|
||||
'Passwordless connection is not enabled. '
|
||||
'Please enable trust in pg_hba for this host '
|
||||
'or use a password'
|
||||
),
|
||||
(
|
||||
re.compile('FATAL: no pg_hba.conf entry for'),
|
||||
'Host is not allowed to connect to the specified database '
|
||||
'using this user according to pg_hba.conf. Please change the '
|
||||
'rules in pg_hba or your connection method'
|
||||
),
|
||||
(
|
||||
re.compile('FATAL: pg_hba.conf rejects connection'),
|
||||
'Connection was rejected by pg_hba.conf'
|
||||
),
|
||||
]
|
||||
|
||||
def predicate(error, handle):
|
||||
if handle[0].match(str(error)):
|
||||
raise CommandError(handle[1] + ': \n' + str(error))
|
||||
|
||||
# Attempt to create database
|
||||
try:
|
||||
self.create_database()
|
||||
except OperationalError as e:
|
||||
for handle in possible_connection_errors:
|
||||
predicate(e, handle)
|
||||
raise e
|
||||
|
||||
# Update the configuration file
|
||||
self._update_configuration_file(config)
|
||||
|
||||
def create_database(self):
|
||||
self._validate_version()
|
||||
|
||||
self._check_database_existence()
|
||||
|
||||
self._create_database_postgres()
|
||||
|
||||
self._apply_database_schema(self.sql_commands, self.schema_major, self.schema_minor)
|
||||
|
||||
self.logger.info(
|
||||
"Successfully created the database {}".format(self.dbname))
|
||||
|
||||
    def update_schema(self):
        self._validate_version()
        schema_major, schema_minor, _ = get_schema(self.schemafilepath)
        meta_oid, current_major, current_minor = self._get_database_schema_version()

        while not (schema_major == current_major and schema_minor == current_minor):
            current_minor = self._update_schema_minors(current_major, current_minor, meta_oid)
            current_major, current_minor = self._update_schema_major(current_major, current_minor, meta_oid)
        msg = "Database schema update of '{}' to v{}.{} complete"
        self.logger.info(msg.format(self.dbname, schema_major, schema_minor))

    def _update_schema_minors(self, major, minor, meta_oid):
        # Upgrade all available minor versions
        while True:
            minor += 1
            schema_update = os.path.join(POSTGRES_SCHEMA_DIR,
                                         self.schemaupdatefilepath.format(major, minor))
            if not os.path.exists(schema_update):
                break

            _, _, sql_commands = get_schema(schema_update)
            self._apply_database_schema(sql_commands, major, minor, meta_oid)
            msg = "Updated the database schema to v{}.{}"
            self.logger.debug(msg.format(major, minor))

        # Return last existing update file version
        return minor - 1

    def _update_schema_major(self, current_major, current_minor, meta_oid):
        current_major += 1
        schema_update = os.path.join(POSTGRES_SCHEMA_DIR,
                                     self.schemaupdatefilepath.format(current_major, 0))
        if not os.path.exists(schema_update):
            return (current_major - 1, current_minor)

        # Reset minor to 0 with major version bump
        current_minor = 0
        _, _, sql_commands = get_schema(schema_update)
        self._apply_database_schema(sql_commands, current_major, current_minor, meta_oid)
        msg = "Updated the database schema to v{}.{}"
        self.logger.debug(msg.format(current_major, current_minor))
        return (current_major, current_minor)
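The loops above probe for numbered update files next to the base schema and stop at the first version for which no file exists. Judging from the update files added in this change and the changelog, the schema directory would look roughly as follows (the v1.3-v1.5 file names are inferred from the naming pattern and are not shown verbatim in this diff):

    $ ls wa/commands/postgres_schemas/
    postgres_schema.sql
    postgres_schema_update_v1.2.sql
    postgres_schema_update_v1.3.sql
    postgres_schema_update_v1.4.sql
    postgres_schema_update_v1.5.sql
    postgres_schema_update_v1.6.sql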
|
||||
|
||||
def _validate_version(self):
|
||||
conn = connect(user=self.username,
|
||||
password=self.password, host=self.postgres_host, port=self.postgres_port)
|
||||
if conn.server_version < 90400:
|
||||
msg = 'Postgres version too low. Please ensure that you are using at least v9.4'
|
||||
raise CommandError(msg)
|
||||
|
||||
def _get_database_schema_version(self):
|
||||
conn = connect(dbname=self.dbname, user=self.username,
|
||||
password=self.password, host=self.postgres_host, port=self.postgres_port)
|
||||
cursor = conn.cursor()
|
||||
cursor.execute('''SELECT
|
||||
DatabaseMeta.oid,
|
||||
DatabaseMeta.schema_major,
|
||||
DatabaseMeta.schema_minor
|
||||
FROM
|
||||
DatabaseMeta;''')
|
||||
return cursor.fetchone()
|
||||
|
||||
def _check_database_existence(self):
|
||||
try:
|
||||
connect(dbname=self.dbname, user=self.username,
|
||||
password=self.password, host=self.postgres_host, port=self.postgres_port)
|
||||
except OperationalError as e:
|
||||
# Expect an operational error (database's non-existence)
|
||||
if not re.compile('FATAL: database ".*" does not exist').match(str(e)):
|
||||
raise e
|
||||
else:
|
||||
if not self.force:
|
||||
raise CommandError(
|
||||
"Database {} already exists. ".format(self.dbname)
|
||||
+ "Please specify the -f flag to create it afresh."
|
||||
)
|
||||
|
||||
def _create_database_postgres(self):
|
||||
conn = connect(dbname='postgres', user=self.username,
|
||||
password=self.password, host=self.postgres_host, port=self.postgres_port)
|
||||
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
|
||||
cursor = conn.cursor()
|
||||
cursor.execute('DROP DATABASE IF EXISTS ' + self.dbname)
|
||||
cursor.execute('CREATE DATABASE ' + self.dbname)
|
||||
conn.commit()
|
||||
cursor.close()
|
||||
conn.close()
|
||||
|
||||
def _apply_database_schema(self, sql_commands, schema_major, schema_minor, meta_uuid=None):
|
||||
conn = connect(dbname=self.dbname, user=self.username,
|
||||
password=self.password, host=self.postgres_host, port=self.postgres_port)
|
||||
cursor = conn.cursor()
|
||||
cursor.execute(sql_commands)
|
||||
|
||||
if not meta_uuid:
|
||||
extras.register_uuid()
|
||||
meta_uuid = uuid.uuid4()
|
||||
cursor.execute("INSERT INTO DatabaseMeta VALUES (%s, %s, %s)",
|
||||
(meta_uuid,
|
||||
schema_major,
|
||||
schema_minor
|
||||
))
|
||||
else:
|
||||
cursor.execute("UPDATE DatabaseMeta SET schema_major = %s, schema_minor = %s WHERE oid = %s;",
|
||||
(schema_major,
|
||||
schema_minor,
|
||||
meta_uuid
|
||||
))
|
||||
|
||||
conn.commit()
|
||||
cursor.close()
|
||||
conn.close()
|
||||
|
||||
def _update_configuration_file(self, config):
|
||||
''' Update the user configuration file with the newly created database's
|
||||
configuration.
|
||||
'''
|
||||
config['postgres'] = OrderedDict(
|
||||
[('host', self.postgres_host), ('port', self.postgres_port),
|
||||
('dbname', self.dbname), ('username', self.username), ('password', self.password)])
|
||||
with open(self.config_file, 'w+') as config_file:
|
||||
yaml.dump(config, config_file)
|
||||
|
||||
def _parse_args(self, args):
|
||||
self.postgres_host = args.postgres_host
|
||||
self.postgres_port = args.postgres_port
|
||||
self.username = args.username
|
||||
self.password = args.password
|
||||
self.dbname = args.dbname
|
||||
self.config_file = args.config_file
|
||||
self.force = args.force
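Putting the options above together, typical invocations of this subcommand might look like the following sketch (flag names are the ones defined above; the password and database name are placeholders, and the PostgreSQL host/port options are left at their defaults):

    # Create a fresh 'wa' database and record the connection in the user config
    wa create database -u postgres -p secret

    # Recreate an existing database and force-update the existing config entry
    wa create database -d wa_results -f -F

    # Show the bundled schema version, or upgrade an existing database in place
    wa create database -x
    wa create database -U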
|
||||
|
||||
|
||||
class CreateAgendaSubcommand(SubCommand):
|
||||
|
||||
name = 'agenda'
|
||||
@ -51,6 +354,7 @@ class CreateAgendaSubcommand(SubCommand):
|
||||
self.parser.add_argument('-o', '--output', metavar='FILE',
|
||||
help='Output file. If not specified, STDOUT will be used instead.')
|
||||
|
||||
# pylint: disable=too-many-branches
|
||||
def execute(self, state, args):
|
||||
agenda = OrderedDict()
|
||||
agenda['config'] = OrderedDict(augmentations=[], iterations=args.iterations)
|
||||
@ -71,7 +375,15 @@ class CreateAgendaSubcommand(SubCommand):
|
||||
extcls = pluginloader.get_plugin_class(name)
|
||||
config = pluginloader.get_default_config(name)
|
||||
|
||||
if extcls.kind == 'workload':
|
||||
# Handle special case for EnergyInstrumentBackends
|
||||
if issubclass(extcls, EnergyInstrumentBackend):
|
||||
if 'energy_measurement' not in agenda['config']['augmentations']:
|
||||
energy_config = pluginloader.get_default_config('energy_measurement')
|
||||
agenda['config']['augmentations'].append('energy_measurement')
|
||||
agenda['config']['energy_measurement'] = energy_config
|
||||
agenda['config']['energy_measurement']['instrument'] = extcls.name
|
||||
agenda['config']['energy_measurement']['instrument_parameters'] = config
|
||||
elif extcls.kind == 'workload':
|
||||
entry = OrderedDict()
|
||||
entry['name'] = extcls.name
|
||||
if name != extcls.name:
|
||||
@ -79,11 +391,12 @@ class CreateAgendaSubcommand(SubCommand):
|
||||
entry['params'] = config
|
||||
agenda['workloads'].append(entry)
|
||||
else:
|
||||
if extcls.kind == 'instrument':
|
||||
agenda['config']['augmentations'].append(name)
|
||||
if extcls.kind == 'output_processor':
|
||||
agenda['config']['augmentations'].append(name)
|
||||
agenda['config'][name] = config
|
||||
if extcls.kind in ('instrument', 'output_processor'):
|
||||
if extcls.name not in agenda['config']['augmentations']:
|
||||
agenda['config']['augmentations'].append(extcls.name)
|
||||
|
||||
if extcls.name not in agenda['config']:
|
||||
agenda['config'][extcls.name] = config
|
||||
|
||||
if args.output:
|
||||
wfh = open(args.output, 'w')
|
||||
@ -104,14 +417,14 @@ class CreateWorkloadSubcommand(SubCommand):
|
||||
self.parser.add_argument('name', metavar='NAME',
|
||||
help='Name of the workload to be created')
|
||||
self.parser.add_argument('-p', '--path', metavar='PATH', default=None,
|
||||
help='The location at which the workload will be created. If not specified, ' +
|
||||
'this defaults to "~/.workload_automation/plugins".')
|
||||
help='The location at which the workload will be created. If not specified, '
|
||||
+ 'this defaults to "~/.workload_automation/plugins".')
|
||||
self.parser.add_argument('-f', '--force', action='store_true',
|
||||
help='Create the new workload even if a workload with the specified ' +
|
||||
'name already exists.')
|
||||
help='Create the new workload even if a workload with the specified '
|
||||
+ 'name already exists.')
|
||||
self.parser.add_argument('-k', '--kind', metavar='KIND', default='basic', choices=list(create_funcs.keys()),
|
||||
help='The type of workload to be created. The available options ' +
|
||||
'are: {}'.format(', '.join(list(create_funcs.keys()))))
|
||||
help='The type of workload to be created. The available options '
|
||||
+ 'are: {}'.format(', '.join(list(create_funcs.keys()))))
|
||||
|
||||
def execute(self, state, args): # pylint: disable=R0201
|
||||
where = args.path or 'local'
|
||||
@ -134,8 +447,8 @@ class CreatePackageSubcommand(SubCommand):
|
||||
self.parser.add_argument('name', metavar='NAME',
|
||||
help='Name of the package to be created')
|
||||
self.parser.add_argument('-p', '--path', metavar='PATH', default=None,
|
||||
help='The location at which the new package will be created. If not specified, ' +
|
||||
'current working directory will be used.')
|
||||
help='The location at which the new package will be created. If not specified, '
|
||||
+ 'current working directory will be used.')
|
||||
self.parser.add_argument('-f', '--force', action='store_true',
|
||||
help='Create the new package even if a file or directory with the same name '
|
||||
'already exists at the specified location.')
|
||||
@ -170,6 +483,7 @@ class CreateCommand(ComplexCommand):
|
||||
object-specific arguments.
|
||||
'''
|
||||
subcmd_classes = [
|
||||
CreateDatabaseSubcommand,
|
||||
CreateWorkloadSubcommand,
|
||||
CreateAgendaSubcommand,
|
||||
CreatePackageSubcommand,
|
||||
@ -240,6 +554,7 @@ def create_uiauto_project(path, name):
|
||||
wfh.write(render_template(os.path.join('uiauto', 'UiAutomation.java'),
|
||||
{'name': name, 'package_name': package_name}))
|
||||
|
||||
|
||||
# Mapping of workload types to their corresponding creation method
|
||||
create_funcs = {
|
||||
'basic': create_template_workload,
|
||||
@ -266,5 +581,5 @@ def get_class_name(name, postfix=''):
|
||||
|
||||
|
||||
def touch(path):
|
||||
with open(path, 'w') as _:
|
||||
with open(path, 'w') as _: # NOQA
|
||||
pass
|
||||
|
201
wa/commands/postgres_schemas/postgres_schema.sql
Normal file
@ -0,0 +1,201 @@
|
||||
--!VERSION!1.6!ENDVERSION!
|
||||
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
|
||||
CREATE EXTENSION IF NOT EXISTS "lo";
|
||||
|
||||
-- In future, it may be useful to implement rules on which Parameter oid fields can be NULL, dependent on the value in the type column;
|
||||
|
||||
DROP TABLE IF EXISTS DatabaseMeta;
|
||||
DROP TABLE IF EXISTS Parameters;
|
||||
DROP TABLE IF EXISTS Classifiers;
|
||||
DROP TABLE IF EXISTS LargeObjects;
|
||||
DROP TABLE IF EXISTS Artifacts;
|
||||
DROP TABLE IF EXISTS Metrics;
|
||||
DROP TABLE IF EXISTS Augmentations;
|
||||
DROP TABLE IF EXISTS Jobs_Augs;
|
||||
DROP TABLE IF EXISTS ResourceGetters;
|
||||
DROP TABLE IF EXISTS Resource_Getters;
|
||||
DROP TABLE IF EXISTS Events;
|
||||
DROP TABLE IF EXISTS Targets;
|
||||
DROP TABLE IF EXISTS Jobs;
|
||||
DROP TABLE IF EXISTS Runs;
|
||||
|
||||
DROP TYPE IF EXISTS status_enum;
|
||||
DROP TYPE IF EXISTS param_enum;
|
||||
|
||||
CREATE TYPE status_enum AS ENUM ('UNKNOWN(0)','NEW(1)','PENDING(2)','STARTED(3)','CONNECTED(4)', 'INITIALIZED(5)', 'RUNNING(6)', 'OK(7)', 'PARTIAL(8)', 'FAILED(9)', 'ABORTED(10)', 'SKIPPED(11)');
|
||||
|
||||
CREATE TYPE param_enum AS ENUM ('workload', 'resource_getter', 'augmentation', 'device', 'runtime', 'boot');
|
||||
|
||||
-- In future, it might be useful to create an ENUM type for the artifact kind, or simply a generic enum type;
|
||||
|
||||
CREATE TABLE DatabaseMeta (
|
||||
oid uuid NOT NULL,
|
||||
schema_major int,
|
||||
schema_minor int,
|
||||
PRIMARY KEY (oid)
|
||||
);
|
||||
|
||||
CREATE TABLE Runs (
|
||||
oid uuid NOT NULL,
|
||||
event_summary text,
|
||||
basepath text,
|
||||
status status_enum,
|
||||
timestamp timestamp,
|
||||
run_name text,
|
||||
project text,
|
||||
project_stage text,
|
||||
retry_on_status status_enum[],
|
||||
max_retries int,
|
||||
bail_on_init_failure boolean,
|
||||
allow_phone_home boolean,
|
||||
run_uuid uuid,
|
||||
start_time timestamp,
|
||||
end_time timestamp,
|
||||
duration float,
|
||||
metadata jsonb,
|
||||
_pod_version int,
|
||||
_pod_serialization_version int,
|
||||
state jsonb,
|
||||
PRIMARY KEY (oid)
|
||||
);
|
||||
|
||||
CREATE TABLE Jobs (
|
||||
oid uuid NOT NULL,
|
||||
run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
|
||||
status status_enum,
|
||||
retry int,
|
||||
label text,
|
||||
job_id text,
|
||||
iterations int,
|
||||
workload_name text,
|
||||
metadata jsonb,
|
||||
_pod_version int,
|
||||
_pod_serialization_version int,
|
||||
PRIMARY KEY (oid)
|
||||
);
|
||||
|
||||
CREATE TABLE Targets (
|
||||
oid uuid NOT NULL,
|
||||
run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
|
||||
target text,
|
||||
modules text[],
|
||||
cpus text[],
|
||||
os text,
|
||||
os_version jsonb,
|
||||
hostid bigint,
|
||||
hostname text,
|
||||
abi text,
|
||||
is_rooted boolean,
|
||||
kernel_version text,
|
||||
kernel_release text,
|
||||
kernel_sha1 text,
|
||||
kernel_config text[],
|
||||
sched_features text[],
|
||||
page_size_kb int,
|
||||
screen_resolution int[],
|
||||
prop json,
|
||||
android_id text,
|
||||
_pod_version int,
|
||||
_pod_serialization_version int,
|
||||
system_id text,
|
||||
PRIMARY KEY (oid)
|
||||
);
|
||||
|
||||
CREATE TABLE Events (
|
||||
oid uuid NOT NULL,
|
||||
run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
|
||||
job_oid uuid references Jobs(oid),
|
||||
timestamp timestamp,
|
||||
message text,
|
||||
_pod_version int,
|
||||
_pod_serialization_version int,
|
||||
PRIMARY KEY (oid)
|
||||
);
|
||||
|
||||
CREATE TABLE Resource_Getters (
|
||||
oid uuid NOT NULL,
|
||||
run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
|
||||
name text,
|
||||
PRIMARY KEY (oid)
|
||||
);
|
||||
|
||||
CREATE TABLE Augmentations (
|
||||
oid uuid NOT NULL,
|
||||
run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
|
||||
name text,
|
||||
PRIMARY KEY (oid)
|
||||
);
|
||||
|
||||
CREATE TABLE Jobs_Augs (
|
||||
oid uuid NOT NULL,
|
||||
job_oid uuid NOT NULL references Jobs(oid) ON DELETE CASCADE,
|
||||
augmentation_oid uuid NOT NULL references Augmentations(oid) ON DELETE CASCADE,
|
||||
PRIMARY KEY (oid)
|
||||
);
|
||||
|
||||
CREATE TABLE Metrics (
|
||||
oid uuid NOT NULL,
|
||||
run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
|
||||
job_oid uuid references Jobs(oid),
|
||||
name text,
|
||||
value double precision,
|
||||
units text,
|
||||
lower_is_better boolean,
|
||||
_pod_version int,
|
||||
_pod_serialization_version int,
|
||||
PRIMARY KEY (oid)
|
||||
);
|
||||
|
||||
CREATE TABLE LargeObjects (
|
||||
oid uuid NOT NULL,
|
||||
lo_oid lo NOT NULL,
|
||||
PRIMARY KEY (oid)
|
||||
);
|
||||
|
||||
-- Trigger that allows you to manage large objects from the LO table directly;
|
||||
CREATE TRIGGER t_raster BEFORE UPDATE OR DELETE ON LargeObjects
|
||||
FOR EACH ROW EXECUTE PROCEDURE lo_manage(lo_oid);
|
||||
|
||||
CREATE TABLE Artifacts (
|
||||
oid uuid NOT NULL,
|
||||
run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
|
||||
job_oid uuid references Jobs(oid),
|
||||
name text,
|
||||
large_object_uuid uuid NOT NULL references LargeObjects(oid),
|
||||
description text,
|
||||
kind text,
|
||||
_pod_version int,
|
||||
_pod_serialization_version int,
|
||||
is_dir boolean,
|
||||
PRIMARY KEY (oid)
|
||||
);
|
||||
|
||||
CREATE RULE del_lo AS
|
||||
ON DELETE TO Artifacts
|
||||
DO DELETE FROM LargeObjects
|
||||
WHERE LargeObjects.oid = old.large_object_uuid
|
||||
;
|
||||
|
||||
CREATE TABLE Classifiers (
|
||||
oid uuid NOT NULL,
|
||||
artifact_oid uuid references Artifacts(oid) ON DELETE CASCADE,
|
||||
metric_oid uuid references Metrics(oid) ON DELETE CASCADE,
|
||||
job_oid uuid references Jobs(oid) ON DELETE CASCADE,
|
||||
run_oid uuid references Runs(oid) ON DELETE CASCADE,
|
||||
key text,
|
||||
value text,
|
||||
PRIMARY KEY (oid)
|
||||
);
|
||||
|
||||
CREATE TABLE Parameters (
|
||||
oid uuid NOT NULL,
|
||||
run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
|
||||
job_oid uuid references Jobs(oid),
|
||||
augmentation_oid uuid references Augmentations(oid),
|
||||
resource_getter_oid uuid references Resource_Getters(oid),
|
||||
name text,
|
||||
value text,
|
||||
value_type text,
|
||||
type param_enum,
|
||||
PRIMARY KEY (oid)
|
||||
);
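As an illustration of how the tables relate (not part of the schema file itself), a query along these lines would pull per-job metrics out of a populated database; the database name 'wa' is the default used by the create command and the credentials are placeholders:

    psql -U postgres -d wa -c "
        SELECT r.run_name, j.workload_name, m.name, m.value, m.units
        FROM Metrics m
        JOIN Jobs j ON m.job_oid = j.oid
        JOIN Runs r ON m.run_oid = r.oid
        ORDER BY r.start_time;"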
|
30
wa/commands/postgres_schemas/postgres_schema_update_v1.2.sql
Normal file
@ -0,0 +1,30 @@
ALTER TABLE resourcegetters RENAME TO resource_getters;

ALTER TABLE classifiers ADD COLUMN job_oid uuid references Jobs(oid);
ALTER TABLE classifiers ADD COLUMN run_oid uuid references Runs(oid);

ALTER TABLE targets ADD COLUMN page_size_kb int;
ALTER TABLE targets ADD COLUMN screen_resolution int[];
ALTER TABLE targets ADD COLUMN prop text;
ALTER TABLE targets ADD COLUMN android_id text;
ALTER TABLE targets ADD COLUMN _pod_version int;
ALTER TABLE targets ADD COLUMN _pod_serialization_version int;

ALTER TABLE jobs RENAME COLUMN retries TO retry;
ALTER TABLE jobs ADD COLUMN _pod_version int;
ALTER TABLE jobs ADD COLUMN _pod_serialization_version int;

ALTER TABLE runs ADD COLUMN project_stage text;
ALTER TABLE runs ADD COLUMN state jsonb;
ALTER TABLE runs ADD COLUMN duration float;
ALTER TABLE runs ADD COLUMN _pod_version int;
ALTER TABLE runs ADD COLUMN _pod_serialization_version int;

ALTER TABLE artifacts ADD COLUMN _pod_version int;
ALTER TABLE artifacts ADD COLUMN _pod_serialization_version int;

ALTER TABLE events ADD COLUMN _pod_version int;
ALTER TABLE events ADD COLUMN _pod_serialization_version int;

ALTER TABLE metrics ADD COLUMN _pod_version int;
ALTER TABLE metrics ADD COLUMN _pod_serialization_version int;
@ -0,0 +1,3 @@
ALTER TABLE targets ADD COLUMN system_id text;

ALTER TABLE artifacts ADD COLUMN is_dir boolean;
@ -0,0 +1,2 @@
ALTER TABLE targets ADD COLUMN modules text[];

@ -0,0 +1 @@
ALTER TABLE targets ALTER hostid TYPE BIGINT;
109
wa/commands/postgres_schemas/postgres_schema_update_v1.6.sql
Normal file
@ -0,0 +1,109 @@
|
||||
ALTER TABLE jobs
|
||||
DROP CONSTRAINT jobs_run_oid_fkey,
|
||||
ADD CONSTRAINT jobs_run_oid_fkey
|
||||
FOREIGN KEY (run_oid)
|
||||
REFERENCES runs(oid)
|
||||
ON DELETE CASCADE
|
||||
;
|
||||
|
||||
ALTER TABLE targets
|
||||
DROP CONSTRAINT targets_run_oid_fkey,
|
||||
ADD CONSTRAINT targets_run_oid_fkey
|
||||
FOREIGN KEY (run_oid)
|
||||
REFERENCES runs(oid)
|
||||
ON DELETE CASCADE
|
||||
;
|
||||
|
||||
ALTER TABLE events
|
||||
DROP CONSTRAINT events_run_oid_fkey,
|
||||
ADD CONSTRAINT events_run_oid_fkey
|
||||
FOREIGN KEY (run_oid)
|
||||
REFERENCES runs(oid)
|
||||
ON DELETE CASCADE
|
||||
;
|
||||
|
||||
ALTER TABLE resource_getters
|
||||
DROP CONSTRAINT resource_getters_run_oid_fkey,
|
||||
ADD CONSTRAINT resource_getters_run_oid_fkey
|
||||
FOREIGN KEY (run_oid)
|
||||
REFERENCES runs(oid)
|
||||
ON DELETE CASCADE
|
||||
;
|
||||
|
||||
ALTER TABLE augmentations
|
||||
DROP CONSTRAINT augmentations_run_oid_fkey,
|
||||
ADD CONSTRAINT augmentations_run_oid_fkey
|
||||
FOREIGN KEY (run_oid)
|
||||
REFERENCES runs(oid)
|
||||
ON DELETE CASCADE
|
||||
;
|
||||
|
||||
ALTER TABLE jobs_augs
|
||||
DROP CONSTRAINT jobs_augs_job_oid_fkey,
|
||||
DROP CONSTRAINT jobs_augs_augmentation_oid_fkey,
|
||||
ADD CONSTRAINT jobs_augs_job_oid_fkey
|
||||
FOREIGN KEY (job_oid)
|
||||
REFERENCES Jobs(oid)
|
||||
ON DELETE CASCADE,
|
||||
ADD CONSTRAINT jobs_augs_augmentation_oid_fkey
|
||||
FOREIGN KEY (augmentation_oid)
|
||||
REFERENCES Augmentations(oid)
|
||||
ON DELETE CASCADE
|
||||
;
|
||||
|
||||
ALTER TABLE metrics
|
||||
DROP CONSTRAINT metrics_run_oid_fkey,
|
||||
ADD CONSTRAINT metrics_run_oid_fkey
|
||||
FOREIGN KEY (run_oid)
|
||||
REFERENCES runs(oid)
|
||||
ON DELETE CASCADE
|
||||
;
|
||||
|
||||
ALTER TABLE artifacts
|
||||
DROP CONSTRAINT artifacts_run_oid_fkey,
|
||||
ADD CONSTRAINT artifacts_run_oid_fkey
|
||||
FOREIGN KEY (run_oid)
|
||||
REFERENCES runs(oid)
|
||||
ON DELETE CASCADE
|
||||
;
|
||||
|
||||
CREATE RULE del_lo AS
|
||||
ON DELETE TO Artifacts
|
||||
DO DELETE FROM LargeObjects
|
||||
WHERE LargeObjects.oid = old.large_object_uuid
|
||||
;
|
||||
|
||||
ALTER TABLE classifiers
|
||||
DROP CONSTRAINT classifiers_artifact_oid_fkey,
|
||||
DROP CONSTRAINT classifiers_metric_oid_fkey,
|
||||
DROP CONSTRAINT classifiers_job_oid_fkey,
|
||||
DROP CONSTRAINT classifiers_run_oid_fkey,
|
||||
|
||||
ADD CONSTRAINT classifiers_artifact_oid_fkey
|
||||
FOREIGN KEY (artifact_oid)
|
||||
REFERENCES artifacts(oid)
|
||||
ON DELETE CASCADE,
|
||||
|
||||
ADD CONSTRAINT classifiers_metric_oid_fkey
|
||||
FOREIGN KEY (metric_oid)
|
||||
REFERENCES metrics(oid)
|
||||
ON DELETE CASCADE,
|
||||
|
||||
ADD CONSTRAINT classifiers_job_oid_fkey
|
||||
FOREIGN KEY (job_oid)
|
||||
REFERENCES jobs(oid)
|
||||
ON DELETE CASCADE,
|
||||
|
||||
ADD CONSTRAINT classifiers_run_oid_fkey
|
||||
FOREIGN KEY (run_oid)
|
||||
REFERENCES runs(oid)
|
||||
ON DELETE CASCADE
|
||||
;
|
||||
|
||||
ALTER TABLE parameters
|
||||
DROP CONSTRAINT parameters_run_oid_fkey,
|
||||
ADD CONSTRAINT parameters_run_oid_fkey
|
||||
FOREIGN KEY (run_oid)
|
||||
REFERENCES runs(oid)
|
||||
ON DELETE CASCADE
|
||||
;
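The net effect of the v1.6 changes above is that deleting a run removes everything hanging off it. For example (the run name and credentials are placeholders):

    # Jobs, events, metrics, artifacts and their large objects belonging to the
    # run are removed automatically via the cascades and the del_lo rule.
    psql -U postgres -d wa -c "DELETE FROM Runs WHERE run_name = 'my_run';"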
|
@ -17,6 +17,7 @@ import os
|
||||
|
||||
from wa import Command
|
||||
from wa import discover_wa_outputs
|
||||
from wa.framework.configuration.core import Status
|
||||
from wa.framework.exception import CommandError
|
||||
from wa.framework.output import RunOutput
|
||||
from wa.framework.output_processor import ProcessorManager
|
||||
@ -30,6 +31,9 @@ class ProcessContext(object):
|
||||
self.target_info = None
|
||||
self.job_output = None
|
||||
|
||||
def add_augmentation(self, aug):
|
||||
pass
|
||||
|
||||
|
||||
class ProcessCommand(Command):
|
||||
|
||||
@ -54,8 +58,9 @@ class ProcessCommand(Command):
|
||||
""")
|
||||
self.parser.add_argument('-f', '--force', action='store_true',
|
||||
help="""
|
||||
Run processors that have already been
|
||||
run. By default these will be skipped.
|
||||
Run processors that have already been run. By
|
||||
default these will be skipped. Also, forces
|
||||
processing of in-progress runs.
|
||||
""")
|
||||
self.parser.add_argument('-r', '--recursive', action='store_true',
|
||||
help="""
|
||||
@ -64,7 +69,7 @@ class ProcessCommand(Command):
|
||||
instead of just processing the root.
|
||||
""")
|
||||
|
||||
def execute(self, config, args):
|
||||
def execute(self, config, args): # pylint: disable=arguments-differ,too-many-branches,too-many-statements
|
||||
process_directory = os.path.expandvars(args.directory)
|
||||
self.logger.debug('Using process directory: {}'.format(process_directory))
|
||||
if not os.path.exists(process_directory):
|
||||
@ -73,10 +78,18 @@ class ProcessCommand(Command):
|
||||
if not args.recursive:
|
||||
output_list = [RunOutput(process_directory)]
|
||||
else:
|
||||
output_list = [output for output in discover_wa_outputs(process_directory)]
|
||||
output_list = list(discover_wa_outputs(process_directory))
|
||||
|
||||
pc = ProcessContext()
|
||||
for run_output in output_list:
|
||||
if run_output.status < Status.OK and not args.force:
|
||||
msg = 'Skipping {} as it has not completed -- {}'
|
||||
self.logger.info(msg.format(run_output.basepath, run_output.status))
|
||||
continue
|
||||
|
||||
pc.run_output = run_output
|
||||
pc.target_info = run_output.target_info
|
||||
|
||||
if not args.recursive:
|
||||
self.logger.info('Installing output processors')
|
||||
else:
|
||||
@ -92,7 +105,7 @@ class ProcessCommand(Command):
|
||||
|
||||
pm = ProcessorManager(loader=config.plugin_cache)
|
||||
for proc in config.get_processors():
|
||||
pm.install(proc, None)
|
||||
pm.install(proc, pc)
|
||||
if args.additional_processors:
|
||||
for proc in args.additional_processors:
|
||||
# Do not add any processors that are already present since
|
||||
@ -100,14 +113,18 @@ class ProcessCommand(Command):
|
||||
try:
|
||||
pm.get_output_processor(proc)
|
||||
except ValueError:
|
||||
pm.install(proc, None)
|
||||
pm.install(proc, pc)
|
||||
|
||||
pm.validate()
|
||||
pm.initialize()
|
||||
pm.initialize(pc)
|
||||
|
||||
pc.run_output = run_output
|
||||
pc.target_info = run_output.target_info
|
||||
for job_output in run_output.jobs:
|
||||
if job_output.status < Status.OK or job_output.status in [Status.SKIPPED, Status.ABORTED]:
|
||||
msg = 'Skipping job {} {} iteration {} -- {}'
|
||||
self.logger.info(msg.format(job_output.id, job_output.label,
|
||||
job_output.iteration, job_output.status))
|
||||
continue
|
||||
|
||||
pc.job_output = job_output
|
||||
pm.enable_all()
|
||||
if not args.force:
|
||||
@ -136,7 +153,8 @@ class ProcessCommand(Command):
|
||||
self.logger.info('Processing run')
|
||||
pm.process_run_output(pc)
|
||||
pm.export_run_output(pc)
|
||||
pm.finalize()
|
||||
pm.finalize(pc)
|
||||
|
||||
run_output.write_info()
|
||||
run_output.write_result()
|
||||
self.logger.info('Done.')
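A rough usage sketch for the updated command (the output-directory argument and the flag for additional processors are defined earlier in this file and are assumed here; only -r and -f appear in this hunk):

    # Re-process a completed run in ./wa_output
    wa process wa_output

    # Recursively process all runs under a directory, re-running processors that
    # have already run and forcing processing of in-progress runs
    wa process -r -f wa_output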
|
||||
|
288
wa/commands/report.py
Normal file
@ -0,0 +1,288 @@
|
||||
from collections import Counter
|
||||
from datetime import datetime, timedelta
|
||||
import logging
|
||||
import os
|
||||
|
||||
from wa import Command, settings
|
||||
from wa.framework.configuration.core import Status
|
||||
from wa.framework.output import RunOutput, discover_wa_outputs
|
||||
from wa.utils.doc import underline
|
||||
from wa.utils.log import COLOR_MAP, RESET_COLOR
|
||||
from wa.utils.terminalsize import get_terminal_size
|
||||
|
||||
|
||||
class ReportCommand(Command):
|
||||
|
||||
name = 'report'
|
||||
description = '''
|
||||
Monitor an ongoing run and provide information on its progress.
|
||||
|
||||
Specify the output directory of the run you would like to monitor;
|
||||
alternatively report will attempt to discover wa output directories
|
||||
within the current directory. The output includes run information such as
|
||||
the UUID, start time, duration, project name and a short summary of the
|
||||
run's progress (number of completed jobs, the number of jobs in each
|
||||
different status).
|
||||
|
||||
If verbose output is specified, the output includes a list of all events
|
||||
labelled as not specific to any job, followed by a list of the jobs in the
|
||||
order executed, with their retries (if any), current status and, if the job
|
||||
is finished, a list of events that occurred during that job's execution.
|
||||
|
||||
This is an example of a job status line:
|
||||
|
||||
wk1 (exoplayer) [1] - 2, PARTIAL
|
||||
|
||||
It contains two entries delimited by a comma: the job's descriptor followed
|
||||
by its completion status (``PARTIAL``, in this case). The descriptor
|
||||
consists of the following elements:
|
||||
|
||||
- the job ID (``wk1``)
|
||||
- the job label (which defaults to the workload name) in parentheses
|
||||
- job iteration number in square brackets (``1`` in this case)
|
||||
- a hyphen followed by the retry attempt number.
|
||||
(note: this will only be shown if the job has been retried at least
|
||||
once. If the job has not yet run, or if it completed on the first
|
||||
attempt, the hyphen and retry count -- which in that case would be
|
||||
zero -- will not appear).
|
||||
'''
|
||||
|
||||
def initialize(self, context):
|
||||
self.parser.add_argument('-d', '--directory',
|
||||
help='''
|
||||
Specify the WA output path. report will
|
||||
otherwise attempt to discover output
|
||||
directories in the current directory.
|
||||
''')
|
||||
|
||||
def execute(self, state, args):
|
||||
if args.directory:
|
||||
output_path = args.directory
|
||||
run_output = RunOutput(output_path)
|
||||
else:
|
||||
possible_outputs = list(discover_wa_outputs(os.getcwd()))
|
||||
num_paths = len(possible_outputs)
|
||||
|
||||
if num_paths > 1:
|
||||
print('More than one possible output directory found,'
|
||||
' please choose a path from the following:'
|
||||
)
|
||||
|
||||
for i in range(num_paths):
|
||||
print("{}: {}".format(i, possible_outputs[i].basepath))
|
||||
|
||||
while True:
|
||||
try:
|
||||
select = int(input())
|
||||
except ValueError:
|
||||
print("Please select a valid path number")
|
||||
continue
|
||||
|
||||
if select not in range(num_paths):
|
||||
print("Please select a valid path number")
|
||||
continue
|
||||
break
|
||||
|
||||
run_output = possible_outputs[select]
|
||||
|
||||
else:
|
||||
run_output = possible_outputs[0]
|
||||
|
||||
rm = RunMonitor(run_output)
|
||||
print(rm.generate_output(args.verbose))
|
||||
|
||||
|
||||
class RunMonitor:
|
||||
|
||||
@property
|
||||
def elapsed_time(self):
|
||||
if self._elapsed is None:
|
||||
if self.ro.info.duration is None:
|
||||
self._elapsed = datetime.utcnow() - self.ro.info.start_time
|
||||
else:
|
||||
self._elapsed = self.ro.info.duration
|
||||
return self._elapsed
|
||||
|
||||
@property
|
||||
def job_outputs(self):
|
||||
if self._job_outputs is None:
|
||||
self._job_outputs = {
|
||||
(j_o.id, j_o.label, j_o.iteration): j_o for j_o in self.ro.jobs
|
||||
}
|
||||
return self._job_outputs
|
||||
|
||||
@property
|
||||
def projected_duration(self):
|
||||
elapsed = self.elapsed_time.total_seconds()
|
||||
proj = timedelta(seconds=elapsed * (len(self.jobs) / len(self.segmented['finished'])))
|
||||
return proj - self.elapsed_time
|
||||
|
||||
def __init__(self, ro):
|
||||
self.ro = ro
|
||||
self._elapsed = None
|
||||
self._p_duration = None
|
||||
self._job_outputs = None
|
||||
self._termwidth = None
|
||||
self._fmt = _simple_formatter()
|
||||
self.get_data()
|
||||
|
||||
def get_data(self):
|
||||
self.jobs = [state for label_id, state in self.ro.state.jobs.items()]
|
||||
if self.jobs:
|
||||
rc = self.ro.run_config
|
||||
self.segmented = segment_jobs_by_state(self.jobs,
|
||||
rc.max_retries,
|
||||
rc.retry_on_status
|
||||
)
|
||||
|
||||
def generate_run_header(self):
|
||||
info = self.ro.info
|
||||
|
||||
header = underline('Run Info')
|
||||
header += "UUID: {}\n".format(info.uuid)
|
||||
if info.run_name:
|
||||
header += "Run name: {}\n".format(info.run_name)
|
||||
if info.project:
|
||||
header += "Project: {}\n".format(info.project)
|
||||
if info.project_stage:
|
||||
header += "Project stage: {}\n".format(info.project_stage)
|
||||
|
||||
if info.start_time:
|
||||
duration = _seconds_as_smh(self.elapsed_time.total_seconds())
|
||||
header += ("Start time: {}\n"
|
||||
"Duration: {:02}:{:02}:{:02}\n"
|
||||
).format(info.start_time,
|
||||
duration[2], duration[1], duration[0],
|
||||
)
|
||||
if self.segmented['finished'] and not info.end_time:
|
||||
p_duration = _seconds_as_smh(self.projected_duration.total_seconds())
|
||||
header += "Projected time remaining: {:02}:{:02}:{:02}\n".format(
|
||||
p_duration[2], p_duration[1], p_duration[0]
|
||||
)
|
||||
|
||||
elif self.ro.info.end_time:
|
||||
header += "End time: {}\n".format(info.end_time)
|
||||
|
||||
return header + '\n'
|
||||
|
||||
def generate_job_summary(self):
|
||||
total = len(self.jobs)
|
||||
num_fin = len(self.segmented['finished'])
|
||||
|
||||
summary = underline('Job Summary')
|
||||
summary += 'Total: {}, Completed: {} ({}%)\n'.format(
|
||||
total, num_fin, (num_fin / total) * 100
|
||||
) if total > 0 else 'No jobs created\n'
|
||||
|
||||
ctr = Counter()
|
||||
for run_state, jobs in ((k, v) for k, v in self.segmented.items() if v):
|
||||
if run_state == 'finished':
|
||||
ctr.update([job.status.name.lower() for job in jobs])
|
||||
else:
|
||||
ctr[run_state] += len(jobs)
|
||||
|
||||
return summary + ', '.join(
|
||||
[str(count) + ' ' + self._fmt.highlight_keyword(status) for status, count in ctr.items()]
|
||||
) + '\n\n'
|
||||
|
||||
def generate_job_detail(self):
|
||||
detail = underline('Job Detail')
|
||||
for job in self.jobs:
|
||||
detail += ('{} ({}) [{}]{}, {}\n').format(
|
||||
job.id,
|
||||
job.label,
|
||||
job.iteration,
|
||||
' - ' + str(job.retries) if job.retries else '',
|
||||
self._fmt.highlight_keyword(str(job.status))
|
||||
)
|
||||
|
||||
job_output = self.job_outputs[(job.id, job.label, job.iteration)]
|
||||
for event in job_output.events:
|
||||
detail += self._fmt.fit_term_width(
|
||||
'\t{}\n'.format(event.summary)
|
||||
)
|
||||
return detail
|
||||
|
||||
def generate_run_detail(self):
|
||||
detail = underline('Run Events') if self.ro.events else ''
|
||||
|
||||
for event in self.ro.events:
|
||||
detail += '{}\n'.format(event.summary)
|
||||
|
||||
return detail + '\n'
|
||||
|
||||
def generate_output(self, verbose):
|
||||
if not self.jobs:
|
||||
return 'No jobs found in output directory\n'
|
||||
|
||||
output = self.generate_run_header()
|
||||
output += self.generate_job_summary()
|
||||
|
||||
if verbose:
|
||||
output += self.generate_run_detail()
|
||||
output += self.generate_job_detail()
|
||||
|
||||
return output
|
||||
|
||||
|
||||
def _seconds_as_smh(seconds):
|
||||
seconds = int(seconds)
|
||||
hours = seconds // 3600
|
||||
minutes = (seconds % 3600) // 60
|
||||
seconds = seconds % 60
|
||||
return seconds, minutes, hours
|
||||
|
||||
|
||||
def segment_jobs_by_state(jobstates, max_retries, retry_status):
|
||||
finished_states = [
|
||||
Status.PARTIAL, Status.FAILED,
|
||||
Status.ABORTED, Status.OK, Status.SKIPPED
|
||||
]
|
||||
|
||||
segmented = {
|
||||
'finished': [], 'other': [], 'running': [],
|
||||
'pending': [], 'uninitialized': []
|
||||
}
|
||||
|
||||
for jobstate in jobstates:
|
||||
if (jobstate.status in retry_status) and jobstate.retries < max_retries:
|
||||
segmented['running'].append(jobstate)
|
||||
elif jobstate.status in finished_states:
|
||||
segmented['finished'].append(jobstate)
|
||||
elif jobstate.status == Status.RUNNING:
|
||||
segmented['running'].append(jobstate)
|
||||
elif jobstate.status == Status.PENDING:
|
||||
segmented['pending'].append(jobstate)
|
||||
elif jobstate.status == Status.NEW:
|
||||
segmented['uninitialized'].append(jobstate)
|
||||
else:
|
||||
segmented['other'].append(jobstate)
|
||||
|
||||
return segmented
|
||||
|
||||
|
||||
class _simple_formatter:
|
||||
color_map = {
|
||||
'running': COLOR_MAP[logging.INFO],
|
||||
'partial': COLOR_MAP[logging.WARNING],
|
||||
'failed': COLOR_MAP[logging.CRITICAL],
|
||||
'aborted': COLOR_MAP[logging.ERROR]
|
||||
}
|
||||
|
||||
def __init__(self):
|
||||
self.termwidth = get_terminal_size()[0]
|
||||
self.color = settings.logging['color']
|
||||
|
||||
def fit_term_width(self, text):
|
||||
text = text.expandtabs()
|
||||
if len(text) <= self.termwidth:
|
||||
return text
|
||||
else:
|
||||
return text[0:self.termwidth - 4] + " ...\n"
|
||||
|
||||
def highlight_keyword(self, kw):
|
||||
if not self.color or kw.lower() not in _simple_formatter.color_map:
|
||||
return kw
|
||||
|
||||
color = _simple_formatter.color_map[kw.lower()]
|
||||
return '{}{}{}'.format(color, kw, RESET_COLOR)
|
@ -25,10 +25,6 @@ from wa.framework.target.manager import TargetManager
|
||||
from wa.utils.revent import ReventRecorder
|
||||
|
||||
|
||||
if sys.version_info[0] == 3:
|
||||
raw_input = input # pylint: disable=redefined-builtin
|
||||
|
||||
|
||||
class RecordCommand(Command):
|
||||
|
||||
name = 'record'
|
||||
@ -96,11 +92,11 @@ class RecordCommand(Command):
|
||||
if args.workload and args.output:
|
||||
self.logger.error("Output file cannot be specified with Workload")
|
||||
sys.exit()
|
||||
if not args.workload and (args.setup or args.extract_results or
|
||||
args.teardown or args.all):
|
||||
if not args.workload and (args.setup or args.extract_results
|
||||
or args.teardown or args.all):
|
||||
self.logger.error("Cannot specify a recording stage without a Workload")
|
||||
sys.exit()
|
||||
if not (args.all or args.teardown or args.extract_results or args.run or args.setup):
|
||||
if args.workload and not any([args.all, args.teardown, args.extract_results, args.run, args.setup]):
|
||||
self.logger.error("Please specify which workload stages you wish to record")
|
||||
sys.exit()
|
||||
|
||||
@ -120,6 +116,7 @@ class RecordCommand(Command):
|
||||
outdir = os.getcwd()
|
||||
|
||||
self.tm = TargetManager(device, device_config, outdir)
|
||||
self.tm.initialize()
|
||||
self.target = self.tm.target
|
||||
self.revent_recorder = ReventRecorder(self.target)
|
||||
self.revent_recorder.deploy()
|
||||
@ -136,11 +133,11 @@ class RecordCommand(Command):
|
||||
def record(self, revent_file, name, output_path):
|
||||
msg = 'Press Enter when you are ready to record {}...'
|
||||
self.logger.info(msg.format(name))
|
||||
raw_input('')
|
||||
input('')
|
||||
self.revent_recorder.start_record(revent_file)
|
||||
msg = 'Press Enter when you have finished recording {}...'
|
||||
self.logger.info(msg.format(name))
|
||||
raw_input('')
|
||||
input('')
|
||||
self.revent_recorder.stop_record()
|
||||
|
||||
if not os.path.isdir(output_path):
|
||||
@ -261,6 +258,7 @@ class ReplayCommand(Command):
|
||||
device_config = state.run_config.device_config or {}
|
||||
|
||||
target_manager = TargetManager(device, device_config, None)
|
||||
target_manager.initialize()
|
||||
self.target = target_manager.target
|
||||
revent_file = self.target.path.join(self.target.working_directory,
|
||||
os.path.split(args.recording)[1])
|
||||
|
@ -84,7 +84,7 @@ class RunCommand(Command):
|
||||
be specified multiple times.
|
||||
""")
|
||||
|
||||
def execute(self, config, args):
|
||||
def execute(self, config, args): # pylint: disable=arguments-differ
|
||||
output = self.set_up_output_directory(config, args)
|
||||
log.add_file(output.logfile)
|
||||
output.add_artifact('runlog', output.logfile, kind='log',
|
||||
@ -97,8 +97,10 @@ class RunCommand(Command):
|
||||
|
||||
parser = AgendaParser()
|
||||
if os.path.isfile(args.agenda):
|
||||
parser.load_from_path(config, args.agenda)
|
||||
includes = parser.load_from_path(config, args.agenda)
|
||||
shutil.copy(args.agenda, output.raw_config_dir)
|
||||
for inc in includes:
|
||||
shutil.copy(inc, output.raw_config_dir)
|
||||
else:
|
||||
try:
|
||||
pluginloader.get_plugin_class(args.agenda, kind='workload')
|
||||
@ -110,6 +112,11 @@ class RunCommand(Command):
|
||||
'by running "wa list workloads".'
|
||||
raise ConfigError(msg.format(args.agenda))
|
||||
|
||||
# Update run info with newly parsed config values
|
||||
output.info.project = config.run_config.project
|
||||
output.info.project_stage = config.run_config.project_stage
|
||||
output.info.run_name = config.run_config.run_name
|
||||
|
||||
executor = Executor()
|
||||
executor.execute(config, output)
|
||||
|
||||
|
28
wa/commands/schema_changelog.rst
Normal file
@ -0,0 +1,28 @@
# 1
## 1.0
- First version
## 1.1
- LargeObjects table added as a substitute for the previous plan to
  use the filesystem and a path reference to store artifacts. This
  was done following an extended discussion and tests that verified
  that the savings in processing power were not enough to warrant
  the creation of a dedicated server or file handler.
## 1.2
- Rename the `resourcegetters` table to `resource_getters` for consistency.
- Add Job and Run level classifiers.
- Add missing Android-specific properties to targets.
- Add new POD metadata to relevant tables.
- Correct job column name from `retires` to `retry`.
- Add missing run information.
## 1.3
- Add missing "system_id" field from TargetInfo.
- Enable support for uploading Artifacts that represent directories.
## 1.4
- Add "modules" field to TargetInfo to list the modules loaded by the target
  during the run.
## 1.5
- Change the type of the "hostid" in TargetInfo from Int to Bigint.
## 1.6
- Add cascading deletes to most tables to allow easy deletion of a run
  and its associated data.
- Add rule to delete associated large object on deletion of artifact.
@ -21,6 +21,8 @@
|
||||
import sys
|
||||
from subprocess import call, Popen, PIPE
|
||||
|
||||
from devlib.utils.misc import escape_double_quotes
|
||||
|
||||
from wa import Command
|
||||
from wa.framework import pluginloader
|
||||
from wa.framework.configuration.core import MetaConfiguration, RunConfiguration
|
||||
@ -31,8 +33,6 @@ from wa.utils.doc import (strip_inlined_text, get_rst_from_plugin,
|
||||
get_params_rst, underline)
|
||||
from wa.utils.misc import which
|
||||
|
||||
from devlib.utils.misc import escape_double_quotes
|
||||
|
||||
|
||||
class ShowCommand(Command):
|
||||
|
||||
@ -73,11 +73,8 @@ class ShowCommand(Command):
|
||||
|
||||
if which('pandoc'):
|
||||
p = Popen(['pandoc', '-f', 'rst', '-t', 'man'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
|
||||
if sys.version_info[0] == 3:
|
||||
output, _ = p.communicate(rst_output.encode(sys.stdin.encoding))
|
||||
output = output.decode(sys.stdout.encoding)
|
||||
else:
|
||||
output, _ = p.communicate(rst_output)
|
||||
output, _ = p.communicate(rst_output.encode(sys.stdin.encoding))
|
||||
output = output.decode(sys.stdout.encoding)
|
||||
|
||||
# Make sure to double escape back slashes
|
||||
output = output.replace('\\', '\\\\\\')
|
||||
|
@ -59,7 +59,7 @@ params = dict(
|
||||
'Environment :: Console',
|
||||
'License :: Other/Proprietary License',
|
||||
'Operating System :: Unix',
|
||||
'Programming Language :: Python :: 2.7',
|
||||
'Programming Language :: Python :: 3',
|
||||
],
|
||||
)
|
||||
|
||||
|
@ -1,18 +1,18 @@
|
||||
apply plugin: 'com.android.application'
|
||||
|
||||
android {
|
||||
compileSdkVersion 18
|
||||
buildToolsVersion '25.0.0'
|
||||
compileSdkVersion 28
|
||||
buildToolsVersion '28.0.0'
|
||||
defaultConfig {
|
||||
applicationId "${package_name}"
|
||||
minSdkVersion 18
|
||||
targetSdkVersion 25
|
||||
targetSdkVersion 28
|
||||
testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner"
|
||||
}
|
||||
buildTypes {
|
||||
applicationVariants.all { variant ->
|
||||
variant.outputs.each { output ->
|
||||
output.outputFile = file("$$project.buildDir/apk/${package_name}.apk")
|
||||
output.outputFileName = "${package_name}.apk"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -16,7 +16,7 @@ fi
|
||||
|
||||
# Copy base class library from wlauto dist
|
||||
libs_dir=app/libs
|
||||
base_class=`python -c "import os, wa; print os.path.join(os.path.dirname(wa.__file__), 'framework', 'uiauto', 'uiauto.aar')"`
|
||||
base_class=`python3 -c "import os, wa; print(os.path.join(os.path.dirname(wa.__file__), 'framework', 'uiauto', 'uiauto.aar'))"`
|
||||
mkdir -p $$libs_dir
|
||||
cp $$base_class $$libs_dir
|
||||
|
||||
@ -31,8 +31,8 @@ fi
|
||||
|
||||
# If successful move APK file to workload folder (overwrite previous)
|
||||
rm -f ../$package_name
|
||||
if [[ -f app/build/apk/$package_name.apk ]]; then
|
||||
cp app/build/apk/$package_name.apk ../$package_name.apk
|
||||
if [[ -f app/build/outputs/apk/debug/$package_name.apk ]]; then
|
||||
cp app/build/outputs/apk/debug/$package_name.apk ../$package_name.apk
|
||||
else
|
||||
echo 'ERROR: UiAutomator apk could not be found!'
|
||||
exit 9
|
||||
|
@ -3,9 +3,10 @@
|
||||
buildscript {
|
||||
repositories {
|
||||
jcenter()
|
||||
google()
|
||||
}
|
||||
dependencies {
|
||||
classpath 'com.android.tools.build:gradle:2.3.1'
|
||||
classpath 'com.android.tools.build:gradle:7.2.1'
|
||||
|
||||
// NOTE: Do not place your application dependencies here; they belong
|
||||
// in the individual module build.gradle files
|
||||
@ -15,6 +16,7 @@ buildscript {
|
||||
allprojects {
|
||||
repositories {
|
||||
jcenter()
|
||||
google()
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -3,4 +3,4 @@ distributionBase=GRADLE_USER_HOME
|
||||
distributionPath=wrapper/dists
|
||||
zipStoreBase=GRADLE_USER_HOME
|
||||
zipStorePath=wrapper/dists
|
||||
distributionUrl=https\://services.gradle.org/distributions/gradle-3.3-all.zip
|
||||
distributionUrl=https\://services.gradle.org/distributions/gradle-7.3.3-all.zip
|
||||
|
@ -65,7 +65,6 @@ class SubCommand(object):
|
||||
options to the command's parser). ``context`` is always ``None``.
|
||||
|
||||
"""
|
||||
pass
|
||||
|
||||
def execute(self, state, args):
|
||||
"""
|
||||
|
@ -13,6 +13,7 @@
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import logging
|
||||
from copy import copy, deepcopy
|
||||
from collections import OrderedDict, defaultdict
|
||||
|
||||
@ -22,7 +23,7 @@ from wa.utils import log
|
||||
from wa.utils.misc import (get_article, merge_config_values)
|
||||
from wa.utils.types import (identifier, integer, boolean, list_of_strings,
|
||||
list_of, toggle_set, obj_dict, enum)
|
||||
from wa.utils.serializer import is_pod
|
||||
from wa.utils.serializer import is_pod, Podable
|
||||
|
||||
|
||||
# Mapping for kind conversion; see docs for convert_types below
|
||||
@ -36,6 +37,8 @@ Status = enum(['UNKNOWN', 'NEW', 'PENDING',
|
||||
'STARTED', 'CONNECTED', 'INITIALIZED', 'RUNNING',
|
||||
'OK', 'PARTIAL', 'FAILED', 'ABORTED', 'SKIPPED'])
|
||||
|
||||
logger = logging.getLogger('config')
|
||||
|
||||
|
||||
##########################
|
||||
### CONFIG POINT TYPES ###
|
||||
@ -55,10 +58,11 @@ class RebootPolicy(object):
|
||||
executing the first workload spec.
|
||||
:each_spec: The device will be rebooted before running a new workload spec.
|
||||
:each_iteration: The device will be rebooted before each new iteration.
|
||||
:run_completion: The device will be rebooted after the run has been completed.
|
||||
|
||||
"""
|
||||
|
||||
valid_policies = ['never', 'as_needed', 'initial', 'each_spec', 'each_job']
|
||||
valid_policies = ['never', 'as_needed', 'initial', 'each_spec', 'each_job', 'run_completion']
|
||||
|
||||
@staticmethod
|
||||
def from_pod(pod):
|
||||
@ -89,6 +93,10 @@ class RebootPolicy(object):
|
||||
def reboot_on_each_spec(self):
|
||||
return self.policy == 'each_spec'
|
||||
|
||||
@property
|
||||
def reboot_on_run_completion(self):
|
||||
return self.policy == 'run_completion'
|
||||
|
||||
def __str__(self):
|
||||
return self.policy
|
||||
|
||||
@ -110,7 +118,9 @@ class status_list(list):
|
||||
list.append(self, str(item).upper())
|
||||
|
||||
|
||||
class LoggingConfig(dict):
|
||||
class LoggingConfig(Podable, dict):
|
||||
|
||||
_pod_serialization_version = 1
|
||||
|
||||
defaults = {
|
||||
'file_format': '%(asctime)s %(levelname)-8s %(name)s: %(message)s',
|
||||
@ -121,9 +131,14 @@ class LoggingConfig(dict):
|
||||
|
||||
@staticmethod
|
||||
def from_pod(pod):
|
||||
return LoggingConfig(pod)
|
||||
pod = LoggingConfig._upgrade_pod(pod)
|
||||
pod_version = pod.pop('_pod_version')
|
||||
instance = LoggingConfig(pod)
|
||||
instance._pod_version = pod_version # pylint: disable=protected-access
|
||||
return instance
|
||||
|
||||
def __init__(self, config=None):
|
||||
super(LoggingConfig, self).__init__()
|
||||
dict.__init__(self)
|
||||
if isinstance(config, dict):
|
||||
config = {identifier(k.lower()): v for k, v in config.items()}
|
||||
@ -142,7 +157,14 @@ class LoggingConfig(dict):
|
||||
raise ValueError(config)
|
||||
|
||||
def to_pod(self):
|
||||
return self
|
||||
pod = super(LoggingConfig, self).to_pod()
|
||||
pod.update(self)
|
||||
return pod
|
||||
|
||||
@staticmethod
|
||||
def _pod_upgrade_v1(pod):
|
||||
pod['_pod_version'] = pod.get('_pod_version', 1)
|
||||
return pod
|
||||
|
||||
|
||||
def expanded_path(path):
|
||||
@ -178,7 +200,8 @@ class ConfigurationPoint(object):
|
||||
constraint=None,
|
||||
merge=False,
|
||||
aliases=None,
|
||||
global_alias=None):
|
||||
global_alias=None,
|
||||
deprecated=False):
|
||||
"""
|
||||
Create a new Parameter object.
|
||||
|
||||
@ -229,10 +252,12 @@ class ConfigurationPoint(object):
|
||||
:param global_alias: An alias for this parameter that can be specified at
|
||||
the global level. A global_alias can map onto many
|
||||
ConfigurationPoints.
|
||||
:param deprecated: Specify that this parameter is deprecated and its
|
||||
config should be ignored. If supplied, WA will display
a warning to the user but will continue execution.
|
||||
"""
|
||||
self.name = identifier(name)
|
||||
if kind in KIND_MAP:
|
||||
kind = KIND_MAP[kind]
|
||||
kind = KIND_MAP.get(kind, kind)
|
||||
if kind is not None and not callable(kind):
|
||||
raise ValueError('Kind must be callable.')
|
||||
self.kind = kind
|
||||
@ -252,6 +277,7 @@ class ConfigurationPoint(object):
|
||||
self.merge = merge
|
||||
self.aliases = aliases or []
|
||||
self.global_alias = global_alias
|
||||
self.deprecated = deprecated
|
||||
|
||||
if self.default is not None:
|
||||
try:
|
||||
@ -267,6 +293,11 @@ class ConfigurationPoint(object):
|
||||
return False
|
||||
|
||||
def set_value(self, obj, value=None, check_mandatory=True):
|
||||
if self.deprecated:
|
||||
if value is not None:
|
||||
msg = 'Deprecated parameter supplied for "{}" in "{}". The value will be ignored.'
|
||||
logger.warning(msg.format(self.name, obj.name))
|
||||
return
|
||||
if value is None:
|
||||
if self.default is not None:
|
||||
value = self.kind(self.default)
|
||||
@ -288,6 +319,8 @@ class ConfigurationPoint(object):
|
||||
setattr(obj, self.name, value)
|
||||
|
||||
def validate(self, obj, check_mandatory=True):
|
||||
if self.deprecated:
|
||||
return
|
||||
value = getattr(obj, self.name, None)
|
||||
if value is not None:
|
||||
self.validate_value(obj.name, value)
|
||||
@ -347,8 +380,9 @@ def _to_pod(cfg_point, value):
|
||||
raise ValueError(msg.format(cfg_point.name, value))
|
||||
|
||||
|
||||
class Configuration(object):
|
||||
class Configuration(Podable):
|
||||
|
||||
_pod_serialization_version = 1
|
||||
config_points = []
|
||||
name = ''
|
||||
|
||||
@ -357,7 +391,7 @@ class Configuration(object):
|
||||
|
||||
@classmethod
|
||||
def from_pod(cls, pod):
|
||||
instance = cls()
|
||||
instance = super(Configuration, cls).from_pod(pod)
|
||||
for cfg_point in cls.config_points:
|
||||
if cfg_point.name in pod:
|
||||
value = pod.pop(cfg_point.name)
|
||||
@ -370,6 +404,7 @@ class Configuration(object):
|
||||
return instance
|
||||
|
||||
def __init__(self):
|
||||
super(Configuration, self).__init__()
|
||||
for confpoint in self.config_points:
|
||||
confpoint.set_value(self, check_mandatory=False)
|
||||
|
||||
@ -393,12 +428,17 @@ class Configuration(object):
|
||||
cfg_point.validate(self)
|
||||
|
||||
def to_pod(self):
|
||||
pod = {}
|
||||
pod = super(Configuration, self).to_pod()
|
||||
for cfg_point in self.config_points:
|
||||
value = getattr(self, cfg_point.name, None)
|
||||
pod[cfg_point.name] = _to_pod(cfg_point, value)
|
||||
return pod
|
||||
|
||||
@staticmethod
|
||||
def _pod_upgrade_v1(pod):
|
||||
pod['_pod_version'] = pod.get('_pod_version', 1)
|
||||
return pod
|
||||
|
||||
|
||||
# This configuration for the core WA framework
|
||||
class MetaConfiguration(Configuration):
|
||||
@ -429,6 +469,7 @@ class MetaConfiguration(Configuration):
|
||||
description="""
|
||||
The local mount point for the filer hosting WA assets.
|
||||
""",
|
||||
default=''
|
||||
),
|
||||
ConfigurationPoint(
|
||||
'logging',
|
||||
@ -445,7 +486,6 @@ class MetaConfiguration(Configuration):
|
||||
contain bash color escape codes. Set this to ``False`` if
|
||||
console output will be piped somewhere that does not know
|
||||
how to handle those.
|
||||
|
||||
""",
|
||||
),
|
||||
ConfigurationPoint(
|
||||
@ -482,6 +522,10 @@ class MetaConfiguration(Configuration):
|
||||
def plugins_directory(self):
|
||||
return os.path.join(self.user_directory, 'plugins')
|
||||
|
||||
@property
|
||||
def cache_directory(self):
|
||||
return os.path.join(self.user_directory, 'cache')
|
||||
|
||||
@property
|
||||
def plugin_paths(self):
|
||||
return [self.plugins_directory] + (self.extra_plugin_paths or [])
|
||||
@ -494,6 +538,14 @@ class MetaConfiguration(Configuration):
|
||||
def additional_packages_file(self):
|
||||
return os.path.join(self.user_directory, 'packages')
|
||||
|
||||
@property
|
||||
def target_info_cache_file(self):
|
||||
return os.path.join(self.cache_directory, 'targets.json')
|
||||
|
||||
@property
|
||||
def apk_info_cache_file(self):
|
||||
return os.path.join(self.cache_directory, 'apk_info.json')
|
||||
|
||||
def __init__(self, environ=None):
|
||||
super(MetaConfiguration, self).__init__()
|
||||
if environ is None:
|
||||
@ -615,15 +667,18 @@ class RunConfiguration(Configuration):
|
||||
``"each_spec"``
|
||||
The device will be rebooted before running a new workload spec.
|
||||
|
||||
.. note:: this acts the same as each_job when execution order
|
||||
.. note:: This acts the same as ``each_job`` when execution order
|
||||
is set to by_iteration
|
||||
|
||||
``"run_completion"``
|
||||
The device will be rebooted after the run has been completed.
|
||||
'''),
|
||||
ConfigurationPoint(
|
||||
'device',
|
||||
kind=str,
|
||||
default='generic_android',
|
||||
description='''
|
||||
This setting defines what specific Device subclass will be used to
|
||||
This setting defines what specific ``Device`` subclass will be used to
|
||||
interact the connected device. Obviously, this must match your
|
||||
setup.
|
||||
''',
|
||||
@ -677,6 +732,17 @@ class RunConfiguration(Configuration):
|
||||
failed, but continue attempting to run others.
|
||||
'''
|
||||
),
|
||||
ConfigurationPoint(
|
||||
'bail_on_job_failure',
|
||||
kind=bool,
|
||||
default=False,
|
||||
description='''
|
||||
When a job fails during its run phase, WA will attempt to retry the
|
||||
job, then continue with remaining jobs after. Setting this to
|
||||
``True`` means WA will skip remaining jobs and end the run if a job
|
||||
has retried the maximum number of times, and still fails.
|
||||
'''
|
||||
),
|
||||
ConfigurationPoint(
|
||||
'allow_phone_home',
|
||||
kind=bool, default=True,
|
||||
@@ -700,8 +766,12 @@ class RunConfiguration(Configuration):
            meta_pod[cfg_point.name] = pod.pop(cfg_point.name, None)

        device_config = pod.pop('device_config', None)
        augmentations = pod.pop('augmentations', {})
        getters = pod.pop('resource_getters', {})
        instance = super(RunConfiguration, cls).from_pod(pod)
        instance.device_config = device_config
        instance.augmentations = augmentations
        instance.resource_getters = getters
        for cfg_point in cls.meta_data:
            cfg_point.set_value(instance, meta_pod[cfg_point.name])

@@ -712,6 +782,8 @@ class RunConfiguration(Configuration):
        for confpoint in self.meta_data:
            confpoint.set_value(self, check_mandatory=False)
        self.device_config = None
        self.augmentations = {}
        self.resource_getters = {}

    def merge_device_config(self, plugin_cache):
        """
@@ -725,9 +797,21 @@ class RunConfiguration(Configuration):
        self.device_config = plugin_cache.get_plugin_config(self.device,
                                                            generic_name="device_config")

    def add_augmentation(self, aug):
        if aug.name in self.augmentations:
            raise ValueError('Augmentation "{}" already added.'.format(aug.name))
        self.augmentations[aug.name] = aug.get_config()

    def add_resource_getter(self, getter):
        if getter.name in self.resource_getters:
            raise ValueError('Resource getter "{}" already added.'.format(getter.name))
        self.resource_getters[getter.name] = getter.get_config()

    def to_pod(self):
        pod = super(RunConfiguration, self).to_pod()
        pod['device_config'] = dict(self.device_config or {})
        pod['augmentations'] = self.augmentations
        pod['resource_getters'] = self.resource_getters
        return pod


@@ -746,12 +830,12 @@ class JobSpec(Configuration):
                           description='''
                           The name of the workload to run.
                           '''),
        ConfigurationPoint('workload_parameters', kind=obj_dict,
        ConfigurationPoint('workload_parameters', kind=obj_dict, merge=True,
                           aliases=["params", "workload_params", "parameters"],
                           description='''
                           Parameter to be passed to the workload
                           '''),
        ConfigurationPoint('runtime_parameters', kind=obj_dict,
        ConfigurationPoint('runtime_parameters', kind=obj_dict, merge=True,
                           aliases=["runtime_params"],
                           description='''
                           Runtime parameters to be set prior to running
@@ -952,8 +1036,8 @@ class JobGenerator(object):
        if name == "augmentations":
            self.update_augmentations(value)

    def add_section(self, section, workloads):
        new_node = self.root_node.add_section(section)
    def add_section(self, section, workloads, group):
        new_node = self.root_node.add_section(section, group)
        with log.indentcontext():
            for workload in workloads:
                new_node.add_workload(workload)
@@ -1015,6 +1099,12 @@ def create_job_spec(workload_entry, sections, target_manager, plugin_cache,
    # PHASE 2.1: Merge general job spec configuration
    for section in sections:
        job_spec.update_config(section, check_mandatory=False)

        # Add classifiers for any present groups
        if section.id == 'global' or section.group is None:
            # Ignore global config and default group
            continue
        job_spec.classifiers[section.group] = section.id
    job_spec.update_config(workload_entry, check_mandatory=False)

    # PHASE 2.2: Merge global, section and workload entry "workload_parameters"

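With the new ``group`` handling above, a section's group name is attached to every generated job spec as a classifier keyed by the group and valued by the section id. A small, self-contained illustration of the check added in ``create_job_spec`` (section ids and the group name here are invented):

    sections = [{'id': 'little', 'group': 'cluster'},
                {'id': 'big', 'group': 'cluster'},
                {'id': 'global', 'group': None}]

    for section in sections:
        classifiers = {}
        # Mirrors the diff: the global section and the default (None) group are skipped.
        if section['id'] != 'global' and section['group'] is not None:
            classifiers[section['group']] = section['id']
        print(classifiers)   # {'cluster': 'little'}, {'cluster': 'big'}, {}
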
@@ -18,31 +18,44 @@ from itertools import groupby, chain

from future.moves.itertools import zip_longest

from devlib.utils.types import identifier

from wa.framework.configuration.core import (MetaConfiguration, RunConfiguration,
                                              JobGenerator, settings)
from wa.framework.configuration.parsers import ConfigParser
from wa.framework.configuration.plugin_cache import PluginCache
from wa.framework.exception import NotFoundError
from wa.framework.exception import NotFoundError, ConfigError
from wa.framework.job import Job
from wa.utils import log
from wa.utils.serializer import Podable


class CombinedConfig(object):
class CombinedConfig(Podable):

    _pod_serialization_version = 1

    @staticmethod
    def from_pod(pod):
        instance = CombinedConfig()
        instance = super(CombinedConfig, CombinedConfig).from_pod(pod)
        instance.settings = MetaConfiguration.from_pod(pod.get('settings', {}))
        instance.run_config = RunConfiguration.from_pod(pod.get('run_config', {}))
        return instance

    def __init__(self, settings=None, run_config=None):  # pylint: disable=redefined-outer-name
        super(CombinedConfig, self).__init__()
        self.settings = settings
        self.run_config = run_config

    def to_pod(self):
        return {'settings': self.settings.to_pod(),
                'run_config': self.run_config.to_pod()}
        pod = super(CombinedConfig, self).to_pod()
        pod['settings'] = self.settings.to_pod()
        pod['run_config'] = self.run_config.to_pod()
        return pod

    @staticmethod
    def _pod_upgrade_v1(pod):
        pod['_pod_version'] = pod.get('_pod_version', 1)
        return pod


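Several classes in this change (CombinedConfig, Result, Artifact, Metric, Event) adopt the same pod-versioning pattern. A minimal, self-contained sketch of the idea follows; it is illustrative only and is not the actual wa.utils.serializer.Podable implementation, whose details are assumed here:

    class PodableSketch(object):

        # Bump this whenever the to_pod()/from_pod() layout changes.
        _pod_serialization_version = 1

        def __init__(self):
            self._pod_version = self._pod_serialization_version

        def to_pod(self):
            return {'_pod_version': self._pod_version}

        @classmethod
        def from_pod(cls, pod):
            pod = cls._upgrade_pod(pod)          # migrate old pods forward first
            instance = cls()
            instance._pod_version = pod['_pod_version']
            return instance

        @classmethod
        def _upgrade_pod(cls, pod):
            version = pod.get('_pod_version', 0)
            while version < cls._pod_serialization_version:
                version += 1
                pod = getattr(cls, '_pod_upgrade_v{}'.format(version))(pod)
            return pod

        @staticmethod
        def _pod_upgrade_v1(pod):
            pod['_pod_version'] = 1              # v0 pods had no version field at all
            return pod
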
class ConfigManager(object):
|
||||
@ -90,15 +103,16 @@ class ConfigManager(object):
|
||||
self.agenda = None
|
||||
|
||||
def load_config_file(self, filepath):
|
||||
self._config_parser.load_from_path(self, filepath)
|
||||
includes = self._config_parser.load_from_path(self, filepath)
|
||||
self.loaded_config_sources.append(filepath)
|
||||
self.loaded_config_sources.extend(includes)
|
||||
|
||||
def load_config(self, values, source):
|
||||
self._config_parser.load(self, values, source)
|
||||
self.loaded_config_sources.append(source)
|
||||
|
||||
def get_plugin(self, name=None, kind=None, *args, **kwargs):
|
||||
return self.plugin_cache.get_plugin(name, kind, *args, **kwargs)
|
||||
return self.plugin_cache.get_plugin(identifier(name), kind, *args, **kwargs)
|
||||
|
||||
def get_instruments(self, target):
|
||||
instruments = []
|
||||
@ -122,15 +136,21 @@ class ConfigManager(object):
|
||||
processors.append(proc)
|
||||
return processors
|
||||
|
||||
def get_config(self):
|
||||
return CombinedConfig(self.settings, self.run_config)
|
||||
|
||||
def finalize(self):
|
||||
if not self.agenda:
|
||||
msg = 'Attempting to finalize config before agenda has been set'
|
||||
raise RuntimeError(msg)
|
||||
self.run_config.merge_device_config(self.plugin_cache)
|
||||
return CombinedConfig(self.settings, self.run_config)
|
||||
return self.get_config()
|
||||
|
||||
def generate_jobs(self, context):
|
||||
job_specs = self.jobs_config.generate_job_specs(context.tm)
|
||||
if not job_specs:
|
||||
msg = 'No jobs available for running.'
|
||||
raise ConfigError(msg)
|
||||
exec_order = self.run_config.execution_order
|
||||
log.indent()
|
||||
for spec, i in permute_iterations(job_specs, exec_order):
|
||||
|
@ -18,11 +18,14 @@ import os
|
||||
import logging
|
||||
from functools import reduce # pylint: disable=redefined-builtin
|
||||
|
||||
from devlib.utils.types import identifier
|
||||
|
||||
from wa.framework.configuration.core import JobSpec
|
||||
from wa.framework.exception import ConfigError
|
||||
from wa.utils import log
|
||||
from wa.utils.serializer import json, read_pod, SerializerSyntaxError
|
||||
from wa.utils.types import toggle_set, counter
|
||||
from wa.utils.misc import merge_config_values, isiterable
|
||||
|
||||
|
||||
logger = logging.getLogger('config')
|
||||
@ -31,7 +34,9 @@ logger = logging.getLogger('config')
|
||||
class ConfigParser(object):
|
||||
|
||||
def load_from_path(self, state, filepath):
|
||||
self.load(state, _load_file(filepath, "Config"), filepath)
|
||||
raw, includes = _load_file(filepath, "Config")
|
||||
self.load(state, raw, filepath)
|
||||
return includes
|
||||
|
||||
def load(self, state, raw, source, wrap_exceptions=True): # pylint: disable=too-many-branches
|
||||
logger.debug('Parsing config from "{}"'.format(source))
|
||||
@ -72,8 +77,8 @@ class ConfigParser(object):
|
||||
for name, values in raw.items():
|
||||
# Assume that all leftover config is for a plug-in or a global
|
||||
# alias it is up to PluginCache to assert this assumption
|
||||
logger.debug('Caching "{}" with "{}"'.format(name, values))
|
||||
state.plugin_cache.add_configs(name, values, source)
|
||||
logger.debug('Caching "{}" with "{}"'.format(identifier(name), values))
|
||||
state.plugin_cache.add_configs(identifier(name), values, source)
|
||||
|
||||
except ConfigError as e:
|
||||
if wrap_exceptions:
|
||||
@ -87,8 +92,9 @@ class ConfigParser(object):
|
||||
class AgendaParser(object):
|
||||
|
||||
def load_from_path(self, state, filepath):
|
||||
raw = _load_file(filepath, 'Agenda')
|
||||
raw, includes = _load_file(filepath, 'Agenda')
|
||||
self.load(state, raw, filepath)
|
||||
return includes
|
||||
|
||||
def load(self, state, raw, source):
|
||||
logger.debug('Parsing agenda from "{}"'.format(source))
|
||||
@ -190,9 +196,10 @@ class AgendaParser(object):
|
||||
raise ConfigError(msg.format(json.dumps(section, indent=None)))
|
||||
section['runtime_params'] = section.pop('params')
|
||||
|
||||
group = section.pop('group', None)
|
||||
section = _construct_valid_entry(section, seen_sect_ids,
|
||||
"s", state.jobs_config)
|
||||
state.jobs_config.add_section(section, workloads)
|
||||
state.jobs_config.add_section(section, workloads, group)
|
||||
|
||||
|
||||
########################
|
||||
@@ -222,12 +229,72 @@ def _load_file(filepath, error_name):
        raise ValueError("{} does not exist".format(filepath))
    try:
        raw = read_pod(filepath)
        includes = _process_includes(raw, filepath, error_name)
    except SerializerSyntaxError as e:
        raise ConfigError('Error parsing {} {}: {}'.format(error_name, filepath, e))
    if not isinstance(raw, dict):
        message = '{} does not contain a valid {} structure; top level must be a dict.'
        raise ConfigError(message.format(filepath, error_name))
    return raw
    return raw, includes


def _config_values_from_includes(filepath, include_path, error_name):
    source_dir = os.path.dirname(filepath)
    included_files = []

    if isinstance(include_path, str):
        include_path = os.path.expanduser(os.path.join(source_dir, include_path))

        replace_value, includes = _load_file(include_path, error_name)

        included_files.append(include_path)
        included_files.extend(includes)
    elif isinstance(include_path, list):
        replace_value = {}

        for path in include_path:
            include_path = os.path.expanduser(os.path.join(source_dir, path))

            sub_replace_value, includes = _load_file(include_path, error_name)
            for key, val in sub_replace_value.items():
                replace_value[key] = merge_config_values(val, replace_value.get(key, None))

            included_files.append(include_path)
            included_files.extend(includes)
    else:
        message = "{} does not contain a valid {} structure; value for 'include#' must be a string or a list"
        raise ConfigError(message.format(filepath, error_name))

    return replace_value, included_files


def _process_includes(raw, filepath, error_name):
    if not raw:
        return []

    included_files = []
    replace_value = None

    if hasattr(raw, 'items'):
        for key, value in raw.items():
            if key == 'include#':
                replace_value, includes = _config_values_from_includes(filepath, value, error_name)
                included_files.extend(includes)
            elif hasattr(value, 'items') or isiterable(value):
                includes = _process_includes(value, filepath, error_name)
                included_files.extend(includes)
    elif isiterable(raw):
        for element in raw:
            if hasattr(element, 'items') or isiterable(element):
                includes = _process_includes(element, filepath, error_name)
                included_files.extend(includes)

    if replace_value is not None:
        del raw['include#']
        for key, value in replace_value.items():
            raw[key] = merge_config_values(value, raw.get(key, None))

    return included_files


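The include mechanism above merges values from referenced files into whichever dict carries the ``include#`` key, and returns the list of files that were pulled in so they can be recorded as config sources. A small illustration of the resulting merge (file name invented, and plain ``dict.update`` standing in for ``merge_config_values``, so precedence here is only indicative):

    agenda = {'config': {'include#': 'common.yaml', 'iterations': 3}}
    parsed_include = {'device': 'generic_android', 'iterations': 1}   # what common.yaml might parse to

    merged = dict(parsed_include)
    merged.update({k: v for k, v in agenda['config'].items() if k != 'include#'})
    agenda['config'] = merged
    print(agenda)   # {'config': {'device': 'generic_android', 'iterations': 3}}
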
def merge_augmentations(raw):
|
||||
@ -257,7 +324,7 @@ def merge_augmentations(raw):
|
||||
raise ConfigError(msg.format(value, n, exc))
|
||||
|
||||
# Make sure none of the specified aliases conflict with each other
|
||||
to_check = [e for e in entries]
|
||||
to_check = list(entries)
|
||||
while len(to_check) > 1:
|
||||
check_entry = to_check.pop()
|
||||
for e in to_check:
|
||||
|
@ -84,9 +84,9 @@ class PluginCache(object):
|
||||
'defined in a config file, move the entry content into the top level'
|
||||
raise ConfigError(msg.format((plugin_name)))
|
||||
|
||||
if (not self.loader.has_plugin(plugin_name) and
|
||||
plugin_name not in self.targets and
|
||||
plugin_name not in GENERIC_CONFIGS):
|
||||
if (not self.loader.has_plugin(plugin_name)
|
||||
and plugin_name not in self.targets
|
||||
and plugin_name not in GENERIC_CONFIGS):
|
||||
msg = 'configuration provided for unknown plugin "{}"'
|
||||
raise ConfigError(msg.format(plugin_name))
|
||||
|
||||
@ -95,8 +95,8 @@ class PluginCache(object):
|
||||
raise ConfigError(msg.format(plugin_name, repr(values), type(values)))
|
||||
|
||||
for name, value in values.items():
|
||||
if (plugin_name not in GENERIC_CONFIGS and
|
||||
name not in self.get_plugin_parameters(plugin_name)):
|
||||
if (plugin_name not in GENERIC_CONFIGS
|
||||
and name not in self.get_plugin_parameters(plugin_name)):
|
||||
msg = "'{}' is not a valid parameter for '{}'"
|
||||
raise ConfigError(msg.format(name, plugin_name))
|
||||
|
||||
|
@ -33,6 +33,7 @@ class JobSpecSource(object):
|
||||
def id(self):
|
||||
return self.config['id']
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
@ -69,14 +70,20 @@ class SectionNode(JobSpecSource):
|
||||
def is_leaf(self):
|
||||
return not bool(self.children)
|
||||
|
||||
def __init__(self, config, parent=None):
|
||||
def __init__(self, config, parent=None, group=None):
|
||||
super(SectionNode, self).__init__(config, parent=parent)
|
||||
self.workload_entries = []
|
||||
self.children = []
|
||||
self.group = group
|
||||
|
||||
def add_section(self, section):
|
||||
new_node = SectionNode(section, parent=self)
|
||||
self.children.append(new_node)
|
||||
def add_section(self, section, group=None):
|
||||
# Each level is the same group, only need to check first
|
||||
if not self.children or group == self.children[0].group:
|
||||
new_node = SectionNode(section, parent=self, group=group)
|
||||
self.children.append(new_node)
|
||||
else:
|
||||
for child in self.children:
|
||||
new_node = child.add_section(section, group)
|
||||
return new_node
|
||||
|
||||
def add_workload(self, workload_config):
|
||||
|
@ -16,19 +16,25 @@
|
||||
|
||||
import sys
|
||||
import argparse
|
||||
import locale
|
||||
import logging
|
||||
import os
|
||||
import warnings
|
||||
|
||||
import devlib
|
||||
try:
|
||||
from devlib.utils.version import version as installed_devlib_version
|
||||
except ImportError:
|
||||
installed_devlib_version = None
|
||||
|
||||
from wa.framework import pluginloader
|
||||
from wa.framework.command import init_argument_parser
|
||||
from wa.framework.configuration import settings
|
||||
from wa.framework.configuration.execution import ConfigManager
|
||||
from wa.framework.host import init_user_directory, init_config
|
||||
from wa.framework.exception import ConfigError
|
||||
from wa.framework.version import get_wa_version_with_commit
|
||||
from wa.framework.exception import ConfigError, HostError
|
||||
from wa.framework.version import (get_wa_version_with_commit, format_version,
|
||||
required_devlib_version)
|
||||
from wa.utils import log
|
||||
from wa.utils.doc import format_body
|
||||
|
||||
@ -64,6 +70,27 @@ def split_joined_options(argv):
|
||||
return output
|
||||
|
||||
|
||||
# Instead of presenting an obscure error due to a version mismatch explicitly warn the user.
|
||||
def check_devlib_version():
|
||||
if not installed_devlib_version or installed_devlib_version[:-1] <= required_devlib_version[:-1]:
|
||||
# Check the 'dev' field separately to account for comparing with release versions.
|
||||
if installed_devlib_version.dev and installed_devlib_version.dev < required_devlib_version.dev:
|
||||
msg = 'WA requires Devlib version >={}. Please update the currently installed version {}'
|
||||
raise HostError(msg.format(format_version(required_devlib_version), devlib.__version__))
|
||||
|
||||
|
||||
# If the default encoding is not UTF-8 warn the user as this may cause compatibility issues
|
||||
# when parsing files.
|
||||
def check_system_encoding():
|
||||
system_encoding = locale.getpreferredencoding()
|
||||
msg = 'System Encoding: {}'.format(system_encoding)
|
||||
if 'UTF-8' not in system_encoding:
|
||||
logger.warning(msg)
|
||||
logger.warning('To prevent encoding issues please use a locale setting which supports UTF-8')
|
||||
else:
|
||||
logger.debug(msg)
|
||||
|
||||
|
||||
def main():
|
||||
if not os.path.exists(settings.user_directory):
|
||||
init_user_directory()
|
||||
@ -102,6 +129,8 @@ def main():
|
||||
logger.debug('Version: {}'.format(get_wa_version_with_commit()))
|
||||
logger.debug('devlib version: {}'.format(devlib.__full_version__))
|
||||
logger.debug('Command Line: {}'.format(' '.join(sys.argv)))
|
||||
check_devlib_version()
|
||||
check_system_encoding()
|
||||
|
||||
# each command will add its own subparser
|
||||
subparsers = parser.add_subparsers(dest='command')
|
||||
|
@ -13,7 +13,7 @@
|
||||
# limitations under the License.
|
||||
#
|
||||
# pylint: disable=unused-import
|
||||
from devlib.exception import (DevlibError, HostError, TimeoutError,
|
||||
from devlib.exception import (DevlibError, HostError, TimeoutError, # pylint: disable=redefined-builtin
|
||||
TargetError, TargetNotRespondingError)
|
||||
|
||||
from wa.utils.misc import get_traceback
|
||||
@ -30,60 +30,49 @@ class WAError(Exception):
|
||||
|
||||
class NotFoundError(WAError):
|
||||
"""Raised when the specified item is not found."""
|
||||
pass
|
||||
|
||||
|
||||
class ValidationError(WAError):
|
||||
"""Raised on failure to validate an extension."""
|
||||
pass
|
||||
|
||||
|
||||
class ExecutionError(WAError):
|
||||
"""Error encountered by the execution framework."""
|
||||
pass
|
||||
|
||||
|
||||
class WorkloadError(WAError):
|
||||
"""General Workload error."""
|
||||
pass
|
||||
|
||||
|
||||
class JobError(WAError):
|
||||
"""Job execution error."""
|
||||
pass
|
||||
|
||||
|
||||
class InstrumentError(WAError):
|
||||
"""General Instrument error."""
|
||||
pass
|
||||
|
||||
|
||||
class OutputProcessorError(WAError):
|
||||
"""General OutputProcessor error."""
|
||||
pass
|
||||
|
||||
|
||||
class ResourceError(WAError):
|
||||
"""General Resolver error."""
|
||||
pass
|
||||
|
||||
|
||||
class CommandError(WAError):
|
||||
"""Raised by commands when they have encountered an error condition
|
||||
during execution."""
|
||||
pass
|
||||
|
||||
|
||||
class ToolError(WAError):
|
||||
"""Raised by tools when they have encountered an error condition
|
||||
during execution."""
|
||||
pass
|
||||
|
||||
|
||||
class ConfigError(WAError):
|
||||
"""Raised when configuration provided is invalid. This error suggests that
|
||||
the user should modify their config and try again."""
|
||||
pass
|
||||
|
||||
|
||||
class SerializerSyntaxError(Exception):
|
||||
|
@ -23,10 +23,10 @@ from copy import copy
|
||||
from datetime import datetime
|
||||
|
||||
import wa.framework.signal as signal
|
||||
from wa.framework import instrument
|
||||
from wa.framework import instrument as instrumentation
|
||||
from wa.framework.configuration.core import Status
|
||||
from wa.framework.exception import TargetError, HostError, WorkloadError
|
||||
from wa.framework.exception import TargetNotRespondingError, TimeoutError
|
||||
from wa.framework.exception import TargetError, HostError, WorkloadError, ExecutionError
|
||||
from wa.framework.exception import TargetNotRespondingError, TimeoutError # pylint: disable=redefined-builtin
|
||||
from wa.framework.job import Job
|
||||
from wa.framework.output import init_job_output
|
||||
from wa.framework.output_processor import ProcessorManager
|
||||
@ -100,15 +100,13 @@ class ExecutionContext(object):
|
||||
self.tm = tm
|
||||
self.run_output = output
|
||||
self.run_state = output.state
|
||||
self.logger.debug('Loading resource discoverers')
|
||||
self.resolver = ResourceResolver(cm.plugin_cache)
|
||||
self.resolver.load()
|
||||
self.job_queue = None
|
||||
self.completed_jobs = None
|
||||
self.current_job = None
|
||||
self.successful_jobs = 0
|
||||
self.failed_jobs = 0
|
||||
self.run_interrupted = False
|
||||
self._load_resource_getters()
|
||||
|
||||
def start_run(self):
|
||||
self.output.info.start_time = datetime.utcnow()
|
||||
@ -130,8 +128,8 @@ class ExecutionContext(object):
|
||||
self.run_state.status = status
|
||||
self.run_output.status = status
|
||||
self.run_output.info.end_time = datetime.utcnow()
|
||||
self.run_output.info.duration = (self.run_output.info.end_time -
|
||||
self.run_output.info.start_time)
|
||||
self.run_output.info.duration = (self.run_output.info.end_time
|
||||
- self.run_output.info.start_time)
|
||||
self.write_output()
|
||||
|
||||
def finalize(self):
|
||||
@ -143,21 +141,24 @@ class ExecutionContext(object):
|
||||
self.current_job = self.job_queue.pop(0)
|
||||
job_output = init_job_output(self.run_output, self.current_job)
|
||||
self.current_job.set_output(job_output)
|
||||
self.update_job_state(self.current_job)
|
||||
return self.current_job
|
||||
|
||||
def end_job(self):
|
||||
if not self.current_job:
|
||||
raise RuntimeError('No jobs in progress')
|
||||
self.completed_jobs.append(self.current_job)
|
||||
self.update_job_state(self.current_job)
|
||||
self.output.write_result()
|
||||
self.current_job = None
|
||||
|
||||
def set_status(self, status, force=False):
|
||||
def set_status(self, status, force=False, write=True):
|
||||
if not self.current_job:
|
||||
raise RuntimeError('No jobs in progress')
|
||||
self.current_job.set_status(status, force)
|
||||
self.set_job_status(self.current_job, status, force, write)
|
||||
|
||||
def set_job_status(self, job, status, force=False, write=True):
|
||||
job.set_status(status, force)
|
||||
if write:
|
||||
self.run_output.write_state()
|
||||
|
||||
def extract_results(self):
|
||||
self.tm.extract_results(self)
|
||||
@ -165,13 +166,8 @@ class ExecutionContext(object):
|
||||
def move_failed(self, job):
|
||||
self.run_output.move_failed(job.output)
|
||||
|
||||
def update_job_state(self, job):
|
||||
self.run_state.update_job(job)
|
||||
self.run_output.write_state()
|
||||
|
||||
def skip_job(self, job):
|
||||
job.status = Status.SKIPPED
|
||||
self.run_state.update_job(job)
|
||||
self.set_job_status(job, Status.SKIPPED, force=True)
|
||||
self.completed_jobs.append(job)
|
||||
|
||||
def skip_remaining_jobs(self):
|
||||
@ -180,6 +176,9 @@ class ExecutionContext(object):
|
||||
self.skip_job(job)
|
||||
self.write_state()
|
||||
|
||||
def write_config(self):
|
||||
self.run_output.write_config(self.cm.get_config())
|
||||
|
||||
def write_state(self):
|
||||
self.run_output.write_state()
|
||||
|
||||
@ -191,6 +190,9 @@ class ExecutionContext(object):
|
||||
def write_job_specs(self):
|
||||
self.run_output.write_job_specs(self.cm.job_specs)
|
||||
|
||||
def add_augmentation(self, aug):
|
||||
self.cm.run_config.add_augmentation(aug)
|
||||
|
||||
def get_resource(self, resource, strict=True):
|
||||
result = self.resolver.get(resource, strict)
|
||||
if result is None:
|
||||
@ -245,6 +247,11 @@ class ExecutionContext(object):
|
||||
def add_event(self, message):
|
||||
self.output.add_event(message)
|
||||
|
||||
def add_classifier(self, name, value, overwrite=False):
|
||||
self.output.add_classifier(name, value, overwrite)
|
||||
if self.current_job:
|
||||
self.current_job.add_classifier(name, value, overwrite)
|
||||
|
||||
def add_metadata(self, key, *args, **kwargs):
|
||||
self.output.add_metadata(key, *args, **kwargs)
|
||||
|
||||
@ -284,7 +291,7 @@ class ExecutionContext(object):
|
||||
try:
|
||||
job.initialize(self)
|
||||
except WorkloadError as e:
|
||||
job.set_status(Status.FAILED)
|
||||
self.set_job_status(job, Status.FAILED, write=False)
|
||||
log.log_error(e, self.logger)
|
||||
failed_ids.append(job.id)
|
||||
|
||||
@ -294,6 +301,14 @@ class ExecutionContext(object):
|
||||
new_queue.append(job)
|
||||
|
||||
self.job_queue = new_queue
|
||||
self.write_state()
|
||||
|
||||
def _load_resource_getters(self):
|
||||
self.logger.debug('Loading resource discoverers')
|
||||
self.resolver = ResourceResolver(self.cm.plugin_cache)
|
||||
self.resolver.load()
|
||||
for getter in self.resolver.getters:
|
||||
self.cm.run_config.add_resource_getter(getter)
|
||||
|
||||
def _get_unique_filepath(self, filename):
|
||||
filepath = os.path.join(self.output_directory, filename)
|
||||
@ -322,7 +337,7 @@ class Executor(object):
|
||||
returning.
|
||||
|
||||
The initial context set up involves combining configuration from various
|
||||
sources, loading of requided workloads, loading and installation of
|
||||
sources, loading of required workloads, loading and installation of
|
||||
instruments and output processors, etc. Static validation of the combined
|
||||
configuration is also performed.
|
||||
|
||||
@ -338,7 +353,7 @@ class Executor(object):
|
||||
def execute(self, config_manager, output):
|
||||
"""
|
||||
Execute the run specified by an agenda. Optionally, selectors may be
|
||||
used to only selecute a subset of the specified agenda.
|
||||
used to only execute a subset of the specified agenda.
|
||||
|
||||
Params::
|
||||
|
||||
@ -365,12 +380,12 @@ class Executor(object):
|
||||
try:
|
||||
self.do_execute(context)
|
||||
except KeyboardInterrupt as e:
|
||||
context.run_output.status = 'ABORTED'
|
||||
context.run_output.status = Status.ABORTED
|
||||
log.log_error(e, self.logger)
|
||||
context.write_output()
|
||||
raise
|
||||
except Exception as e:
|
||||
context.run_output.status = 'FAILED'
|
||||
context.run_output.status = Status.FAILED
|
||||
log.log_error(e, self.logger)
|
||||
context.write_output()
|
||||
raise
|
||||
@ -388,7 +403,7 @@ class Executor(object):
|
||||
attempts = context.cm.run_config.max_retries
|
||||
while attempts:
|
||||
try:
|
||||
self.target_manager.reboot()
|
||||
self.target_manager.reboot(context)
|
||||
except TargetError as e:
|
||||
if attempts:
|
||||
attempts -= 1
|
||||
@ -405,9 +420,9 @@ class Executor(object):
|
||||
context.output.write_state()
|
||||
|
||||
self.logger.info('Installing instruments')
|
||||
for instrument_name in context.cm.get_instruments(self.target_manager.target):
|
||||
instrument.install(instrument_name, context)
|
||||
instrument.validate()
|
||||
for instrument in context.cm.get_instruments(self.target_manager.target):
|
||||
instrumentation.install(instrument, context)
|
||||
instrumentation.validate()
|
||||
|
||||
self.logger.info('Installing output processors')
|
||||
pm = ProcessorManager()
|
||||
@ -415,6 +430,8 @@ class Executor(object):
|
||||
pm.install(proc, context)
|
||||
pm.validate()
|
||||
|
||||
context.write_config()
|
||||
|
||||
self.logger.info('Starting run')
|
||||
runner = Runner(context, pm)
|
||||
signal.send(signal.RUN_STARTED, self, context)
|
||||
@ -432,16 +449,16 @@ class Executor(object):
|
||||
for status in reversed(Status.levels):
|
||||
if status in counter:
|
||||
parts.append('{} {}'.format(counter[status], status))
|
||||
self.logger.info(status_summary + ', '.join(parts))
|
||||
self.logger.info('{}{}'.format(status_summary, ', '.join(parts)))
|
||||
|
||||
self.logger.info('Results can be found in {}'.format(output.basepath))
|
||||
|
||||
if self.error_logged:
|
||||
self.logger.warn('There were errors during execution.')
|
||||
self.logger.warn('Please see {}'.format(output.logfile))
|
||||
self.logger.warning('There were errors during execution.')
|
||||
self.logger.warning('Please see {}'.format(output.logfile))
|
||||
elif self.warning_logged:
|
||||
self.logger.warn('There were warnings during execution.')
|
||||
self.logger.warn('Please see {}'.format(output.logfile))
|
||||
self.logger.warning('There were warnings during execution.')
|
||||
self.logger.warning('Please see {}'.format(output.logfile))
|
||||
|
||||
def _error_signalled_callback(self, _):
|
||||
self.error_logged = True
|
||||
@ -503,7 +520,7 @@ class Runner(object):
|
||||
signal.connect(self._error_signalled_callback, signal.ERROR_LOGGED)
|
||||
signal.connect(self._warning_signalled_callback, signal.WARNING_LOGGED)
|
||||
self.context.start_run()
|
||||
self.pm.initialize()
|
||||
self.pm.initialize(self.context)
|
||||
with log.indentcontext():
|
||||
self.context.initialize_jobs()
|
||||
self.context.write_state()
|
||||
@ -519,7 +536,10 @@ class Runner(object):
|
||||
with signal.wrap('RUN_OUTPUT_PROCESSED', self):
|
||||
self.pm.process_run_output(self.context)
|
||||
self.pm.export_run_output(self.context)
|
||||
self.pm.finalize()
|
||||
self.pm.finalize(self.context)
|
||||
if self.context.reboot_policy.reboot_on_run_completion:
|
||||
self.logger.info('Rebooting target on run completion.')
|
||||
self.context.tm.reboot(self.context)
|
||||
signal.disconnect(self._error_signalled_callback, signal.ERROR_LOGGED)
|
||||
signal.disconnect(self._warning_signalled_callback, signal.WARNING_LOGGED)
|
||||
|
||||
@ -539,15 +559,15 @@ class Runner(object):
|
||||
with signal.wrap('JOB', self, context):
|
||||
context.tm.start()
|
||||
self.do_run_job(job, context)
|
||||
job.set_status(Status.OK)
|
||||
context.set_job_status(job, Status.OK)
|
||||
except (Exception, KeyboardInterrupt) as e: # pylint: disable=broad-except
|
||||
log.log_error(e, self.logger)
|
||||
if isinstance(e, KeyboardInterrupt):
|
||||
context.run_interrupted = True
|
||||
job.set_status(Status.ABORTED)
|
||||
context.set_job_status(job, Status.ABORTED)
|
||||
raise e
|
||||
else:
|
||||
job.set_status(Status.FAILED)
|
||||
context.set_job_status(job, Status.FAILED)
|
||||
if isinstance(e, TargetNotRespondingError):
|
||||
raise e
|
||||
elif isinstance(e, TargetError):
|
||||
@ -570,7 +590,7 @@ class Runner(object):
|
||||
self.context.skip_job(job)
|
||||
return
|
||||
|
||||
job.set_status(Status.RUNNING)
|
||||
context.set_job_status(job, Status.RUNNING)
|
||||
self.send(signal.JOB_STARTED)
|
||||
|
||||
job.configure_augmentations(context, self.pm)
|
||||
@ -581,7 +601,7 @@ class Runner(object):
|
||||
try:
|
||||
job.setup(context)
|
||||
except Exception as e:
|
||||
job.set_status(Status.FAILED)
|
||||
context.set_job_status(job, Status.FAILED)
|
||||
log.log_error(e, self.logger)
|
||||
if isinstance(e, (TargetError, TimeoutError)):
|
||||
context.tm.verify_target_responsive(context)
|
||||
@ -594,10 +614,10 @@ class Runner(object):
|
||||
job.run(context)
|
||||
except KeyboardInterrupt:
|
||||
context.run_interrupted = True
|
||||
job.set_status(Status.ABORTED)
|
||||
context.set_job_status(job, Status.ABORTED)
|
||||
raise
|
||||
except Exception as e:
|
||||
job.set_status(Status.FAILED)
|
||||
context.set_job_status(job, Status.FAILED)
|
||||
log.log_error(e, self.logger)
|
||||
if isinstance(e, (TargetError, TimeoutError)):
|
||||
context.tm.verify_target_responsive(context)
|
||||
@ -610,7 +630,7 @@ class Runner(object):
|
||||
self.pm.process_job_output(context)
|
||||
self.pm.export_job_output(context)
|
||||
except Exception as e:
|
||||
job.set_status(Status.PARTIAL)
|
||||
context.set_job_status(job, Status.PARTIAL)
|
||||
if isinstance(e, (TargetError, TimeoutError)):
|
||||
context.tm.verify_target_responsive(context)
|
||||
self.context.record_ui_state('output-error')
|
||||
@ -618,7 +638,7 @@ class Runner(object):
|
||||
|
||||
except KeyboardInterrupt:
|
||||
context.run_interrupted = True
|
||||
job.set_status(Status.ABORTED)
|
||||
context.set_status(Status.ABORTED)
|
||||
raise
|
||||
finally:
|
||||
# If setup was successfully completed, teardown must
|
||||
@ -640,6 +660,9 @@ class Runner(object):
|
||||
self.logger.error(msg.format(job.id, job.iteration, job.status))
|
||||
self.context.failed_jobs += 1
|
||||
self.send(signal.JOB_FAILED)
|
||||
if rc.bail_on_job_failure:
|
||||
raise ExecutionError('Job {} failed, bailing.'.format(job.id))
|
||||
|
||||
else: # status not in retry_on_status
|
||||
self.logger.info('Job completed with status {}'.format(job.status))
|
||||
if job.status != 'ABORTED':
|
||||
@ -651,8 +674,9 @@ class Runner(object):
|
||||
def retry_job(self, job):
|
||||
retry_job = Job(job.spec, job.iteration, self.context)
|
||||
retry_job.workload = job.workload
|
||||
retry_job.state = job.state
|
||||
retry_job.retries = job.retries + 1
|
||||
retry_job.set_status(Status.PENDING)
|
||||
self.context.set_job_status(retry_job, Status.PENDING, force=True)
|
||||
self.context.job_queue.insert(0, retry_job)
|
||||
self.send(signal.JOB_RESTARTED)
|
||||
|
||||
|
@ -31,7 +31,7 @@ import requests
|
||||
from wa import Parameter, settings, __file__ as _base_filepath
|
||||
from wa.framework.resource import ResourceGetter, SourcePriority, NO_ONE
|
||||
from wa.framework.exception import ResourceError
|
||||
from wa.utils.misc import (ensure_directory_exists as _d,
|
||||
from wa.utils.misc import (ensure_directory_exists as _d, atomic_write_path,
|
||||
ensure_file_directory_exists as _f, sha256, urljoin)
|
||||
from wa.utils.types import boolean, caseless_string
|
||||
|
||||
@ -78,15 +78,20 @@ def get_path_matches(resource, files):
|
||||
return matches
|
||||
|
||||
|
||||
# pylint: disable=too-many-return-statements
|
||||
def get_from_location(basepath, resource):
|
||||
if resource.kind == 'file':
|
||||
path = os.path.join(basepath, resource.path)
|
||||
if os.path.exists(path):
|
||||
return path
|
||||
elif resource.kind == 'executable':
|
||||
path = os.path.join(basepath, 'bin', resource.abi, resource.filename)
|
||||
if os.path.exists(path):
|
||||
return path
|
||||
bin_dir = os.path.join(basepath, 'bin', resource.abi)
|
||||
if not os.path.exists(bin_dir):
|
||||
return None
|
||||
for entry in os.listdir(bin_dir):
|
||||
path = os.path.join(bin_dir, entry)
|
||||
if resource.match(path):
|
||||
return path
|
||||
elif resource.kind == 'revent':
|
||||
path = os.path.join(basepath, 'revent_files')
|
||||
if os.path.exists(path):
|
||||
@ -234,21 +239,19 @@ class Http(ResourceGetter):
|
||||
index_url = urljoin(self.url, 'index.json')
|
||||
response = self.geturl(index_url)
|
||||
if response.status_code != http.client.OK:
|
||||
message = 'Could not fetch "{}"; recieved "{} {}"'
|
||||
message = 'Could not fetch "{}"; received "{} {}"'
|
||||
self.logger.error(message.format(index_url,
|
||||
response.status_code,
|
||||
response.reason))
|
||||
return {}
|
||||
if sys.version_info[0] == 3:
|
||||
content = response.content.decode('utf-8')
|
||||
else:
|
||||
content = response.content
|
||||
content = response.content.decode('utf-8')
|
||||
return json.loads(content)
|
||||
|
||||
def download_asset(self, asset, owner_name):
|
||||
url = urljoin(self.url, owner_name, asset['path'])
|
||||
local_path = _f(os.path.join(settings.dependencies_directory, '__remote',
|
||||
owner_name, asset['path'].replace('/', os.sep)))
|
||||
|
||||
if os.path.exists(local_path) and not self.always_fetch:
|
||||
local_sha = sha256(local_path)
|
||||
if local_sha == asset['sha256']:
|
||||
@ -257,14 +260,15 @@ class Http(ResourceGetter):
|
||||
self.logger.debug('Downloading {}'.format(url))
|
||||
response = self.geturl(url, stream=True)
|
||||
if response.status_code != http.client.OK:
|
||||
message = 'Could not download asset "{}"; recieved "{} {}"'
|
||||
message = 'Could not download asset "{}"; received "{} {}"'
|
||||
self.logger.warning(message.format(url,
|
||||
response.status_code,
|
||||
response.reason))
|
||||
return
|
||||
with open(local_path, 'wb') as wfh:
|
||||
for chunk in response.iter_content(chunk_size=self.chunk_size):
|
||||
wfh.write(chunk)
|
||||
with atomic_write_path(local_path) as at_path:
|
||||
with open(at_path, 'wb') as wfh:
|
||||
for chunk in response.iter_content(chunk_size=self.chunk_size):
|
||||
wfh.write(chunk)
|
||||
return local_path
|
||||
|
||||
def geturl(self, url, stream=False):
|
||||
@ -322,7 +326,8 @@ class Filer(ResourceGetter):
|
||||
|
||||
"""
|
||||
parameters = [
|
||||
Parameter('remote_path', global_alias='remote_assets_path', default='',
|
||||
Parameter('remote_path', global_alias='remote_assets_path',
|
||||
default=settings.assets_repository,
|
||||
description="""
|
||||
Path, on the local system, where the assets are located.
|
||||
"""),
|
||||
|
@ -42,6 +42,7 @@ def init_user_directory(overwrite_existing=False): # pylint: disable=R0914
|
||||
os.makedirs(settings.user_directory)
|
||||
os.makedirs(settings.dependencies_directory)
|
||||
os.makedirs(settings.plugins_directory)
|
||||
os.makedirs(settings.cache_directory)
|
||||
|
||||
generate_default_config(os.path.join(settings.user_directory, 'config.yaml'))
|
||||
|
||||
@ -49,6 +50,7 @@ def init_user_directory(overwrite_existing=False): # pylint: disable=R0914
|
||||
# If running with sudo on POSIX, change the ownership to the real user.
|
||||
real_user = os.getenv('SUDO_USER')
|
||||
if real_user:
|
||||
# pylint: disable=import-outside-toplevel
|
||||
import pwd # done here as module won't import on win32
|
||||
user_entry = pwd.getpwnam(real_user)
|
||||
uid, gid = user_entry.pw_uid, user_entry.pw_gid
|
||||
|
@ -103,8 +103,8 @@ import inspect
|
||||
from collections import OrderedDict
|
||||
|
||||
from wa.framework import signal
|
||||
from wa.framework.plugin import Plugin
|
||||
from wa.framework.exception import (TargetNotRespondingError, TimeoutError,
|
||||
from wa.framework.plugin import TargetedPlugin
|
||||
from wa.framework.exception import (TargetNotRespondingError, TimeoutError, # pylint: disable=redefined-builtin
|
||||
WorkloadError, TargetError)
|
||||
from wa.utils.log import log_error
|
||||
from wa.utils.misc import isiterable
|
||||
@ -324,7 +324,7 @@ def install(instrument, context):
|
||||
if not callable(attr):
|
||||
msg = 'Attribute {} not callable in {}.'
|
||||
raise ValueError(msg.format(attr_name, instrument))
|
||||
argspec = inspect.getargspec(attr)
|
||||
argspec = inspect.getfullargspec(attr)
|
||||
arg_num = len(argspec.args)
|
||||
# Instrument callbacks will be passed exactly two arguments: self
|
||||
# (the instrument instance to which the callback is bound) and
|
||||
@ -345,6 +345,7 @@ def install(instrument, context):
|
||||
|
||||
instrument.logger.context = context
|
||||
installed.append(instrument)
|
||||
context.add_augmentation(instrument)
|
||||
|
||||
|
||||
def uninstall(instrument):
|
||||
@ -416,14 +417,13 @@ def get_disabled():
|
||||
return [i for i in installed if not i.is_enabled]
|
||||
|
||||
|
||||
class Instrument(Plugin):
|
||||
class Instrument(TargetedPlugin):
|
||||
"""
|
||||
Base class for instrument implementations.
|
||||
"""
|
||||
kind = "instrument"
|
||||
|
||||
def __init__(self, target, **kwargs):
|
||||
super(Instrument, self).__init__(**kwargs)
|
||||
self.target = target
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(Instrument, self).__init__(*args, **kwargs)
|
||||
self.is_enabled = True
|
||||
self.is_broken = False
|
||||
|
@ -23,6 +23,7 @@ from datetime import datetime
|
||||
from wa.framework import pluginloader, signal, instrument
|
||||
from wa.framework.configuration.core import Status
|
||||
from wa.utils.log import indentcontext
|
||||
from wa.framework.run import JobState
|
||||
|
||||
|
||||
class Job(object):
|
||||
@ -37,24 +38,29 @@ class Job(object):
|
||||
def label(self):
|
||||
return self.spec.label
|
||||
|
||||
@property
|
||||
def classifiers(self):
|
||||
return self.spec.classifiers
|
||||
|
||||
@property
|
||||
def status(self):
|
||||
return self._status
|
||||
return self.state.status
|
||||
|
||||
@property
|
||||
def has_been_initialized(self):
|
||||
return self._has_been_initialized
|
||||
|
||||
@property
|
||||
def retries(self):
|
||||
return self.state.retries
|
||||
|
||||
@status.setter
|
||||
def status(self, value):
|
||||
self._status = value
|
||||
self.state.status = value
|
||||
self.state.timestamp = datetime.utcnow()
|
||||
if self.output:
|
||||
self.output.status = value
|
||||
|
||||
@retries.setter
|
||||
def retries(self, value):
|
||||
self.state.retries = value
|
||||
|
||||
def __init__(self, spec, iteration, context):
|
||||
self.logger = logging.getLogger('job')
|
||||
self.spec = spec
|
||||
@ -63,13 +69,13 @@ class Job(object):
|
||||
self.workload = None
|
||||
self.output = None
|
||||
self.run_time = None
|
||||
self.retries = 0
|
||||
self.classifiers = copy(self.spec.classifiers)
|
||||
self._has_been_initialized = False
|
||||
self._status = Status.NEW
|
||||
self.state = JobState(self.id, self.label, self.iteration, Status.NEW)
|
||||
|
||||
def load(self, target, loader=pluginloader):
|
||||
self.logger.info('Loading job {}'.format(self))
|
||||
if self.iteration == 1:
|
||||
if self.id not in self._workload_cache:
|
||||
self.workload = loader.get_workload(self.spec.workload_name,
|
||||
target,
|
||||
**self.spec.workload_parameters)
|
||||
@ -91,7 +97,6 @@ class Job(object):
|
||||
self.workload.initialize(context)
|
||||
self.set_status(Status.PENDING)
|
||||
self._has_been_initialized = True
|
||||
context.update_job_state(self)
|
||||
|
||||
def configure_augmentations(self, context, pm):
|
||||
self.logger.info('Configuring augmentations')
|
||||
@ -181,6 +186,11 @@ class Job(object):
|
||||
if force or self.status < status:
|
||||
self.status = status
|
||||
|
||||
def add_classifier(self, name, value, overwrite=False):
|
||||
if name in self.classifiers and not overwrite:
|
||||
raise ValueError('Cannot overwrite "{}" classifier.'.format(name))
|
||||
self.classifiers[name] = value
|
||||
|
||||
def __str__(self):
|
||||
return '{} ({}) [{}]'.format(self.id, self.label, self.iteration)
|
||||
|
||||
|
@ -13,23 +13,36 @@
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
try:
|
||||
import psycopg2
|
||||
from psycopg2 import Error as Psycopg2Error
|
||||
except ImportError:
|
||||
psycopg2 = None
|
||||
Psycopg2Error = None
|
||||
|
||||
import logging
|
||||
import os
|
||||
import shutil
|
||||
from collections import OrderedDict
|
||||
import tarfile
|
||||
import tempfile
|
||||
from collections import OrderedDict, defaultdict
|
||||
from copy import copy, deepcopy
|
||||
from datetime import datetime
|
||||
from io import StringIO
|
||||
|
||||
import devlib
|
||||
|
||||
from wa.framework.configuration.core import JobSpec, Status
|
||||
from wa.framework.configuration.execution import CombinedConfig
|
||||
from wa.framework.exception import HostError
|
||||
from wa.framework.exception import HostError, SerializerSyntaxError, ConfigError
|
||||
from wa.framework.run import RunState, RunInfo
|
||||
from wa.framework.target.info import TargetInfo
|
||||
from wa.framework.version import get_wa_version_with_commit
|
||||
from wa.utils.misc import touch, ensure_directory_exists, isiterable
|
||||
from wa.utils.serializer import write_pod, read_pod
|
||||
from wa.utils.doc import format_simple_table
|
||||
from wa.utils.misc import (touch, ensure_directory_exists, isiterable,
|
||||
format_ordered_dict, safe_extract)
|
||||
from wa.utils.postgres import get_schema_versions
|
||||
from wa.utils.serializer import write_pod, read_pod, Podable, json
|
||||
from wa.utils.types import enum, numeric
|
||||
|
||||
|
||||
@ -135,9 +148,10 @@ class Output(object):
|
||||
if not os.path.exists(path):
|
||||
msg = 'Attempting to add non-existing artifact: {}'
|
||||
raise HostError(msg.format(path))
|
||||
is_dir = os.path.isdir(path)
|
||||
path = os.path.relpath(path, self.basepath)
|
||||
|
||||
self.result.add_artifact(name, path, kind, description, classifiers)
|
||||
self.result.add_artifact(name, path, kind, description, classifiers, is_dir)
|
||||
|
||||
def add_event(self, message):
|
||||
self.result.add_event(message)
|
||||
@ -152,6 +166,9 @@ class Output(object):
|
||||
artifact = self.get_artifact(name)
|
||||
return self.get_path(artifact.path)
|
||||
|
||||
def add_classifier(self, name, value, overwrite=False):
|
||||
self.result.add_classifier(name, value, overwrite)
|
||||
|
||||
def add_metadata(self, key, *args, **kwargs):
|
||||
self.result.add_metadata(key, *args, **kwargs)
|
||||
|
||||
@ -166,7 +183,35 @@ class Output(object):
|
||||
return os.path.basename(self.basepath)
|
||||
|
||||
|
||||
class RunOutput(Output):
|
||||
class RunOutputCommon(object):
|
||||
''' Split out common functionality to form a second base of
|
||||
the RunOutput classes
|
||||
'''
|
||||
@property
|
||||
def run_config(self):
|
||||
if self._combined_config:
|
||||
return self._combined_config.run_config
|
||||
|
||||
@property
|
||||
def settings(self):
|
||||
if self._combined_config:
|
||||
return self._combined_config.settings
|
||||
|
||||
def get_job_spec(self, spec_id):
|
||||
for spec in self.job_specs:
|
||||
if spec.id == spec_id:
|
||||
return spec
|
||||
return None
|
||||
|
||||
def list_workloads(self):
|
||||
workloads = []
|
||||
for job in self.jobs:
|
||||
if job.label not in workloads:
|
||||
workloads.append(job.label)
|
||||
return workloads
|
||||
|
||||
|
||||
class RunOutput(Output, RunOutputCommon):
|
||||
|
||||
kind = 'run'
|
||||
|
||||
@ -207,16 +252,6 @@ class RunOutput(Output):
|
||||
path = os.path.join(self.basepath, '__failed')
|
||||
return ensure_directory_exists(path)
|
||||
|
||||
@property
|
||||
def run_config(self):
|
||||
if self._combined_config:
|
||||
return self._combined_config.run_config
|
||||
|
||||
@property
|
||||
def settings(self):
|
||||
if self._combined_config:
|
||||
return self._combined_config.settings
|
||||
|
||||
@property
|
||||
def augmentations(self):
|
||||
run_augs = set([])
|
||||
@ -234,8 +269,8 @@ class RunOutput(Output):
|
||||
self._combined_config = None
|
||||
self.jobs = []
|
||||
self.job_specs = []
|
||||
if (not os.path.isfile(self.statefile) or
|
||||
not os.path.isfile(self.infofile)):
|
||||
if (not os.path.isfile(self.statefile)
|
||||
or not os.path.isfile(self.infofile)):
|
||||
msg = '"{}" does not exist or is not a valid WA output directory.'
|
||||
raise ValueError(msg.format(self.basepath))
|
||||
self.reload()
|
||||
@ -269,6 +304,7 @@ class RunOutput(Output):
|
||||
write_pod(self.state.to_pod(), self.statefile)
|
||||
|
||||
def write_config(self, config):
|
||||
self._combined_config = config
|
||||
write_pod(config.to_pod(), self.configfile)
|
||||
|
||||
def read_config(self):
|
||||
@ -301,19 +337,6 @@ class RunOutput(Output):
|
||||
shutil.move(job_output.basepath, failed_path)
|
||||
job_output.basepath = failed_path
|
||||
|
||||
def get_job_spec(self, spec_id):
|
||||
for spec in self.job_specs:
|
||||
if spec.id == spec_id:
|
||||
return spec
|
||||
return None
|
||||
|
||||
def list_workloads(self):
|
||||
workloads = []
|
||||
for job in self.jobs:
|
||||
if job.label not in workloads:
|
||||
workloads.append(job.label)
|
||||
return workloads
|
||||
|
||||
|
||||
class JobOutput(Output):
|
||||
|
||||
@ -330,13 +353,22 @@ class JobOutput(Output):
|
||||
self.spec = None
|
||||
self.reload()
|
||||
|
||||
@property
|
||||
def augmentations(self):
|
||||
job_augs = set([])
|
||||
for aug in self.spec.augmentations:
|
||||
job_augs.add(aug)
|
||||
return list(job_augs)
|
||||
|
||||
class Result(object):
|
||||
|
||||
class Result(Podable):
|
||||
|
||||
_pod_serialization_version = 1
|
||||
|
||||
@staticmethod
|
||||
def from_pod(pod):
|
||||
instance = Result()
|
||||
instance.status = Status(pod['status'])
|
||||
instance = super(Result, Result).from_pod(pod)
|
||||
instance.status = Status.from_pod(pod['status'])
|
||||
instance.metrics = [Metric.from_pod(m) for m in pod['metrics']]
|
||||
instance.artifacts = [Artifact.from_pod(a) for a in pod['artifacts']]
|
||||
instance.events = [Event.from_pod(e) for e in pod['events']]
|
||||
@ -346,6 +378,7 @@ class Result(object):
|
||||
|
||||
def __init__(self):
|
||||
# pylint: disable=no-member
|
||||
super(Result, self).__init__()
|
||||
self.status = Status.NEW
|
||||
self.metrics = []
|
||||
self.artifacts = []
|
||||
@ -359,9 +392,10 @@ class Result(object):
|
||||
logger.debug('Adding metric: {}'.format(metric))
|
||||
self.metrics.append(metric)
|
||||
|
||||
def add_artifact(self, name, path, kind, description=None, classifiers=None):
|
||||
def add_artifact(self, name, path, kind, description=None, classifiers=None,
|
||||
is_dir=False):
|
||||
artifact = Artifact(name, path, kind, description=description,
|
||||
classifiers=classifiers)
|
||||
classifiers=classifiers, is_dir=is_dir)
|
||||
logger.debug('Adding artifact: {}'.format(artifact))
|
||||
self.artifacts.append(artifact)
|
||||
|
||||
@ -380,6 +414,21 @@ class Result(object):
|
||||
return artifact
|
||||
raise HostError('Artifact "{}" not found'.format(name))
|
||||
|
||||
def add_classifier(self, name, value, overwrite=False):
|
||||
if name in self.classifiers and not overwrite:
|
||||
raise ValueError('Cannot overwrite "{}" classifier.'.format(name))
|
||||
self.classifiers[name] = value
|
||||
|
||||
for metric in self.metrics:
|
||||
if name in metric.classifiers and not overwrite:
|
||||
raise ValueError('Cannot overwrite "{}" classifier; clashes with {}.'.format(name, metric))
|
||||
metric.classifiers[name] = value
|
||||
|
||||
for artifact in self.artifacts:
|
||||
if name in artifact.classifiers and not overwrite:
|
||||
raise ValueError('Cannot overwrite "{}" classifier; clashes with {}.'.format(name, artifact))
|
||||
artifact.classifiers[name] = value
|
||||
|
||||
def add_metadata(self, key, *args, **kwargs):
|
||||
force = kwargs.pop('force', False)
|
||||
if kwargs:
|
||||
@ -429,21 +478,27 @@ class Result(object):
|
||||
self.metadata[key] = args[0]
|
||||
|
||||
def to_pod(self):
|
||||
return dict(
|
||||
status=str(self.status),
|
||||
metrics=[m.to_pod() for m in self.metrics],
|
||||
artifacts=[a.to_pod() for a in self.artifacts],
|
||||
events=[e.to_pod() for e in self.events],
|
||||
classifiers=copy(self.classifiers),
|
||||
metadata=deepcopy(self.metadata),
|
||||
)
|
||||
pod = super(Result, self).to_pod()
|
||||
pod['status'] = self.status.to_pod()
|
||||
pod['metrics'] = [m.to_pod() for m in self.metrics]
|
||||
pod['artifacts'] = [a.to_pod() for a in self.artifacts]
|
||||
pod['events'] = [e.to_pod() for e in self.events]
|
||||
pod['classifiers'] = copy(self.classifiers)
|
||||
pod['metadata'] = deepcopy(self.metadata)
|
||||
return pod
|
||||
|
||||
@staticmethod
|
||||
def _pod_upgrade_v1(pod):
|
||||
pod['_pod_version'] = pod.get('_pod_version', 1)
|
||||
pod['status'] = Status(pod['status']).to_pod()
|
||||
return pod
|
||||
|
||||
|
||||
ARTIFACT_TYPES = ['log', 'meta', 'data', 'export', 'raw']
|
||||
ArtifactType = enum(ARTIFACT_TYPES)
|
||||
|
||||
|
||||
class Artifact(object):
|
||||
class Artifact(Podable):
|
||||
"""
|
||||
This is an artifact generated during execution/post-processing of a
|
||||
workload. Unlike metrics, this represents an actual artifact, such as a
|
||||
@ -491,12 +546,20 @@ class Artifact(object):
|
||||
|
||||
"""
|
||||
|
||||
_pod_serialization_version = 2
|
||||
|
||||
@staticmethod
|
||||
def from_pod(pod):
|
||||
pod = Artifact._upgrade_pod(pod)
|
||||
pod_version = pod.pop('_pod_version')
|
||||
pod['kind'] = ArtifactType(pod['kind'])
|
||||
return Artifact(**pod)
|
||||
instance = Artifact(**pod)
|
||||
instance._pod_version = pod_version # pylint: disable =protected-access
|
||||
instance.is_dir = pod.pop('is_dir')
|
||||
return instance
|
||||
|
||||
def __init__(self, name, path, kind, description=None, classifiers=None):
|
||||
def __init__(self, name, path, kind, description=None, classifiers=None,
|
||||
is_dir=False):
|
||||
""""
|
||||
:param name: Name that uniquely identifies this artifact.
|
||||
:param path: The *relative* path of the artifact. Depending on the
|
||||
@ -512,8 +575,8 @@ class Artifact(object):
|
||||
:param classifiers: A set of key-value pairs to further classify this
|
||||
metric beyond current iteration (e.g. this can be
|
||||
used to identify sub-tests).
|
||||
|
||||
"""
|
||||
super(Artifact, self).__init__()
|
||||
self.name = name
|
||||
self.path = path.replace('/', os.sep) if path is not None else path
|
||||
try:
|
||||
@ -523,20 +586,34 @@ class Artifact(object):
|
||||
raise ValueError(msg.format(kind, ARTIFACT_TYPES))
|
||||
self.description = description
|
||||
self.classifiers = classifiers or {}
|
||||
self.is_dir = is_dir
|
||||
|
||||
def to_pod(self):
|
||||
pod = copy(self.__dict__)
|
||||
pod = super(Artifact, self).to_pod()
|
||||
pod.update(self.__dict__)
|
||||
pod['kind'] = str(self.kind)
|
||||
pod['is_dir'] = self.is_dir
|
||||
return pod
|
||||
|
||||
@staticmethod
|
||||
def _pod_upgrade_v1(pod):
|
||||
pod['_pod_version'] = pod.get('_pod_version', 1)
|
||||
return pod
|
||||
|
||||
@staticmethod
|
||||
def _pod_upgrade_v2(pod):
|
||||
pod['is_dir'] = pod.get('is_dir', False)
|
||||
return pod
|
||||
|
||||
def __str__(self):
|
||||
return self.path
|
||||
|
||||
def __repr__(self):
|
||||
return '{} ({}): {}'.format(self.name, self.kind, self.path)
|
||||
ft = 'dir' if self.is_dir else 'file'
|
||||
return '{} ({}) ({}): {}'.format(self.name, ft, self.kind, self.path)
|
||||
|
||||
|
||||
class Metric(object):
|
||||
class Metric(Podable):
|
||||
"""
|
||||
This is a single metric collected from executing a workload.
|
||||
|
||||
@ -553,15 +630,26 @@ class Metric(object):
|
||||
to identify sub-tests).
|
||||
|
||||
"""
|
||||
|
||||
__slots__ = ['name', 'value', 'units', 'lower_is_better', 'classifiers']
|
||||
_pod_serialization_version = 1
|
||||
|
||||
@staticmethod
|
||||
def from_pod(pod):
|
||||
return Metric(**pod)
|
||||
pod = Metric._upgrade_pod(pod)
|
||||
pod_version = pod.pop('_pod_version')
|
||||
instance = Metric(**pod)
|
||||
instance._pod_version = pod_version # pylint: disable =protected-access
|
||||
return instance
|
||||
|
||||
@property
|
||||
def label(self):
|
||||
parts = ['{}={}'.format(n, v) for n, v in self.classifiers.items()]
|
||||
parts.insert(0, self.name)
|
||||
return '/'.join(parts)
|
||||
|
||||
def __init__(self, name, value, units=None, lower_is_better=False,
|
||||
classifiers=None):
|
||||
super(Metric, self).__init__()
|
||||
self.name = name
|
||||
self.value = numeric(value)
|
||||
self.units = units
|
||||
@ -569,13 +657,18 @@ class Metric(object):
|
||||
self.classifiers = classifiers or {}
|
||||
|
||||
def to_pod(self):
|
||||
return dict(
|
||||
name=self.name,
|
||||
value=self.value,
|
||||
units=self.units,
|
||||
lower_is_better=self.lower_is_better,
|
||||
classifiers=self.classifiers,
|
||||
)
|
||||
pod = super(Metric, self).to_pod()
|
||||
pod['name'] = self.name
|
||||
pod['value'] = self.value
|
||||
pod['units'] = self.units
|
||||
pod['lower_is_better'] = self.lower_is_better
|
||||
pod['classifiers'] = self.classifiers
|
||||
return pod
|
||||
|
||||
@staticmethod
|
||||
def _pod_upgrade_v1(pod):
|
||||
pod['_pod_version'] = pod.get('_pod_version', 1)
|
||||
return pod
|
||||
|
||||
def __str__(self):
|
||||
result = '{}: {}'.format(self.name, self.value)
|
||||
@ -587,23 +680,27 @@ class Metric(object):
|
||||
def __repr__(self):
|
||||
text = self.__str__()
|
||||
if self.classifiers:
|
||||
return '<{} {}>'.format(text, self.classifiers)
|
||||
return '<{} {}>'.format(text, format_ordered_dict(self.classifiers))
|
||||
else:
|
||||
return '<{}>'.format(text)
|
||||
|
||||
|
||||
class Event(object):
|
||||
class Event(Podable):
|
||||
"""
|
||||
An event that occured during a run.
|
||||
|
||||
"""
|
||||
|
||||
__slots__ = ['timestamp', 'message']
|
||||
_pod_serialization_version = 1
|
||||
|
||||
@staticmethod
|
||||
def from_pod(pod):
|
||||
pod = Event._upgrade_pod(pod)
|
||||
pod_version = pod.pop('_pod_version')
|
||||
instance = Event(pod['message'])
|
||||
instance.timestamp = pod['timestamp']
|
||||
instance._pod_version = pod_version # pylint: disable =protected-access
|
||||
return instance
|
||||
|
||||
@property
|
||||
@ -615,14 +712,20 @@ class Event(object):
|
||||
return result
|
||||
|
||||
def __init__(self, message):
|
||||
super(Event, self).__init__()
|
||||
self.timestamp = datetime.utcnow()
|
||||
self.message = message
|
||||
self.message = str(message)
|
||||
|
||||
def to_pod(self):
|
||||
return dict(
|
||||
timestamp=self.timestamp,
|
||||
message=self.message,
|
||||
)
|
||||
pod = super(Event, self).to_pod()
|
||||
pod['timestamp'] = self.timestamp
|
||||
pod['message'] = self.message
|
||||
return pod
|
||||
|
||||
@staticmethod
|
||||
def _pod_upgrade_v1(pod):
|
||||
pod['_pod_version'] = pod.get('_pod_version', 1)
|
||||
return pod
|
||||
|
||||
def __str__(self):
|
||||
return '[{}] {}'.format(self.timestamp, self.message)
|
||||
@@ -674,9 +777,13 @@ def init_job_output(run_output, job):


def discover_wa_outputs(path):
    for root, dirs, _ in os.walk(path):
    # Use topdown=True to allow pruning dirs
    for root, dirs, _ in os.walk(path, topdown=True):
        if '__meta' in dirs:
            yield RunOutput(root)
            # Avoid recursing into the artifact as it can be very lengthy if a
            # large number of file is present (sysfs dump)
            dirs.clear()


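Hypothetical usage of the generator above (the path is invented; ``list_workloads`` is the RunOutput helper shown earlier in this diff):

    from wa.framework.output import discover_wa_outputs

    for run_output in discover_wa_outputs('/data/wa_results'):
        print(run_output.basepath, run_output.list_workloads())
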
def _save_raw_config(meta_dir, state):
|
||||
@ -689,3 +796,502 @@ def _save_raw_config(meta_dir, state):
|
||||
basename = os.path.basename(source)
|
||||
dest_path = os.path.join(raw_config_dir, 'cfg{}-{}'.format(i, basename))
|
||||
shutil.copy(source, dest_path)
|
||||
|
||||
|
||||
class DatabaseOutput(Output):
|
||||
|
||||
kind = None
|
||||
|
||||
@property
|
||||
def resultfile(self):
|
||||
if self.conn is None or self.oid is None:
|
||||
return {}
|
||||
pod = self._get_pod_version()
|
||||
pod['metrics'] = self._get_metrics()
|
||||
pod['status'] = self._get_status()
|
||||
pod['classifiers'] = self._get_classifiers(self.oid, 'run')
|
||||
pod['events'] = self._get_events()
|
||||
pod['artifacts'] = self._get_artifacts()
|
||||
return pod
|
||||
|
||||
@staticmethod
|
||||
def _build_command(columns, tables, conditions=None, joins=None):
|
||||
cmd = '''SELECT\n\t{}\nFROM\n\t{}'''.format(',\n\t'.join(columns), ',\n\t'.join(tables))
|
||||
if joins:
|
||||
for join in joins:
|
||||
cmd += '''\nLEFT JOIN {} ON {}'''.format(join[0], join[1])
|
||||
if conditions:
|
||||
cmd += '''\nWHERE\n\t{}'''.format('\nAND\n\t'.join(conditions))
|
||||
return cmd + ';'
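
_build_command only concatenates the SELECT/FROM, LEFT JOIN and WHERE fragments with '\n\t' separators, so a call such as the following (table, column and oid values are illustrative) produces the SQL shown in the trailing comment (tabs rendered as spaces):

    cmd = DatabaseOutput._build_command(
        columns=['metrics.name', 'metrics.value'],
        tables=['metrics'],
        conditions=["metrics.run_oid = '42'"],
        joins=[('classifiers', 'classifiers.metric_oid = metrics.oid')])
    # SELECT
    #     metrics.name,
    #     metrics.value
    # FROM
    #     metrics
    # LEFT JOIN classifiers ON classifiers.metric_oid = metrics.oid
    # WHERE
    #     metrics.run_oid = '42';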

    def __init__(self, conn, oid=None, reload=True):  # pylint: disable=super-init-not-called
        self.conn = conn
        self.oid = oid
        self.result = None
        if reload:
            self.reload()

    def __repr__(self):
        return '<{} {}>'.format(self.__class__.__name__, self.oid)

    def __str__(self):
        return self.oid

    def reload(self):
        try:
            self.result = Result.from_pod(self.resultfile)
        except Exception as e:  # pylint: disable=broad-except
            self.result = Result()
            self.result.status = Status.UNKNOWN
            self.add_event(str(e))

    def get_artifact_path(self, name):
        artifact = self.get_artifact(name)
        if artifact.is_dir:
            return self._read_dir_artifact(artifact)
        else:
            return self._read_file_artifact(artifact)

    def _read_dir_artifact(self, artifact):
        artifact_path = tempfile.mkdtemp(prefix='wa_')
        with tarfile.open(fileobj=self.conn.lobject(int(artifact.path), mode='b'), mode='r|gz') as tar_file:
            safe_extract(tar_file, artifact_path)
        self.conn.commit()
        return artifact_path

    def _read_file_artifact(self, artifact):
        artifact = StringIO(self.conn.lobject(int(artifact.path)).read())
        self.conn.commit()
        return artifact

    # pylint: disable=too-many-locals
    def _read_db(self, columns, tables, conditions=None, join=None, as_dict=True):
        # Strip the table name from a column when column names are used as
        # dictionary keys, or allow a column to be retrieved under an alias by
        # passing it as a (db_column_name, alias) tuple.
        db_columns = []
        aliases_colunms = []
        for column in columns:
            if isinstance(column, tuple):
                db_columns.append(column[0])
                aliases_colunms.append(column[1])
            else:
                db_columns.append(column)
                aliases_colunms.append(column.rsplit('.', 1)[-1])

        cmd = self._build_command(db_columns, tables, conditions, join)

        logger.debug(cmd)
        with self.conn.cursor() as cursor:
            cursor.execute(cmd)
            results = cursor.fetchall()
        self.conn.commit()

        if not as_dict:
            return results

        # Format the output as a list of dicts keyed by column name (or alias)
        output = []
        for result in results:
            entry = {}
            for k, v in zip(aliases_colunms, result):
                entry[k] = v
            output.append(entry)
        return output
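
To summarise the column handling above: a plain string keeps the part after the last dot as the dictionary key, while a (db_column_name, alias) tuple uses the alias. A sketch of a call made from within a DatabaseOutput method (table and column names are illustrative):

    rows = self._read_db(
        columns=['artifacts.name', ('largeobjects.lo_oid', 'path')],
        tables=['largeobjects', 'artifacts'],
        conditions=["artifacts.large_object_uuid = largeobjects.oid"])
    # each row comes back as a dict keyed as {'name': ..., 'path': ...}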

    def _get_pod_version(self):
        columns = ['_pod_version', '_pod_serialization_version']
        tables = ['{}s'.format(self.kind)]
        conditions = ['{}s.oid = \'{}\''.format(self.kind, self.oid)]
        results = self._read_db(columns, tables, conditions)
        if results:
            return results[0]
        else:
            return None

    def _populate_classifers(self, pod, kind):
        for entry in pod:
            oid = entry.pop('oid')
            entry['classifiers'] = self._get_classifiers(oid, kind)
        return pod

    def _get_classifiers(self, oid, kind):
        columns = ['classifiers.key', 'classifiers.value']
        tables = ['classifiers']
        conditions = ['{}_oid = \'{}\''.format(kind, oid)]
        results = self._read_db(columns, tables, conditions, as_dict=False)
        classifiers = {}
        for (k, v) in results:
            classifiers[k] = v
        return classifiers

    def _get_metrics(self):
        columns = ['metrics.name', 'metrics.value', 'metrics.units',
                   'metrics.lower_is_better',
                   'metrics.oid', 'metrics._pod_version',
                   'metrics._pod_serialization_version']
        tables = ['metrics']
        joins = [('classifiers', 'classifiers.metric_oid = metrics.oid')]
        conditions = ['metrics.{}_oid = \'{}\''.format(self.kind, self.oid)]
        pod = self._read_db(columns, tables, conditions, joins)
        return self._populate_classifers(pod, 'metric')

    def _get_status(self):
        columns = ['{}s.status'.format(self.kind)]
        tables = ['{}s'.format(self.kind)]
        conditions = ['{}s.oid = \'{}\''.format(self.kind, self.oid)]
        results = self._read_db(columns, tables, conditions, as_dict=False)
        if results:
            return results[0][0]
        else:
            return None

    def _get_artifacts(self):
        columns = ['artifacts.name', 'artifacts.description', 'artifacts.kind',
                   ('largeobjects.lo_oid', 'path'), 'artifacts.oid', 'artifacts.is_dir',
                   'artifacts._pod_version', 'artifacts._pod_serialization_version']
        tables = ['largeobjects', 'artifacts']
        joins = [('classifiers', 'classifiers.artifact_oid = artifacts.oid')]
        conditions = ['artifacts.{}_oid = \'{}\''.format(self.kind, self.oid),
                      'artifacts.large_object_uuid = largeobjects.oid']
        # When retrieving run-level artifacts, only include those that do not
        # also belong to a job.
        if self.kind == 'run':
            conditions.append('artifacts.job_oid IS NULL')
        pod = self._read_db(columns, tables, conditions, joins)
        for artifact in pod:
            artifact['path'] = str(artifact['path'])
        return self._populate_classifers(pod, 'artifact')

    def _get_events(self):
        columns = ['events.message', 'events.timestamp']
        tables = ['events']
        conditions = ['events.{}_oid = \'{}\''.format(self.kind, self.oid)]
        return self._read_db(columns, tables, conditions)


def kernel_config_from_db(raw):
    kernel_config = {}
    if raw:
        for k, v in zip(raw[0], raw[1]):
            kernel_config[k] = v
    return kernel_config
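
kernel_config_from_db expects the raw database value to be a pair of parallel sequences, keys first and values second; for example:

    raw = [['CONFIG_SMP', 'CONFIG_PREEMPT'], ['y', 'n']]   # illustrative values
    kernel_config_from_db(raw)    # -> {'CONFIG_SMP': 'y', 'CONFIG_PREEMPT': 'n'}
    kernel_config_from_db(None)   # -> {}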


class RunDatabaseOutput(DatabaseOutput, RunOutputCommon):

    kind = 'run'

    @property
    def basepath(self):
        return 'db:({})-{}@{}:{}'.format(self.dbname, self.user,
                                         self.host, self.port)

    @property
    def augmentations(self):
        columns = ['augmentations.name']
        tables = ['augmentations']
        conditions = ['augmentations.run_oid = \'{}\''.format(self.oid)]
        results = self._read_db(columns, tables, conditions, as_dict=False)
        return [a for augs in results for a in augs]

    @property
    def _db_infofile(self):
        columns = ['start_time', 'project', ('run_uuid', 'uuid'), 'end_time',
                   'run_name', 'duration', '_pod_version', '_pod_serialization_version']
        tables = ['runs']
        conditions = ['runs.run_uuid = \'{}\''.format(self.run_uuid)]
        pod = self._read_db(columns, tables, conditions)
        if not pod:
            return {}
        return pod[0]

    @property
    def _db_targetfile(self):
        columns = ['os', 'is_rooted', 'target', 'modules', 'abi', 'cpus', 'os_version',
                   'hostid', 'hostname', 'kernel_version', 'kernel_release',
                   'kernel_sha1', 'kernel_config', 'sched_features', 'page_size_kb',
                   'system_id', 'screen_resolution', 'prop', 'android_id',
                   '_pod_version', '_pod_serialization_version']
        tables = ['targets']
        conditions = ['targets.run_oid = \'{}\''.format(self.oid)]
        pod = self._read_db(columns, tables, conditions)
        if not pod:
            return {}
        pod = pod[0]
        try:
            pod['cpus'] = [json.loads(cpu) for cpu in pod.pop('cpus')]
        except SerializerSyntaxError:
            pod['cpus'] = []
            logger.debug('Failed to deserialize target cpu information')
        pod['kernel_config'] = kernel_config_from_db(pod['kernel_config'])
        return pod

    @property
    def _db_statefile(self):
        # Read overall run information
        columns = ['runs.state']
        tables = ['runs']
        conditions = ['runs.run_uuid = \'{}\''.format(self.run_uuid)]
        pod = self._read_db(columns, tables, conditions)
        pod = pod[0].get('state')
        if not pod:
            return {}

        # Read job information
        columns = ['jobs.job_id', 'jobs.oid']
        tables = ['jobs']
        conditions = ['jobs.run_oid = \'{}\''.format(self.oid)]
        job_oids = self._read_db(columns, tables, conditions)

        # Match each job's oid with the jobs from the state file
        for job in pod.get('jobs', []):
            for job_oid in job_oids:
                if job['id'] == job_oid['job_id']:
                    job['oid'] = job_oid['oid']
                    break
        return pod

    @property
    def _db_jobsfile(self):
        workload_params = self._get_parameters('workload')
        runtime_params = self._get_parameters('runtime')

        columns = [('jobs.job_id', 'id'), 'jobs.label', 'jobs.workload_name',
                   'jobs.oid', 'jobs._pod_version', 'jobs._pod_serialization_version']
        tables = ['jobs']
        conditions = ['jobs.run_oid = \'{}\''.format(self.oid)]
        jobs = self._read_db(columns, tables, conditions)

        for job in jobs:
            job['augmentations'] = self._get_job_augmentations(job['oid'])
            job['workload_parameters'] = workload_params.pop(job['oid'], {})
            job['runtime_parameters'] = runtime_params.pop(job['oid'], {})
            job.pop('oid')
        return jobs

    @property
    def _db_run_config(self):
        pod = defaultdict(dict)
        parameter_types = ['augmentation', 'resource_getter']
        for parameter_type in parameter_types:
            columns = ['parameters.name', 'parameters.value',
                       'parameters.value_type',
                       ('{}s.name'.format(parameter_type), '{}'.format(parameter_type))]
            tables = ['parameters', '{}s'.format(parameter_type)]
            conditions = ['parameters.run_oid = \'{}\''.format(self.oid),
                          'parameters.type = \'{}\''.format(parameter_type),
                          'parameters.{0}_oid = {0}s.oid'.format(parameter_type)]
            configs = self._read_db(columns, tables, conditions)
            for config in configs:
                entry = {config['name']: json.loads(config['value'])}
                pod['{}s'.format(parameter_type)][config.pop(parameter_type)] = entry

        # run config
        columns = ['runs.max_retries', 'runs.allow_phone_home',
                   'runs.bail_on_init_failure', 'runs.retry_on_status']
        tables = ['runs']
        conditions = ['runs.oid = \'{}\''.format(self.oid)]
        config = self._read_db(columns, tables, conditions)
        if not config:
            return {}

        config = config[0]
        # Convert the stored string representation back into a list of enum names
        config['retry_on_status'] = config['retry_on_status'][1:-1].split(',')
        pod.update(config)
        return pod

    def __init__(self,
                 password=None,
                 dbname='wa',
                 host='localhost',
                 port='5432',
                 user='postgres',
                 run_uuid=None,
                 list_runs=False):

        if psycopg2 is None:
            msg = 'Please install psycopg2 in order to connect to postgres databases'
            raise HostError(msg)

        self.dbname = dbname
        self.host = host
        self.port = port
        self.user = user
        self.password = password
        self.run_uuid = run_uuid
        self.conn = None

        self.info = None
        self.state = None
        self.result = None
        self.target_info = None
        self._combined_config = None
        self.jobs = []
        self.job_specs = []

        self.connect()
        super(RunDatabaseOutput, self).__init__(conn=self.conn, reload=False)

        local_schema_version, db_schema_version = get_schema_versions(self.conn)
        if local_schema_version != db_schema_version:
            self.disconnect()
            msg = 'The current database schema is v{}, however the local ' \
                  'schema version is v{}. Please update your database ' \
                  'with the create command.'
            raise HostError(msg.format(db_schema_version, local_schema_version))

        if list_runs:
            print('Available runs are:')
            self._list_runs()
            self.disconnect()
            return
        if not self.run_uuid:
            print('Please specify a run_uuid')
            self._list_runs()
            self.disconnect()
            return

        if not self.oid:
            self.oid = self._get_oid()
        self.reload()

    def read_job_specs(self):
        job_specs = []
        for job in self._db_jobsfile:
            job_specs.append(JobSpec.from_pod(job))
        return job_specs

    def connect(self):
        if self.conn and not self.conn.closed:
            return
        try:
            self.conn = psycopg2.connect(dbname=self.dbname,
                                         user=self.user,
                                         host=self.host,
                                         password=self.password,
                                         port=self.port)
        except Psycopg2Error as e:
            raise HostError('Unable to connect to the Database: "{}"'.format(e.args[0]))

    def disconnect(self):
        self.conn.commit()
        self.conn.close()

    def reload(self):
        super(RunDatabaseOutput, self).reload()
        info_pod = self._db_infofile
        state_pod = self._db_statefile
        if not info_pod or not state_pod:
            msg = '"{}" does not appear to be a valid WA Database Output.'
            raise ValueError(msg.format(self.oid))

        self.info = RunInfo.from_pod(info_pod)
        self.state = RunState.from_pod(state_pod)
        self._combined_config = CombinedConfig.from_pod({'run_config': self._db_run_config})
        self.target_info = TargetInfo.from_pod(self._db_targetfile)
        self.job_specs = self.read_job_specs()

        for job_state in self._db_statefile['jobs']:
            job = JobDatabaseOutput(self.conn, job_state.get('oid'), job_state['id'],
                                    job_state['label'], job_state['iteration'],
                                    job_state['retries'])
            job.status = job_state['status']
            job.spec = self.get_job_spec(job.id)
            if job.spec is None:
                logger.warning('Could not find spec for job {}'.format(job.id))
            self.jobs.append(job)

    def _get_oid(self):
        columns = ['{}s.oid'.format(self.kind)]
        tables = ['{}s'.format(self.kind)]
        conditions = ['runs.run_uuid = \'{}\''.format(self.run_uuid)]
        oid = self._read_db(columns, tables, conditions, as_dict=False)
        if not oid:
            raise ConfigError('No matching run entries found for run_uuid {}'.format(self.run_uuid))
        if len(oid) > 1:
            raise ConfigError('Multiple entries found for run_uuid: {}'.format(self.run_uuid))
        return oid[0][0]

    def _get_parameters(self, param_type):
        columns = ['parameters.job_oid', 'parameters.name', 'parameters.value']
        tables = ['parameters']
        conditions = ['parameters.type = \'{}\''.format(param_type),
                      'parameters.run_oid = \'{}\''.format(self.oid)]
        params = self._read_db(columns, tables, conditions, as_dict=False)
        parm_dict = defaultdict(dict)
        for (job_oid, k, v) in params:
            try:
                parm_dict[job_oid][k] = json.loads(v)
            except SerializerSyntaxError:
                logger.debug('Failed to deserialize job_oid:{}-"{}":"{}"'.format(job_oid, k, v))
        return parm_dict

    def _get_job_augmentations(self, job_oid):
        columns = ['jobs_augs.augmentation_oid', 'augmentations.name',
                   'augmentations.oid', 'jobs_augs.job_oid']
        tables = ['jobs_augs', 'augmentations']
        conditions = ['jobs_augs.job_oid = \'{}\''.format(job_oid),
                      'jobs_augs.augmentation_oid = augmentations.oid']
        augmentations = self._read_db(columns, tables, conditions)
        return [aug['name'] for aug in augmentations]

    def _list_runs(self):
        columns = ['runs.run_uuid', 'runs.run_name', 'runs.project',
                   'runs.project_stage', 'runs.status', 'runs.start_time', 'runs.end_time']
        tables = ['runs']
        pod = self._read_db(columns, tables)
        if pod:
            headers = ['Run Name', 'Project', 'Project Stage', 'Start Time', 'End Time',
                       'run_uuid']
            run_list = []
            for entry in pod:
                # Format the timestamps for nicer display
                start_time = entry['start_time']
                end_time = entry['end_time']
                if start_time:
                    start_time = start_time.strftime("%Y-%m-%d %H:%M:%S")
                if end_time:
                    end_time = end_time.strftime("%Y-%m-%d %H:%M:%S")

                run_list.append([
                    entry['run_name'],
                    entry['project'],
                    entry['project_stage'],
                    start_time,
                    end_time,
                    entry['run_uuid']])

            print(format_simple_table(run_list, headers))
        else:
            print('No Runs Found')
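
Putting the pieces together, a RunDatabaseOutput can be driven much like a file-based run output once a run UUID is known. A minimal sketch; the credentials and UUID are placeholders, and the attribute names follow the objects built in reload() above:

    ro = RunDatabaseOutput(password='secret',            # hypothetical credentials
                           dbname='wa', host='localhost',
                           port='5432', user='postgres',
                           run_uuid='0742c299-...')      # placeholder run UUID
    print(ro.info.run_name, ro.state.status)
    for metric in ro.result.metrics:
        print(metric)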


class JobDatabaseOutput(DatabaseOutput):

    kind = 'job'

    def __init__(self, conn, oid, job_id, label, iteration, retry):
        super(JobDatabaseOutput, self).__init__(conn, oid=oid)
        self.id = job_id
        self.label = label
        self.iteration = iteration
        self.retry = retry
        self.result = None
        self.spec = None
        self.reload()

    def __repr__(self):
        return '<{} {}-{}-{}>'.format(self.__class__.__name__,
                                      self.id, self.label, self.iteration)

    def __str__(self):
        return '{}-{}-{}'.format(self.id, self.label, self.iteration)

    @property
    def augmentations(self):
        job_augs = set([])
        if self.spec:
            for aug in self.spec.augmentations:
                job_augs.add(aug)
        return list(job_augs)
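
Per-job results can then be read off the jobs list that RunDatabaseOutput.reload() builds; a sketch under the same assumptions as the example above (metric attributes as defined earlier in this file):

    for job in ro.jobs:
        print(job, job.status)
        for metric in job.result.metrics:
            print('   ', metric.name, metric.value, metric.units or '')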

@@ -40,10 +40,10 @@ class OutputProcessor(Plugin):
                msg = 'Instrument "{}" is required by {}, but is not installed.'
                raise ConfigError(msg.format(instrument, self.name))

    def initialize(self):
    def initialize(self, context):
        pass

    def finalize(self):
    def finalize(self, context):
        pass

@@ -60,6 +60,7 @@ class ProcessorManager(object):
        self.logger.debug('Installing {}'.format(processor.name))
        processor.logger.context = context
        self.processors.append(processor)
        context.add_augmentation(processor)

    def disable_all(self):
        for output_processor in self.processors:

@@ -103,13 +104,13 @@ class ProcessorManager(object):
        for proc in self.processors:
            proc.validate()

    def initialize(self):
    def initialize(self, context):
        for proc in self.processors:
            proc.initialize()
            proc.initialize(context)

    def finalize(self):
    def finalize(self, context):
        for proc in self.processors:
            proc.finalize()
            proc.finalize(context)
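
With this change, initialize() and finalize() receive the context just like the other processor hooks. A sketch of an output processor written against the new signatures; the class is purely illustrative, and process_job_output() is assumed to keep its existing (output, target_info, run_output) signature:

    class SummaryPrinter(OutputProcessor):    # hypothetical example processor

        name = 'summary_printer_example'

        def initialize(self, context):
            # Called once per run, before any job output is processed.
            self.rows = []

        def process_job_output(self, output, target_info, run_output):
            for metric in output.metrics:
                self.rows.append((output.id, metric.name, metric.value))

        def finalize(self, context):
            # Called once per run, after all output has been processed.
            for row in self.rows:
                print(', '.join(map(str, row)))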

    def process_job_output(self, context):
        self.do_for_each_proc('process_job_output', 'Processing using "{}"',

@@ -18,8 +18,6 @@
import os
import sys
import inspect
import imp
import string
import logging
from collections import OrderedDict, defaultdict
from itertools import chain

@@ -32,16 +30,10 @@ from wa.framework.exception import (NotFoundError, PluginLoaderError, TargetError,
                                    ValidationError, ConfigError, HostError)
from wa.utils import log
from wa.utils.misc import (ensure_directory_exists as _d, walk_modules, load_class,
                           merge_dicts_simple, get_article)
                           merge_dicts_simple, get_article, import_path)
from wa.utils.types import identifier


if sys.version_info[0] == 3:
    MODNAME_TRANS = str.maketrans(':/\\.', '____')
else:
    MODNAME_TRANS = string.maketrans(':/\\.', '____')


class AttributeCollection(object):
    """
    Accumulator for plugin attribute objects (such as Parameters or Artifacts).

@@ -157,6 +149,7 @@ class Alias(object):
            raise ConfigError(msg.format(param, self.name, ext.name))


# pylint: disable=bad-mcs-classmethod-argument
class PluginMeta(type):
    """
    This basically adds some magic to plugins to make implementing new plugins,

@@ -246,7 +239,7 @@ class Plugin(with_metaclass(PluginMeta, object)):

    @classmethod
    def get_default_config(cls):
        return {p.name: p.default for p in cls.parameters}
        return {p.name: p.default for p in cls.parameters if not p.deprecated}

    @property
    def dependencies_directory(self):

@@ -367,7 +360,7 @@ class Plugin(with_metaclass(PluginMeta, object)):
        self._modules.append(module)

    def __str__(self):
        return self.name
        return str(self.name)

    def __repr__(self):
        params = []

@@ -383,12 +376,22 @@ class TargetedPlugin(Plugin):

    """

    suppoted_targets = []
    supported_targets = []

    parameters = [
        Parameter('cleanup_assets', kind=bool,
                  global_alias='cleanup_assets',
                  aliases=['clean_up'],
                  default=True,
                  description="""
                  If ``True``, assets that are deployed or created by the
                  plugin will be removed again from the device.
                  """),
    ]

    @classmethod
    def check_compatible(cls, target):
        if cls.suppoted_targets:
            if target.os not in cls.suppoted_targets:
        if cls.supported_targets:
            if target.os not in cls.supported_targets:
                msg = 'Incompatible target OS "{}" for {}'
                raise TargetError(msg.format(target.os, cls.name))
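
supported_targets is consulted by check_compatible(), so a plugin that should only ever run against Android targets can declare that in a single line; a hypothetical example:

    class AndroidOnlyExample(Workload):       # hypothetical workload plugin

        name = 'android_only_example'
        supported_targets = ['android']       # any other target.os raises TargetError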

@@ -611,24 +614,30 @@ class PluginLoader(object):
            self.logger.debug('Checking path %s', path)
            if os.path.isfile(path):
                self._discover_from_file(path)
            for root, _, files in os.walk(path, followlinks=True):
                should_skip = False
                for igpath in ignore_paths:
                    if root.startswith(igpath):
                        should_skip = True
                        break
                if should_skip:
                    continue
                for fname in files:
                    if os.path.splitext(fname)[1].lower() != '.py':
            elif os.path.exists(path):
                for root, _, files in os.walk(path, followlinks=True):
                    should_skip = False
                    for igpath in ignore_paths:
                        if root.startswith(igpath):
                            should_skip = True
                            break
                    if should_skip:
                        continue
                    filepath = os.path.join(root, fname)
                    self._discover_from_file(filepath)
                    for fname in files:
                        if os.path.splitext(fname)[1].lower() != '.py':
                            continue
                        filepath = os.path.join(root, fname)
                        self._discover_from_file(filepath)
            elif not os.path.isabs(path):
                try:
                    for module in walk_modules(path):
                        self._discover_in_module(module)
                except Exception:  # NOQA pylint: disable=broad-except
                    pass

    def _discover_from_file(self, filepath):
        try:
            modname = os.path.splitext(filepath[1:])[0].translate(MODNAME_TRANS)
            module = imp.load_source(modname, filepath)
            module = import_path(filepath)
            self._discover_in_module(module)
        except (SystemExit, ImportError) as e:
            if self.keep_going:

@@ -35,6 +35,7 @@ class __LoaderWrapper(object):
    def reset(self):
        # These imports cannot be done at top level, because of
        # sys.modules manipulation below
        # pylint: disable=import-outside-toplevel
        from wa.framework.plugin import PluginLoader
        from wa.framework.configuration.core import settings
        self._loader = PluginLoader(settings.plugin_packages,

@@ -16,15 +16,14 @@ import logging
import os
import re

from devlib.utils.android import ApkInfo

from wa.framework import pluginloader
from wa.framework.plugin import Plugin
from wa.framework.exception import ResourceError
from wa.framework.configuration import settings
from wa.utils import log
from wa.utils.android import get_cacheable_apk_info
from wa.utils.misc import get_object_name
from wa.utils.types import enum, list_or_string, prioritylist
from wa.utils.types import enum, list_or_string, prioritylist, version_tuple


SourcePriority = enum(['package', 'remote', 'lan', 'local',

@@ -142,10 +141,12 @@ class ApkFile(Resource):

    def __init__(self, owner, variant=None, version=None,
                 package=None, uiauto=False, exact_abi=False,
                 supported_abi=None):
                 supported_abi=None, min_version=None, max_version=None):
        super(ApkFile, self).__init__(owner)
        self.variant = variant
        self.version = version
        self.max_version = max_version
        self.min_version = min_version
        self.package = package
        self.uiauto = uiauto
        self.exact_abi = exact_abi

@@ -158,21 +159,25 @@
    def match(self, path):
        name_matches = True
        version_matches = True
        version_range_matches = True
        package_matches = True
        abi_matches = True
        uiauto_matches = uiauto_test_matches(path, self.uiauto)
        if self.version is not None:
        if self.version:
            version_matches = apk_version_matches(path, self.version)
        if self.variant is not None:
        if self.max_version or self.min_version:
            version_range_matches = apk_version_matches_range(path, self.min_version,
                                                              self.max_version)
        if self.variant:
            name_matches = file_name_matches(path, self.variant)
        if self.package is not None:
        if self.package:
            package_matches = package_name_matches(path, self.package)
        if self.supported_abi is not None:
        if self.supported_abi:
            abi_matches = apk_abi_matches(path, self.supported_abi,
                                          self.exact_abi)
        return name_matches and version_matches and \
            uiauto_matches and package_matches and \
            abi_matches
            version_range_matches and uiauto_matches \
            and package_matches and abi_matches
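
The new min_version/max_version parameters let a resource request accept any APK inside a version window instead of a single exact version. An illustrative request (the owner, package name and versions are placeholders):

    apk = ApkFile(self, package='com.example.app',
                  min_version='4.0.1', max_version='5.2')
    # apk.match(path) is True only if the APK's version_name falls inside the
    # requested window and the package/abi/uiauto checks above also pass.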

    def __str__(self):
        text = '<{}\'s apk'.format(self.owner)

@@ -273,15 +278,40 @@ class ResourceResolver(object):


def apk_version_matches(path, version):
    info = ApkInfo(path)
    if info.version_name == version or info.version_code == version:
        return True
    return loose_version_matching(version, info.version_name)
    version = list_or_string(version)
    info = get_cacheable_apk_info(path)
    for v in version:
        if v in (info.version_name, info.version_code):
            return True
        if loose_version_matching(v, info.version_name):
            return True
    return False


def apk_version_matches_range(path, min_version=None, max_version=None):
    info = get_cacheable_apk_info(path)
    return range_version_matching(info.version_name, min_version, max_version)


def range_version_matching(apk_version, min_version=None, max_version=None):
    if not apk_version:
        return False
    apk_version = version_tuple(apk_version)

    if max_version:
        max_version = version_tuple(max_version)
        if apk_version > max_version:
            return False
    if min_version:
        min_version = version_tuple(min_version)
        if apk_version < min_version:
            return False
    return True
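
Since both sides are converted with version_tuple(), the comparison is done per version component; a few illustrative outcomes (assuming version_tuple() yields tuples that compare component-wise):

    range_version_matching('4.1.2', min_version='4.0', max_version='5.0')   # True
    range_version_matching('5.1', max_version='5.0')                        # False
    range_version_matching('3.9', min_version='4.0')                        # False
    range_version_matching('', min_version='1.0')                           # False: empty versions never match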


def loose_version_matching(config_version, apk_version):
    config_version = config_version.split('.')
    apk_version = apk_version.split('.')
    config_version = version_tuple(config_version)
    apk_version = version_tuple(apk_version)

    if len(apk_version) < len(config_version):
        return False  # More specific version requested than available

@@ -302,18 +332,18 @@ def file_name_matches(path, pattern):


def uiauto_test_matches(path, uiauto):
    info = ApkInfo(path)
    info = get_cacheable_apk_info(path)
    return uiauto == ('com.arm.wa.uiauto' in info.package)


def package_name_matches(path, package):
    info = ApkInfo(path)
    info = get_cacheable_apk_info(path)
    return info.package == package


def apk_abi_matches(path, supported_abi, exact_abi=False):
    supported_abi = list_or_string(supported_abi)
    info = ApkInfo(path)
    info = get_cacheable_apk_info(path)
    # If no native code present, suitable for all devices.
    if not info.native_code:
        return True