Mirror of https://github.com/ARM-software/workload-automation.git (synced 2025-04-30 14:35:18 +01:00)
Compare commits
241 Commits
Commits (SHA1):

2d14c82f92 8598d1ba3c 523fb3f659 0732fa9cf0 b03f28d1d5 f125fd340d
75cfb56b38 b734e90de1 5670e571e1 45f09a66be 9638a084f9 4da8b0691f
412a785068 6fc5340f2f da667b58ac 4e9d402c24 e0bf7668b8 4839ab354f
b6ecc18763 7315041e90 adbb647fa7 366f59ebf7 0eb17bf8f0 f166ac742e
6fe4bce68d 28b78a93f1 77ebefba08 41f7984243 23fcb2c120 e38b51b242
ea08a4f9e6 5b56210d5f 0179202c90 617306fdda 8d4fe9556b 775b24f7a3
13f9c64513 6cd1c60715 05eab42f27 b113a8b351 d67d9bd2a4 11374aae3f
839242d636 b9b02f83fc 6aa1caad94 bf72a576e6 951eec991c 0b64b51259
f4ebca39a1 88b085c11b 36a909dda2 3228a3187c 5e0c59babb dc2fc99e98
46ff6e1f62 8b3f58e726 fe7a88e43e 61bb162350 d1e960e9b0 29a5a7fd43
37346fe1b1 40a118c8cd c4535320fa 08b87291f8 a3eacb877c 48152224a8
095d6bc100 8b94ed972d 276f146c1e 3b9fcd8001 88fb1de62b 7dc337b7d0
b0f9072830 b109acac05 9c7bae3440 7b5ffafbda be02ad649c 5a121983fc
69795628ed 7a332dfd5b 4bad433670 0b558e408c c023b9859c 284cc60b00
06b508107b cb1107df8f 789e150b0a 43cb80d854 31d306c23a 591c85edec
72298ff9ac f08770884a a5e5920aca 5558d43ddd c8ea525a00 c4c0230958
b65a371b9d 7f0a6da86b 75a70ad181 84b5ea8a56 4b54e17020 da4d10d4e7
8882feed84 7f82480a26 e4be2b73ef 22750b15c7 e3703f0e1e 4ddd610149
c5e3a421b1 0e2a150170 69378b0873 c543c49423 dd07d2ec43 94590e88ee
c2725ffaa2 751bbb19fe ae1bc2c031 91b791665a 62c4f3837c 3c5bece01e
cb51ef4d47 8e56a4c831 76032c1d05 4c20fe814a 92e253d838 18439e3b31
5cfe452a35 f1aff6b5a8 5dd3abe564 e3ab798f6e ed925938dc ed4eb8af5d
a1bdb7de45 fbe9460995 aa4df95a69 fbb84eca72 fbd6f4e90c 1c08360263
ff220dfb44 7489b487e1 ba5a65aad7 7bea3a69bb 971289698b 66e220d444
ae8a7bdfb5 b0355194bc 7817308bf7 ab9e29bdae 9edb6b20f0 879a491691
7086fa6b48 716e59daf5 08fcc7d30f 684121e2e7 0c1229df8c 615cbbc94d
1425a6f6c9 4557da2f80 7cf5fbd8af 3f5a31de96 7c6ebfb49c 8640f4f69a
460965363f d4057367d8 ef6cffd85a 37f4d33015 8c7320a1be 6d72a242ce
0c2613c608 b8301640f7 c473cfa8fe 1f0da5facf 39121caf66 83da20ce9f
f664a00bdc 443358f513 586d95a4f0 58f3ea35ec 7fe334b467 3967071a5e
cd6f4541ca 7e6eb089ab 491dcd5b5b 7a085e586a 0f47002e4e 6ff5abdffe
82d09612cb ecbfe32b9d 2d32d81acb b9d593e578 1f8be77331 66f0edec5b
e2489ea3a0 16be8a70f5 dce07e5095 711bff6a60 2a8454db6a 9b19f33186
53faf159e8 84a9526dd3 a3cf2e5650 607cff4c54 d56f0fbe20 0f9c20dc69
310bad3966 a8abf24db0 dad0a28b5e 2cd4bf7e31 5049e3663b c9ddee761a
3be00b296d 9a931f42ee 06ba8409c1 2da9370920 ef9b4c8919 31f4c0fd5f
62ca7c0c36 d0f099700a 5f00a94121 0f2de5f951 51ffd60c06 0a4164349b
fe50d75858 b93a8cbbd6 79dec810f3 44cead2f76 c6d23ab01f 6f9856cf2e
0f9331dafe 659e60414f 796f62d924 f60032a59d 977ce4995d a66251dd60
d3adfa1af9 39a294ddbe 164095e664 24a4a032db 05857ec2bc fd8a7e442c
dfb4737e51
Changed paths:

.github/
.readthedocs.yml
.travis.yml
README.rst
dev_scripts/
doc/
    build_instrument_method_map.py
    requirements.txt
    source/
        api/
        changes.rst
        conf.py
        developer_information/
            developer_reference/
            how_tos/
        instrument_method_map.template
        user_information/
extras/
requirements.txt
setup.py
tests/
wa/
    commands/
        create.py
        postgres_schemas/
        process.py
        report.py
        revent.py
        schema_changelog.rst
        show.py
        templates/uiauto/
    framework/
        command.py
        configuration/
        exception.py
        execution.py
        getters.py
        host.py
        instrument.py
        job.py
        output.py
        plugin.py
        pluginloader.py
        resource.py
        run.py
        signal.py
        target/
        uiauto/
        version.py
        workload.py
    instruments/
    output_processors/
    utils/
.github/ISSUE_TEMPLATE/bug_report.md (vendored, new file, +16)
@@ -0,0 +1,16 @@
---
name: Bug report
about: Create a report to help resolve an issue.
title: ''
labels: bug
assignees: ''

---

**Describe the issue**
A clear and concise description of what the bug is.

**Run Log**
Please attach your `run.log` detailing the issue.

**Other comments (optional)**
.github/ISSUE_TEMPLATE/feature_request.md (vendored, new file, +17)
@@ -0,0 +1,17 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is.

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Additional context**
Add any other context about the feature request here.
.github/ISSUE_TEMPLATE/question---support-.md (vendored, new file, +10)
@@ -0,0 +1,10 @@
---
name: 'Question / Support '
about: Ask a question or request support
title: ''
labels: question
assignees: ''

---

**
.github/ISSUE_TEMPLATE/question.md (vendored, new file, +11)
@@ -0,0 +1,11 @@
---
name: Question
about: Ask a question
title: ''
labels: question
assignees: ''

---

**Describe your query**
What would you like to know / what are you trying to achieve?
.github/workflows/main.yml (vendored, new file, +92)
@@ -0,0 +1,92 @@
name: WA Test Suite

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]
    types: [opened, synchronize, reopened, ready_for_review]
  schedule:
    - cron: 0 2 * * *
  # Allows running this workflow manually from the Actions tab
  workflow_dispatch:

jobs:
  Run-Linters-and-Tests:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python 3.8.18
        uses: actions/setup-python@v2
        with:
          python-version: 3.8.18
      - name: git-bash
        uses: pkg-src/github-action-git-bash@v1.1
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          cd /tmp && git clone https://github.com/ARM-software/devlib.git && cd devlib && pip install .
          cd $GITHUB_WORKSPACE && pip install .[test]
          python -m pip install pylint==2.6.2 pep8 flake8 mock nose
      - name: Run pylint
        run: |
          cd $GITHUB_WORKSPACE && ./dev_scripts/pylint wa/
      - name: Run PEP8
        run: |
          cd $GITHUB_WORKSPACE && ./dev_scripts/pep8 wa
      - name: Run nose tests
        run: |
          nosetests

  Execute-Test-Workload-and-Process:
    runs-on: ubuntu-22.04
    strategy:
      matrix:
        python-version: [3.7.17, 3.8.18, 3.9.21, 3.10.16, 3.13.2]
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: git-bash
        uses: pkg-src/github-action-git-bash@v1.1
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          cd /tmp && git clone https://github.com/ARM-software/devlib.git && cd devlib && pip install .
          cd $GITHUB_WORKSPACE && pip install .
      - name: Run test workload
        run: |
          cd /tmp && wa run $GITHUB_WORKSPACE/tests/ci/idle_agenda.yaml -v -d idle_workload
      - name: Test Process Command
        run: |
          cd /tmp && wa process -f -p csv idle_workload

  Test-WA-Commands:
    runs-on: ubuntu-22.04
    strategy:
      matrix:
        python-version: [3.7.17, 3.8.18, 3.9.21, 3.10.16, 3.13.2]
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: git-bash
        uses: pkg-src/github-action-git-bash@v1.1
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          cd /tmp && git clone https://github.com/ARM-software/devlib.git && cd devlib && pip install .
          cd $GITHUB_WORKSPACE && pip install .
      - name: Test Show Command
        run: |
          wa show dhrystone && wa show generic_android && wa show trace-cmd && wa show csv
      - name: Test List Command
        run: |
          wa list all
      - name: Test Create Command
        run: |
          wa create agenda dhrystone generic_android csv trace_cmd && wa create package test && wa create workload test
.readthedocs.yml
@@ -13,9 +13,16 @@ sphinx:
# Build the docs in additional formats such as PDF and ePub
formats: all

# Set the version of Python and requirements required to build your docs

# Configure the build environment
build:
  os: ubuntu-22.04
  tools:
    python: "3.11"

# Ensure doc dependencies are installed before building
python:
  version: 3.7
  install:
    - method: setuptools
    - requirements: doc/requirements.txt
    - method: pip
      path: .
.travis.yml (deleted, -46)
@@ -1,46 +0,0 @@
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

language: python

python:
  - "3.6"

install:
  - pip install nose
  - pip install nose2
  - pip install flake8
  - pip install pylint==1.9.2
  - git clone -v https://github.com/ARM-software/devlib.git /tmp/devlib && cd /tmp/devlib && python setup.py install
  - cd $TRAVIS_BUILD_DIR && python setup.py install

env:
  global:
    - PYLINT="cd $TRAVIS_BUILD_DIR && ./dev_scripts/pylint wa"
    - PEP8="cd $TRAVIS_BUILD_DIR && ./dev_scripts/pep8 wa"
    - NOSETESTS="nose2 -s $TRAVIS_BUILD_DIR/tests"
    - WORKLOAD="cd /tmp && wa run $TRAVIS_BUILD_DIR/tests/travis/idle_agenda.yaml -v -d idle_workload"
    - PROCESS_CMD="$WORKLOAD && wa process -f -p csv idle_workload"
    - SHOW_CMD="wa show dhrystone && wa show generic_android && wa show trace-cmd && wa show csv"
    - LIST_CMD="wa list all"
    - CREATE_CMD="wa create agenda dhrystone generic_android csv trace_cmd && wa create package test && wa create workload test"
  matrix:
    - TEST=$PYLINT
    - TEST=$PEP8
    - TEST=$NOSETESTS
    - TEST=$WORKLOAD
    - TEST="$PROCESS_CMD && $SHOW_CMD && $LIST_CMD && $CREATE_CMD"

script:
  - echo $TEST && eval $TEST
README.rst
@@ -18,7 +18,7 @@ workloads, instruments or output processing.
Requirements
============

- Python 3
- Python 3.5+
- Linux (should work on other Unixes, but untested)
- Latest Android SDK (ANDROID_HOME must be set) for Android devices, or
- SSH for Linux devices
dev_scripts/pep8
@@ -6,7 +6,7 @@ DEFAULT_DIRS=(

EXCLUDE=wa/tests,wa/framework/target/descriptor.py
EXCLUDE_COMMA=
IGNORE=E501,E265,E266,W391,E401,E402,E731,W504,W605,F401
IGNORE=E501,E265,E266,W391,E401,E402,E731,W503,W605,F401

if ! hash flake8 2>/dev/null; then
    echo "flake8 not found in PATH"
dev_scripts/pylint
@@ -36,6 +36,9 @@ pylint_version=$(python -c 'from pylint.__pkginfo__ import version; print(version)' 2>/dev/null)
if [ "x$pylint_version" == "x" ]; then
    pylint_version=$(python3 -c 'from pylint.__pkginfo__ import version; print(version)' 2>/dev/null)
fi
if [ "x$pylint_version" == "x" ]; then
    pylint_version=$(python3 -c 'from pylint import version; print(version)' 2>/dev/null)
fi
if [ "x$pylint_version" == "x" ]; then
    echo "ERROR: no pylint version found; is it installed?"
    exit 1
@@ -32,17 +32,11 @@ def transform(mod):
    if b'pylint:' in text[0]:
        msg = 'pylint directive found on the first line of {}; please move to below copyright header'
        raise RuntimeError(msg.format(mod.name))
    if sys.version_info[0] == 3:
        char = chr(text[0][0])
    else:
        char = text[0][0]
    char = chr(text[0][0])
    if text[0].strip() and char != '#':
        msg = 'first line of {} is not a comment; is the copyright header missing?'
        raise RuntimeError(msg.format(mod.name))
    if sys.version_info[0] == 3:
        text[0] = '# pylint: disable={}'.format(','.join(errors)).encode('utf-8')
    else:
        text[0] = '# pylint: disable={}'.format(','.join(errors))
    text[0] = '# pylint: disable={}'.format(','.join(errors)).encode('utf-8')
    mod.file_bytes = b'\n'.join(text)

    # This is what *should* happen, but doesn't work.
doc/build_instrument_method_map.py
@@ -28,8 +28,9 @@ OUTPUT_TEMPLATE_FILE = os.path.join(os.path.dirname(__file__), 'source', 'instrument_method_map.template')
def generate_instrument_method_map(outfile):
    signal_table = format_simple_table([(k, v) for k, v in SIGNAL_MAP.items()],
                                       headers=['method name', 'signal'], align='<<')
    priority_table = format_simple_table(zip(CallbackPriority.names, CallbackPriority.values),
                                         headers=['decorator', 'priority'], align='<>')
    decorator_names = map(lambda x: x.replace('high', 'fast').replace('low', 'slow'), CallbackPriority.names)
    priority_table = format_simple_table(zip(decorator_names, CallbackPriority.names, CallbackPriority.values),
                                         headers=['decorator', 'CallbackPriority name', 'CallbackPriority value'], align='<>')
    with open(OUTPUT_TEMPLATE_FILE) as fh:
        template = string.Template(fh.read())
    with open(outfile, 'w') as wfh:
doc/requirements.txt
@@ -1,4 +1,7 @@
nose
numpy
pandas
sphinx_rtd_theme>=0.3.1
sphinx_rtd_theme==1.0.0
sphinx==4.2
docutils<0.18
devlib @ git+https://github.com/ARM-software/devlib@master
doc/source/api/output.rst
@@ -284,6 +284,13 @@ methods
   :return: A list of `str` labels of workloads that were part of this run.


.. method:: RunOutput.add_classifier(name, value, overwrite=False)

   Add a classifier to the run as a whole. If a classifier with the specified
   ``name`` already exists, a ``ValueError`` will be raised, unless
   `overwrite=True` is specified.

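For illustration, a minimal usage sketch of the method above (this assumes an existing WA output directory, as in the processing example later in these docs; the classifier name and value are invented):

.. code-block:: python

    from wa.framework.output import RunOutput

    ro = RunOutput('./wa_output')
    ro.add_classifier('build', 'nightly-42')                  # tag the whole run
    ro.add_classifier('build', 'nightly-43', overwrite=True)  # replace the existing value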

:class:`RunDatabaseOutput`
---------------------------

@@ -402,7 +409,7 @@ artifacts, metadata, and configuration. It has the following attributes:
methods
~~~~~~~

.. method:: RunOutput.get_artifact(name)
.. method:: JobOutput.get_artifact(name)

   Return the :class:`Artifact` specified by ``name`` associated with this job.

@@ -410,7 +417,7 @@ methods
   :return: The :class:`Artifact` with that name
   :raises HostError: If the artifact with the specified name does not exist.

.. method:: RunOutput.get_artifact_path(name)
.. method:: JobOutput.get_artifact_path(name)

   Return the path to the file backing the artifact specified by ``name``,
   associated with this job.
@@ -419,13 +426,20 @@ methods
   :return: The path to the artifact
   :raises HostError: If the artifact with the specified name does not exist.

.. method:: RunOutput.get_metric(name)
.. method:: JobOutput.get_metric(name)

   Return the :class:`Metric` associated with this job with the specified
   `name`.

   :return: The :class:`Metric` object for the metric with the specified name.

.. method:: JobOutput.add_classifier(name, value, overwrite=False)

   Add a classifier to the job. The classifier will be propagated to all
   existing artifacts and metrics, as well as those added afterwards. If a
   classifier with the specified ``name`` already exists, a ``ValueError`` will
   be raised, unless `overwrite=True` is specified.

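A hedged sketch pulling the job-level accessors together (the metric and artifact names here are invented for illustration; per the reference above, ``get_artifact_path`` raises ``HostError`` if no artifact with that name exists):

.. code-block:: python

    from wa.framework.output import RunOutput

    ro = RunOutput('./wa_output')
    for job in ro.jobs:
        job.add_classifier('build', 'nightly-42')      # propagated to metrics and artifacts
        metric = job.get_metric('execution_time')      # a Metric object
        trace = job.get_artifact_path('error_trace')   # path to the backing file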

:class:`JobDatabaseOutput`
---------------------------

doc/source/changes.rst
@@ -2,6 +2,147 @@
What's New in Workload Automation
=================================

*************
Version 3.3.1
*************

.. warning:: This is the last release supporting Python 3.5 and Python 3.6.
             Subsequent releases will support Python 3.7+.

New Features:
==============

Commands:
---------

Instruments:
------------
- ``perf``: Add support for ``report-sample``.

Workloads:
----------------
- ``PCMark``: Add support for PCMark 3.0.
- ``Antutu``: Add support for 9.1.6.
- ``Geekbench``: Add support for Geekbench5.
- ``gfxbench``: Support the non-corporate version.

Fixes/Improvements
==================

Framework:
----------
- Fix installation on systems without git installed.
- Avoid querying online cpus if hotplug is disabled.

Dockerfile:
-----------
- Update base image to Ubuntu 20.04.

Instruments:
------------
- ``perf``: Fix parsing csv when using interval-only-values.
- ``perf``: Improve error reporting of an invalid agenda.

Output Processors:
------------------
- ``postgres``: Fixed SQL command when creating a new event.

Workloads:
----------
- ``speedometer``: Fix adb reverse when rebooting a device.
- ``googleplaybook``: Support newer apk version.
- ``googlephotos``: Support newer apk version.
- ``gmail``: Support newer apk version.

Other:
------
- Upgrade Android Gradle to 7.2 and Gradle plugin to 4.2.

***********
Version 3.3
***********

New Features:
==============

Commands:
---------
- Add ``report`` command to provide a summary of a run.

Instruments:
------------
- Add ``proc_stat`` instrument to monitor CPU load using data from ``/proc/stat``.

Framework:
----------
- Add support for simulating atomic writes to prevent race conditions when running concurrent instances of WA.
- Add support for file transfer over SSH connections via SFTP, falling back to an SCP implementation.
- Support detection of logcat buffer overflow and present a warning if this occurs.
- Allow skipping all remaining jobs if a job has exhausted all of its retries.
- Add polling mechanism for file transfers rather than relying on timeouts.
- Add `run_completed` reboot policy to enable rebooting a target after a run has been completed.


Android Devices:
----------------
- Enable configuration of whether to keep the screen on while the device is plugged in.

Output Processors:
------------------
- Enable the use of cascading deletion in Postgres databases to clean up after deletion of a run entry.


Fixes/Improvements
==================

Framework:
----------
- Improvements to the ``process`` command to correctly handle skipped and in-process jobs.
- Add support for deprecated parameters, allowing a warning to be raised when providing
  a parameter that will no longer have an effect.
- Switch implementation of SSH connections to use Paramiko for greater stability.
- By default use sftp for file transfers with SSH connections; allow falling back to scp
  by setting ``use_scp``.
- Fix callbacks not being disconnected correctly when requested.
- ``ApkInfo`` objects are now cached to reduce re-parsing of APK files.
- Speed up discovery of wa output directories.
- Fix merge handling of parameters from multiple files.

Dockerfile:
-----------
- Install additional instruments for use in the docker environment.
- Fix environment variables not being defined in non-interactive environments.

Instruments:
------------
- ``trace_cmd``: additional fixes for Python 3 support.

Output Processors:
------------------
- ``postgres``: Fixed SQL command when creating a new event.

Workloads:
----------
- ``aitutu``: Improve reliability of results extraction.
- ``androbench``: Enable dismissing of additional popups on some devices.
- ``antutu``: Now supports major version 8 in addition to version 7.X.
- ``exoplayer``: Add support for Android 10.
- ``googlephotos``: Support newer apk version.
- ``gfxbench``: Allow user configuration of which tests should be run.
- ``gfxbench``: Improved score detection for a wider range of devices.
- ``gfxbench``: Moved results extraction out of run stage.
- ``jankbench``: Support newer versions of Pandas for processing.
- ``pcmark``: Add support for handling additional popups and installation flows.
- ``pcmark``: No longer clear and re-download test data before each execution.
- ``speedometer``: Enable the workload to run offline and drop the requirement for
  UiAutomator. To support this, root access is now required to run the workload.
- ``youtube``: Update to support later versions of the apk.

Other:
------
- ``cpustates``: Improved name handling for unknown idle states.


***********
Version 3.2
***********
doc/source/conf.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2018 ARM Limited
# Copyright 2023 ARM Limited
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

@@ -68,7 +68,7 @@ master_doc = 'index'

# General information about the project.
project = u'wa'
copyright = u'2018, ARM Limited'
copyright = u'2023, ARM Limited'
author = u'ARM Limited'

# The version info for the project you're documenting, acts as replacement for
(Binary image file changed; diff suppressed. Size before: 63 KiB, after: 74 KiB.)
@@ -47,6 +47,10 @@ submitting a pull request:
- If significant additions have been made to the framework, unit
  tests should be added to cover the new functionality.

- If modifications have been made to the UI Automation source of a workload, the
  corresponding APK should be rebuilt and submitted as part of the same pull
  request. This can be done via the ``build.sh`` script in the relevant
  ``uiauto`` subdirectory.
- If modifications have been made to documentation (this includes description
  attributes for Parameters and Extensions), documentation should be built to
  make sure no errors or warnings occur during the build process, and a visual inspection
@@ -37,8 +37,8 @@ This section contains reference information common to plugins of all types.
The Context
~~~~~~~~~~~

.. note:: For clarification on the meaning of "workload specification" ("spec"), "job"
          and "workload" and the distiction between them, please see the :ref:`glossary <glossary>`.
.. note:: For clarification on the meaning of "workload specification" "spec", "job"
          and "workload" and the distinction between them, please see the :ref:`glossary <glossary>`.

The majority of methods in plugins accept a context argument. This is an
instance of :class:`wa.framework.execution.ExecutionContext`. It contains
@@ -119,7 +119,7 @@ context.output_directory
   This is the output directory for the current iteration. This will be an
   iteration-specific subdirectory under the main results location. If
   there is no current iteration (e.g. when processing overall run results)
   this will point to the same location as ``root_output_directory``.
   this will point to the same location as ``run_output_directory``.


Additionally, the global ``wa.settings`` object exposes one other location:
@@ -158,7 +158,7 @@ irrespective of the host's path notation. For example:
.. note:: Output processors, unlike workloads and instruments, do not have their
          own target attribute as they are designed to be able to be run offline.

.. _plugin-parmeters:
.. _plugin-parameters:

Parameters
~~~~~~~~~~~
@@ -336,6 +336,6 @@ recordings in scripts. Here is an example:

    from wa.utils.revent import ReventRecording

    with ReventRecording('/path/to/recording.revent') as recording:
        print "Recording: {}".format(recording.filepath)
        print "There are {} input events".format(recording.num_events)
        print "Over a total of {} seconds".format(recording.duration)
        print("Recording: {}".format(recording.filepath))
        print("There are {} input events".format(recording.num_events))
        print("Over a total of {} seconds".format(recording.duration))
@@ -58,22 +58,28 @@ will automatically generate a workload in your ``WA_CONFIG_DIR/plugins``. If
you wish to specify a custom location this can be provided with ``-p
<path>``

A typical invocation of the :ref:`create <create-command>` command would be in
the form::

    wa create workload -k <workload_kind> <workload_name>


.. _adding-a-basic-workload-example:

Adding a Basic Workload
-----------------------

To add a basic workload you can simply use the command::
To add a ``basic`` workload template for our example workload we can simply use the
command::

    wa create workload basic
    wa create workload -k basic ziptest

This will generate a very basic workload with dummy methods for the workload
interface and it is left to the developer to add any required functionality to
the workload.
This will generate a very basic workload with dummy methods for each method in
the workload interface and it is left to the developer to add any required functionality.

Not all the methods are required to be implemented, this example shows how a
subset might be used to implement a simple workload that times how long it takes
to compress a file of a particular size on the device.
Not all the methods from the interface are required to be implemented; this
example shows how a subset might be used to implement a simple workload that
times how long it takes to compress a file of a particular size on the device.


.. note:: This is intended as an example of how to implement the Workload
@@ -87,14 +93,15 @@ in this example we are implementing a very simple workload and do not
require any additional features so shall inherit directly from the base
:class:`Workload` class. We then need to provide a ``name`` for our workload
which is what will be used to identify your workload, for example in an
agenda or via the show command.
agenda or via the show command; if you used the `create` command this will
already be populated for you.

.. code-block:: python

    import os
    from wa import Workload, Parameter

    class ZipTestWorkload(Workload):
    class ZipTest(Workload):

        name = 'ziptest'

@@ -113,7 +120,7 @@ separated by a new line.
    '''

In order to allow for additional configuration of the workload from a user, a
list of :ref:`parameters <plugin-parmeters>` can be supplied. These can be
list of :ref:`parameters <plugin-parameters>` can be supplied. These can be
configured in a variety of different ways. For example, here we are ensuring that
the value of the parameter is an integer and larger than 0 using the ``kind``
and ``constraint`` options; also, if no value is provided we are providing a
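As a sketch, the parameter discussed here can be declared along these lines (the exact arguments are illustrative; the full ``ziptest`` listing appears further down):

.. code-block:: python

    parameters = [
        # Integer parameter constrained to positive values; the default is
        # used when an agenda does not override it.
        Parameter('file_size', kind=int, default=16,
                  constraint=lambda x: x > 0,
                  description='Size of the file (in MB) to be compressed.'),
    ]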
@@ -176,7 +183,7 @@ allow it to decide whether to keep the file or not.

        # Pull the results file to the host
        self.host_outfile = os.path.join(context.output_directory, 'timing_results')
        self.target.pull(self.target_outfile, self.host_outfile)
        context.add_artifact('ziptest-results', host_output_file, kind='raw')
        context.add_artifact('ziptest-results', self.host_outfile, kind='raw')

In the ``update_output`` method we can do any generation of metrics that we wish
for our workload. In this case we are going to simply convert the times reported
@@ -252,7 +259,7 @@ The full implementation of this workload would look something like:

        # Pull the results file to the host
        self.host_outfile = os.path.join(context.output_directory, 'timing_results')
        self.target.pull(self.target_outfile, self.host_outfile)
        context.add_artifact('ziptest-results', host_output_file, kind='raw')
        context.add_artifact('ziptest-results', self.host_outfile, kind='raw')

    def update_output(self, context):
        super(ZipTestWorkload, self).update_output(context)
@@ -485,9 +492,10 @@ Adding an Instrument
====================
This is an example of how we would create an instrument which will trace device
errors using a custom "trace" binary file. For more detailed information please see the
:ref:`Instrument Reference <instrument-reference>`. The first thing to do is to subclass
:class:`Instrument`, overwrite the variable name with what we want our instrument
to be called and locate our binary for our instrument.
:ref:`Instrument Reference <instrument-reference>`. The first thing to do is to create
a new file under ``$WA_USER_DIRECTORY/plugins/`` and subclass
:class:`Instrument`. Make sure to overwrite the variable name with what we want our instrument
to be called and then locate our binary for the instrument.

::

@@ -495,8 +503,8 @@ to be called and locate our binary for our instrument.

    name = 'trace-errors'

    def __init__(self, target):
        super(TraceErrorsInstrument, self).__init__(target)
    def __init__(self, target, **kwargs):
        super(TraceErrorsInstrument, self).__init__(target, **kwargs)
        self.binary_name = 'trace'
        self.binary_file = os.path.join(os.path.dirname(__file__), self.binary_name)
        self.trace_on_target = None
@@ -533,21 +541,20 @@ again decorated the method. ::
Once we have generated our result data we need to retrieve it from the device
for further processing or adding directly to WA's output for that job. For
example, for trace data we will want to pull it to the host and add it as an
:ref:`artifact <artifact>` to WA's :ref:`context <context>` as shown below::

    def extract_results(self, context):
        # pull the trace file from the target
        self.result = os.path.join(self.target.working_directory, 'trace.txt')
        self.target.pull(self.result, context.working_directory)
        context.add_artifact('error_trace', self.result, kind='export')

Once we have retrieved the data we can now do any further processing and add any
relevant :ref:`Metrics <metrics>` to the :ref:`context <context>`. For this we
will use the ``add_metric`` method to add the results to the final output
for that workload. The method can be passed 4 params, which are the metric
`key`, `value`, `unit` and `lower_is_better`. ::
:ref:`artifact <artifact>` to WA's :ref:`context <context>`. Once we have
retrieved the data, we can now do any further processing and add any relevant
:ref:`Metrics <metrics>` to the :ref:`context <context>`. For this we will use
the ``add_metric`` method to add the results to the final output for that
workload. The method can be passed 4 params, which are the metric `key`,
`value`, `unit` and `lower_is_better`. ::

    def update_output(self, context):
        # pull the trace file from the target
        self.result = os.path.join(self.target.working_directory, 'trace.txt')
        self.outfile = os.path.join(context.output_directory, 'trace.txt')
        self.target.pull(self.result, self.outfile)
        context.add_artifact('error_trace', self.outfile, kind='export')

        # parse the file if it needs to be parsed, or add result directly to
        # context.

@@ -567,12 +574,14 @@ At the very end of the run we would want to uninstall the binary we deployed earlier.

So the full example would look something like::

    from wa import Instrument

    class TraceErrorsInstrument(Instrument):

        name = 'trace-errors'

        def __init__(self, target):
            super(TraceErrorsInstrument, self).__init__(target)
        def __init__(self, target, **kwargs):
            super(TraceErrorsInstrument, self).__init__(target, **kwargs)
            self.binary_name = 'trace'
            self.binary_file = os.path.join(os.path.dirname(__file__), self.binary_name)
            self.trace_on_target = None
@@ -588,12 +597,12 @@ So the full example would look something like::

        def stop(self, context):
            self.target.execute('{} stop'.format(self.trace_on_target))

        def extract_results(self, context):
            self.result = os.path.join(self.target.working_directory, 'trace.txt')
            self.target.pull(self.result, context.working_directory)
            context.add_artifact('error_trace', self.result, kind='export')

        def update_output(self, context):
            self.result = os.path.join(self.target.working_directory, 'trace.txt')
            self.outfile = os.path.join(context.output_directory, 'trace.txt')
            self.target.pull(self.result, self.outfile)
            context.add_artifact('error_trace', self.outfile, kind='export')

            metric = # ..
            context.add_metric('number_of_errors', metric, lower_is_better=True)

@@ -609,8 +618,9 @@ Adding an Output Processor
==========================

This is an example of how we would create an output processor which will format
the run metrics as a column-aligned table. The first thing to do is to subclass
:class:`OutputProcessor` and overwrite the variable name with what we want our
the run metrics as a column-aligned table. The first thing to do is to create
a new file under ``$WA_USER_DIRECTORY/plugins/`` and subclass
:class:`OutputProcessor`. Make sure to overwrite the variable name with what we want our
processor to be called and provide a short description.

Next we need to implement any relevant methods, (please see
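A hedged sketch of the overall shape such a processor might take (the ``process_run_output`` hook and the ``Output`` attributes used here follow the patterns shown elsewhere in these docs; verify the exact signatures against your WA version before relying on them):

.. code-block:: python

    import os
    from wa import OutputProcessor

    class Table(OutputProcessor):

        name = 'table'
        description = 'Write run metrics to a column-aligned text table.'

        def process_run_output(self, output, target_info):
            # Collect (name, value, units) rows for every metric in the run.
            rows = [(m.name, str(m.value), m.units or '')
                    for job in output.jobs for m in job.metrics]
            if not rows:
                return
            widths = [max(len(row[i]) for row in rows) for i in range(3)]
            outfile = os.path.join(output.basepath, 'results_table.txt')
            with open(outfile, 'w') as wfh:
                for row in rows:
                    wfh.write('  '.join(c.ljust(w) for c, w in zip(row, widths)) + '\n')
            output.add_artifact('results_table', outfile, kind='export')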
doc/source/instrument_method_map.template
@@ -13,10 +13,11 @@ these signals are dispatched during execution please see the
$signal_names

The methods above may be decorated with one of the listed decorators to set the
priority of the Instrument method relative to other callbacks registered for the
signal (within the same priority level, callbacks are invoked in the order they
were registered). The table below shows the mapping of the decorator to the
corresponding priority:
priority (a value in the ``wa.framework.signal.CallbackPriority`` enum) of the
Instrument method relative to other callbacks registered for the signal (within
the same priority level, callbacks are invoked in the order they were
registered). The table below shows the mapping of the decorator to the
corresponding priority name and level:

$priority_prefixes

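For illustration, a hedged sketch of such decorators in use (the ``fast``/``slow`` names follow the high-to-fast/low-to-slow renaming in the build script above; check ``wa.framework.instrument`` in your WA version for the exact exported names):

.. code-block:: python

    from wa import Instrument
    from wa.framework.instrument import fast, slow

    class ExampleInstrument(Instrument):

        name = 'example'

        @fast
        def start(self, context):
            # Invoked before normal-priority callbacks for the same signal.
            pass

        @slow
        def update_output(self, context):
            # Invoked after normal-priority callbacks for the same signal.
            pass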
@@ -690,7 +690,7 @@ Workload-specific augmentation

It is possible to enable or disable (but not configure) augmentations at
workload or section level, as well as in the global config, in which case, the
augmentations would only be enabled/disabled for that workload/section. If the
same augmentation is enabled at one level and disabled at another, as will all
same augmentation is enabled at one level and disabled at another, as with all
WA configuration, the more specific settings will take precedence over the less
specific ones (i.e. workloads override sections that, in turn, override global
config).
@@ -23,11 +23,11 @@ Prerequisites

Operating System
----------------

WA runs on a native Linux install. It was tested with Ubuntu 14.04,
but any recent Linux distribution should work. It should run on either
32-bit or 64-bit OS, provided the correct version of Android (see below)
was installed. Officially, **other environments are not supported**. WA
has been known to run on Linux Virtual machines and in Cygwin environments,
WA runs on a native Linux install. It has been tested on recent Ubuntu releases,
but other recent Linux distributions should work as well. It should run on
either 32-bit or 64-bit OS, provided the correct version of dependencies (see
below) are installed. Officially, **other environments are not supported**.
WA has been known to run on Linux Virtual machines and in Cygwin environments,
though additional configuration may be required in both cases (known issues
include making sure USB/serial connections are passed to the VM, and wrong
python/pip binaries being picked up in Cygwin). WA *should* work on other
@@ -46,7 +46,8 @@ possible to get limited functionality with minimal porting effort).
Android SDK
-----------

You need to have the Android SDK with at least one platform installed.
To interact with Android devices you will need to have the Android SDK
with at least one platform installed.
To install it, download the ADT Bundle from here_. Extract it
and add ``<path_to_android_sdk>/sdk/platform-tools`` and ``<path_to_android_sdk>/sdk/tools``
to your ``PATH``. To test that you've installed it properly, run ``adb
@@ -73,7 +74,11 @@ the install location of the SDK (i.e. ``<path_to_android_sdk>/sdk``).
Python
------

Workload Automation 3 currently supports both Python 2.7 and Python 3.
Workload Automation 3 currently supports Python 3.5+.

.. note:: If your system's default python version is still Python 2, please
          replace the commands listed here with their Python3 equivalent
          (e.g. python3, pip3 etc.)

.. _pip:
@@ -95,11 +100,11 @@ similar distributions, this may be done with APT::

    sudo -H pip install --upgrade pip
    sudo -H pip install --upgrade setuptools

If you do run into this issue after already installing some packages,
you can resolve it by running ::

    sudo chmod -R a+r /usr/local/lib/python2.7/dist-packages
    sudo find /usr/local/lib/python2.7/dist-packages -type d -exec chmod a+x {} \;
    sudo chmod -R a+r /usr/local/lib/python3.X/dist-packages
    sudo find /usr/local/lib/python3.X/dist-packages -type d -exec chmod a+x {} \;

(The paths above will work for Ubuntu; they may need to be adjusted
for other distros).
@@ -187,12 +192,12 @@ Installing

Installing the latest released version from PyPI (Python Package Index)::

    sudo -H pip install wa
    sudo -H pip install wlauto

This will install WA along with its mandatory dependencies. If you would like to
install all optional dependencies at the same time, do the following instead::

    sudo -H pip install wa[all]
    sudo -H pip install wlauto[all]


Alternatively, you can also install the latest development version from GitHub
@@ -20,7 +20,7 @@ Install
.. note:: This is a quick summary. For more detailed instructions, please see
          the :ref:`installation` section.

Make sure you have Python 2.7 or Python 3 and a recent Android SDK with API
Make sure you have Python 3.5+ and a recent Android SDK with API
level 18 or above installed on your system. A complete install of the Android
SDK is required, as WA uses a number of its utilities, not just adb. For the
SDK, make sure that either ``ANDROID_HOME`` environment variable is set, or that
@@ -352,13 +352,13 @@ in-depth information please see the :ref:`Create Command <create-command>` documentation.

In order to populate the agenda with relevant information you can supply all of
the plugins you wish to use as arguments to the command. For example, if we want
to create an agenda file for running ``dhystrone`` on a 'generic android' device and we
to create an agenda file for running ``dhrystone`` on a `generic_android` device and we
want to enable the ``execution_time`` and ``trace-cmd`` instruments and display the
metrics using the ``csv`` output processor, we would use the following command::

    wa create agenda generic_android dhrystone execution_time trace-cmd csv -o my_agenda.yaml

This will produce a `my_agenda.yaml` file containing all the relevant
This will produce a ``my_agenda.yaml`` file containing all the relevant
configuration for the specified plugins along with their default values as shown
below:
@@ -373,6 +373,7 @@ below:
        device: generic_android
        device_config:
            adb_server: null
            adb_port: null
            big_core: null
            core_clusters: null
            core_names: null
@@ -399,6 +400,7 @@ below:
            no_install: false
            report: true
            report_on_target: false
            mode: write-to-memory
        csv:
            extra_columns: null
            use_all_classifiers: false
@@ -483,14 +485,14 @@ that parses the contents of the output directory:

    >>> ro = RunOutput('./wa_output')
    >>> for job in ro.jobs:
    ...     if job.status != 'OK':
    ...         print 'Job "{}" did not complete successfully: {}'.format(job, job.status)
    ...         print('Job "{}" did not complete successfully: {}'.format(job, job.status))
    ...         continue
    ...     print 'Job "{}":'.format(job)
    ...     print('Job "{}":'.format(job))
    ...     for metric in job.metrics:
    ...         if metric.units:
    ...             print '\t{}: {} {}'.format(metric.name, metric.value, metric.units)
    ...             print('\t{}: {} {}'.format(metric.name, metric.value, metric.units))
    ...         else:
    ...             print '\t{}: {}'.format(metric.name, metric.value)
    ...             print('\t{}: {}'.format(metric.name, metric.value))
    ...
    Job "wk1-dhrystone-1":
        thread 0 score: 20833333
@@ -45,6 +45,7 @@ An example agenda can be seen here:
            no_install: false
            report: true
            report_on_target: false
            mode: write-to-disk
        csv:                      # Provide config for the csv augmentation
            use_all_classifiers: true
@@ -40,7 +40,7 @@ Will display help for this subcommand that will look something like this:
    AGENDA                Agenda for this workload automation run. This defines
                          which workloads will be executed, how many times, with
                          which tunables, etc. See example agendas in
                          /usr/local/lib/python2.7/dist-packages/wa for an
                          /usr/local/lib/python3.X/dist-packages/wa for an
                          example of how this file should be structured.

    optional arguments:
@@ -33,6 +33,7 @@ states.
            iterations: 1
            runtime_parameters:
                screen_on: false
                unlock_screen: 'vertical'
        - name: benchmarkpi
          iterations: 1
    sections:
@@ -208,6 +209,13 @@ Android Specific Runtime Parameters

:screen_on: A ``boolean`` to specify whether the device's screen should be
    turned on. Defaults to ``True``.

:unlock_screen: A ``String`` to specify how the device's screen should be
    unlocked. Unlocking the screen is disabled by default. ``vertical``, ``diagonal``
    and ``horizontal`` are the supported values (see :meth:`devlib.AndroidTarget.swipe_to_unlock`).
    Note that unlocking succeeds only when no passcode is set. Since unlocking
    requires turning on the screen, this option overrides the value of the ``screen_on``
    option.

.. _setting-sysfiles:

Setting Sysfiles
Dockerfile
@@ -17,6 +17,13 @@
#
#   docker run -it --privileged -v /dev/bus/usb:/dev/bus/usb --volume ${PWD}:/workspace --workdir /workspace wa
#
# If using selinux you may need to add the `z` option when mounting
# volumes e.g.:
#   --volume ${PWD}:/workspace:z
# Warning: Please ensure you do not use this option when mounting
# system directories. For more information please see:
# https://docs.docker.com/storage/bind-mounts/#configure-the-selinux-label
#
# The above command starts the container in privileged mode, with
# access to USB devices. The current directory is mounted into the
# image, allowing you to work from there. Any files written to this
@@ -32,22 +39,61 @@
#
# When you are finished, please run `exit` to leave the container.
#
# The relevant environment variables are stored in a separate
# file which is automatically sourced in an interactive shell.
# If running from a non-interactive environment this can
# be manually sourced with `source /home/wa/.wa_environment`
#
# NOTE: Please make sure that the ADB server is NOT running on the
# host. If in doubt, run `adb kill-server` before running the docker
# container.
#

# We want to make sure to base this on a recent ubuntu release
FROM ubuntu:19.10
FROM ubuntu:20.04

# Please update the references below to use different versions of
# devlib, WA or the Android SDK
ARG DEVLIB_REF=v1.2
ARG WA_REF=v3.2
ARG DEVLIB_REF=v1.3.4
ARG WA_REF=v3.3.1
ARG ANDROID_SDK_URL=https://dl.google.com/android/repository/sdk-tools-linux-3859397.zip

RUN apt-get update
RUN apt-get install -y python3 python3-pip git wget zip openjdk-8-jre-headless vim emacs nano curl sshpass ssh usbutils locales
# Set a default timezone to use
ENV TZ=Europe/London

ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install -y \
    apache2-utils \
    bison \
    cmake \
    curl \
    emacs \
    flex \
    git \
    libcdk5-dev \
    libiio-dev \
    libxml2 \
    libxml2-dev \
    locales \
    nano \
    openjdk-8-jre-headless \
    python3 \
    python3-pip \
    ssh \
    sshpass \
    sudo \
    trace-cmd \
    usbutils \
    vim \
    wget \
    zip

# Clone and download iio-capture
RUN git clone -v https://github.com/BayLibre/iio-capture.git /tmp/iio-capture && \
    cd /tmp/iio-capture && \
    make && \
    make install

RUN pip3 install pandas

# Ensure we're using utf-8 as our default encoding
@@ -57,8 +103,16 @@ ENV LANGUAGE en_US:en
ENV LC_ALL en_US.UTF-8

# Let's get the two repos we need, and install them
RUN git clone -v https://github.com/ARM-software/devlib.git /tmp/devlib && cd /tmp/devlib && git checkout $DEVLIB_REF && python3 setup.py install && pip3 install .[full]
RUN git clone -v https://github.com/ARM-software/workload-automation.git /tmp/wa && cd /tmp/wa && git checkout $WA_REF && python3 setup.py install && pip3 install .[all]
RUN git clone -v https://github.com/ARM-software/devlib.git /tmp/devlib && \
    cd /tmp/devlib && \
    git checkout $DEVLIB_REF && \
    python3 setup.py install && \
    pip3 install .[full]
RUN git clone -v https://github.com/ARM-software/workload-automation.git /tmp/wa && \
    cd /tmp/wa && \
    git checkout $WA_REF && \
    python3 setup.py install && \
    pip3 install .[all]

# Clean-up
RUN rm -R /tmp/devlib /tmp/wa
@@ -72,10 +126,19 @@ RUN mkdir -p /home/wa/.android
RUN mkdir -p /home/wa/AndroidSDK && cd /home/wa/AndroidSDK && wget $ANDROID_SDK_URL -O sdk.zip && unzip sdk.zip
RUN cd /home/wa/AndroidSDK/tools/bin && yes | ./sdkmanager --licenses && ./sdkmanager platform-tools && ./sdkmanager 'build-tools;27.0.3'

# Update the path
RUN echo 'export PATH=/home/wa/AndroidSDK/platform-tools:${PATH}' >> /home/wa/.bashrc
RUN echo 'export PATH=/home/wa/AndroidSDK/build-tools:${PATH}' >> /home/wa/.bashrc
RUN echo 'export ANDROID_HOME=/home/wa/AndroidSDK' >> /home/wa/.bashrc
# Download Monsoon
RUN mkdir -p /home/wa/monsoon
RUN curl https://android.googlesource.com/platform/cts/+/master/tools/utils/monsoon.py\?format\=TEXT | base64 --decode > /home/wa/monsoon/monsoon.py
RUN chmod +x /home/wa/monsoon/monsoon.py

# Update WA's required environment variables.
RUN echo 'export PATH=/home/wa/monsoon:${PATH}' >> /home/wa/.wa_environment
RUN echo 'export PATH=/home/wa/AndroidSDK/platform-tools:${PATH}' >> /home/wa/.wa_environment
RUN echo 'export PATH=/home/wa/AndroidSDK/build-tools:${PATH}' >> /home/wa/.wa_environment
RUN echo 'export ANDROID_HOME=/home/wa/AndroidSDK' >> /home/wa/.wa_environment

# Source WA environment variables in an interactive environment
RUN echo 'source /home/wa/.wa_environment' >> /home/wa/.bashrc

# Generate some ADB keys. These will change each time the image is built but will otherwise persist.
RUN /home/wa/AndroidSDK/platform-tools/adb keygen /home/wa/.android/adbkey
extras/pylintrc
@@ -43,7 +43,7 @@ ignore=external
# https://bitbucket.org/logilab/pylint/issue/232/wrong-hanging-indentation-false-positive
# TODO: disabling no-value-for-parameter and logging-format-interpolation, as they appear to be broken
# in version 1.4.1 and return a lot of false positives; should be re-enabled once fixed.
disable=C0301,C0103,C0111,W0142,R0903,R0904,R0922,W0511,W0141,I0011,R0921,W1401,C0330,no-value-for-parameter,logging-format-interpolation,no-else-return,inconsistent-return-statements,keyword-arg-before-vararg,consider-using-enumerate,no-member
disable=C0301,C0103,C0111,W0142,R0903,R0904,R0922,W0511,W0141,I0011,R0921,W1401,C0330,no-value-for-parameter,logging-format-interpolation,no-else-return,inconsistent-return-statements,keyword-arg-before-vararg,consider-using-enumerate,no-member,super-with-arguments,useless-object-inheritance,raise-missing-from,no-else-raise,no-else-break,no-else-continue

[FORMAT]
max-module-lines=4000
requirements.txt
@@ -1,21 +1,30 @@
certifi==2019.11.28
chardet==3.0.4
colorama==0.4.3
devlib==1.2.0
future==0.18.2
idna==2.8
bcrypt==4.0.1
certifi==2024.7.4
cffi==1.15.1
charset-normalizer==3.1.0
colorama==0.4.6
cryptography==43.0.1
devlib==1.3.4
future==0.18.3
idna==3.7
Louie-latest==1.3.1
lxml==4.9.2
nose==1.3.7
numpy==1.17.4
pandas==0.25.3
pexpect==4.7.0
ptyprocess==0.6.0
pyserial==3.4
python-dateutil==2.8.1
pytz==2019.3
PyYAML==5.2
requests==2.22.0
six==1.13.0
urllib3==1.25.7
wlauto==3.2.0
wrapt==1.11.2
numpy==1.24.3
pandas==2.0.1
paramiko==3.4.0
pexpect==4.8.0
ptyprocess==0.7.0
pycparser==2.21
PyNaCl==1.5.0
pyserial==3.5
python-dateutil==2.8.2
pytz==2023.3
PyYAML==6.0
requests==2.32.0
scp==0.14.5
six==1.16.0
tzdata==2023.3
urllib3==1.26.19
wlauto==3.3.1
wrapt==1.15.0
setup.py (+1)
@@ -79,6 +79,7 @@ params = dict(
    license='Apache v2',
    maintainer='ARM Architecture & Technology Device Lab',
    maintainer_email='workload-automation@arm.com',
    python_requires='>= 3.7',
    setup_requires=[
        'numpy<=1.16.4; python_version<"3"',
        'numpy; python_version>="3"',
tests/test_config.py
@@ -16,6 +16,7 @@
import unittest
from nose.tools import assert_equal

from wa.framework.configuration.execution import ConfigManager
from wa.utils.misc import merge_config_values

@@ -38,3 +39,21 @@ class TestConfigUtils(unittest.TestCase):
        if v2 is not None:
            assert_equal(type(result), type(v2))


class TestConfigParser(unittest.TestCase):

    def test_param_merge(self):
        config = ConfigManager()

        config.load_config({'workload_params': {'one': 1, 'three': {'ex': 'x'}}, 'runtime_params': {'aye': 'a'}}, 'file_one')
        config.load_config({'workload_params': {'two': 2, 'three': {'why': 'y'}}, 'runtime_params': {'bee': 'b'}}, 'file_two')

        assert_equal(
            config.jobs_config.job_spec_template['workload_parameters'],
            {'one': 1, 'two': 2, 'three': {'why': 'y'}},
        )
        assert_equal(
            config.jobs_config.job_spec_template['runtime_parameters'],
            {'aye': 'a', 'bee': 'b'},
        )
tests/test_exec_control.py
@@ -21,7 +21,8 @@ from nose.tools import assert_equal, assert_raises

from wa.utils.exec_control import (init_environment, reset_environment,
                                   activate_environment, once,
                                   once_per_class, once_per_instance)
                                   once_per_class, once_per_instance,
                                   once_per_attribute_value)

class MockClass(object):

@@ -110,6 +111,18 @@ class AnotherClass(object):
        self.count += 1


class NamedClass:

    count = 0

    def __init__(self, name):
        self.name = name

    @once_per_attribute_value('name')
    def initilize(self):
        NamedClass.count += 1


class AnotherSubClass(MockClass):

    def __init__(self):
@@ -352,3 +365,30 @@ class OncePerInstanceEnvironmentTest(TestCase):
        asc.initilize_once_per_instance()
        asc.initilize_once_per_instance()
        assert_equal(asc.count, 2)


+class OncePerAttributeValueTest(TestCase):
+
+    def setUp(self):
+        activate_environment('TEST_ENVIRONMENT')
+
+    def tearDown(self):
+        reset_environment('TEST_ENVIRONMENT')
+
+    def test_once_attribute_value(self):
+        classes = [
+            NamedClass('Rick'),
+            NamedClass('Morty'),
+            NamedClass('Rick'),
+            NamedClass('Morty'),
+            NamedClass('Morty'),
+            NamedClass('Summer'),
+        ]
+
+        for c in classes:
+            c.initilize()
+
+        for c in classes:
+            c.initilize()
+
+        assert_equal(NamedClass.count, 3)
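The diff adds tests for once_per_attribute_value but not the decorator itself. As a rough mental model only (an illustrative sketch, not the wa.utils.exec_control implementation, which is additionally scoped to the active environment so reset_environment() clears it):

    # Sketch: run the decorated method at most once per distinct value of the
    # named attribute, shared across all instances of the class.
    def once_per_attribute_value(attr_name):
        def wrapper(method):
            seen = set()  # attribute values for which the method already ran

            def wrapped(self, *args, **kwargs):
                value = getattr(self, attr_name)
                if value in seen:
                    return None
                seen.add(value)
                return method(self, *args, **kwargs)
            return wrapped
        return wrapper

Under these semantics the six NamedClass instances above ('Rick', 'Morty', 'Summer') produce exactly three initilize() executions, matching the final assertion.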
tests/test_execution.py (new file, 315 lines)
@@ -0,0 +1,315 @@
# Copyright 2020 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
import tempfile
from unittest import TestCase

from mock.mock import Mock
from nose.tools import assert_equal
from datetime import datetime

from wa.framework.configuration import RunConfiguration
from wa.framework.configuration.core import JobSpec, Status
from wa.framework.execution import ExecutionContext, Runner
from wa.framework.job import Job
from wa.framework.output import RunOutput, init_run_output
from wa.framework.output_processor import ProcessorManager
import wa.framework.signal as signal
from wa.framework.run import JobState
from wa.framework.exception import ExecutionError


class MockConfigManager(Mock):

    @property
    def jobs(self):
        return self._joblist

    @property
    def loaded_config_sources(self):
        return []

    @property
    def plugin_cache(self):
        return MockPluginCache()

    def __init__(self, *args, **kwargs):
        super(MockConfigManager, self).__init__(*args, **kwargs)
        self._joblist = None
        self.run_config = RunConfiguration()

    def to_pod(self):
        return {}


class MockPluginCache(Mock):

    def list_plugins(self, kind=None):
        return []


class MockProcessorManager(Mock):

    def __init__(self, *args, **kwargs):
        super(MockProcessorManager, self).__init__(*args, **kwargs)

    def get_enabled(self):
        return []


class JobState_force_retry(JobState):

    @property
    def status(self):
        return self._status

    @status.setter
    def status(self, value):
        if (self.retries != self.times_to_retry) and (value == Status.RUNNING):
            self._status = Status.FAILED
            if self.output:
                self.output.status = Status.FAILED
        else:
            self._status = value
            if self.output:
                self.output.status = value

    def __init__(self, to_retry, *args, **kwargs):
        self.retries = 0
        self._status = Status.NEW
        self.times_to_retry = to_retry
        self.output = None
        super(JobState_force_retry, self).__init__(*args, **kwargs)


class Job_force_retry(Job):
    '''This class imitates a job that retries as many times as specified by
    ``to_retry`` in its constructor'''

    def __init__(self, to_retry, *args, **kwargs):
        super(Job_force_retry, self).__init__(*args, **kwargs)
        self.state = JobState_force_retry(to_retry, self.id, self.label, self.iteration, Status.NEW)
        self.initialized = False
        self.finalized = False

    def initialize(self, context):
        self.initialized = True
        return super().initialize(context)

    def finalize(self, context):
        self.finalized = True
        return super().finalize(context)


class TestRunState(TestCase):

    def setUp(self):
        self.path = tempfile.mkstemp()[1]
        os.remove(self.path)
        self.initialise_signals()
        self.context = get_context(self.path)
        self.job_spec = get_jobspec()

    def tearDown(self):
        signal.disconnect(self._verify_serialized_state, signal.RUN_INITIALIZED)
        signal.disconnect(self._verify_serialized_state, signal.JOB_STARTED)
        signal.disconnect(self._verify_serialized_state, signal.JOB_RESTARTED)
        signal.disconnect(self._verify_serialized_state, signal.JOB_COMPLETED)
        signal.disconnect(self._verify_serialized_state, signal.JOB_FAILED)
        signal.disconnect(self._verify_serialized_state, signal.JOB_ABORTED)
        signal.disconnect(self._verify_serialized_state, signal.RUN_FINALIZED)

    def test_job_state_transitions_pass(self):
        '''Tests state equality when the job passes first try'''
        job = Job(self.job_spec, 1, self.context)
        job.workload = Mock()

        self.context.cm._joblist = [job]
        self.context.run_state.add_job(job)

        runner = Runner(self.context, MockProcessorManager())
        runner.run()

    def test_job_state_transitions_fail(self):
        '''Tests state equality when job fails completely'''
        job = Job_force_retry(3, self.job_spec, 1, self.context)
        job.workload = Mock()

        self.context.cm._joblist = [job]
        self.context.run_state.add_job(job)

        runner = Runner(self.context, MockProcessorManager())
        runner.run()

    def test_job_state_transitions_retry(self):
        '''Tests state equality when job fails initially'''
        job = Job_force_retry(1, self.job_spec, 1, self.context)
        job.workload = Mock()

        self.context.cm._joblist = [job]
        self.context.run_state.add_job(job)

        runner = Runner(self.context, MockProcessorManager())
        runner.run()

    def initialise_signals(self):
        signal.connect(self._verify_serialized_state, signal.RUN_INITIALIZED)
        signal.connect(self._verify_serialized_state, signal.JOB_STARTED)
        signal.connect(self._verify_serialized_state, signal.JOB_RESTARTED)
        signal.connect(self._verify_serialized_state, signal.JOB_COMPLETED)
        signal.connect(self._verify_serialized_state, signal.JOB_FAILED)
        signal.connect(self._verify_serialized_state, signal.JOB_ABORTED)
        signal.connect(self._verify_serialized_state, signal.RUN_FINALIZED)

    def _verify_serialized_state(self, _):
        fs_state = RunOutput(self.path).state
        ex_state = self.context.run_output.state

        assert_equal(fs_state.status, ex_state.status)
        fs_js_zip = zip(
            [value for key, value in fs_state.jobs.items()],
            [value for key, value in ex_state.jobs.items()]
        )
        for fs_jobstate, ex_jobstate in fs_js_zip:
            assert_equal(fs_jobstate.iteration, ex_jobstate.iteration)
            assert_equal(fs_jobstate.retries, ex_jobstate.retries)
            assert_equal(fs_jobstate.status, ex_jobstate.status)


class TestJobState(TestCase):

    def test_job_retry_status(self):
        job_spec = get_jobspec()
        context = get_context()

        job = Job_force_retry(2, job_spec, 1, context)
        job.workload = Mock()

        context.cm._joblist = [job]
        context.run_state.add_job(job)

        verifier = lambda _: assert_equal(job.status, Status.PENDING)
        signal.connect(verifier, signal.JOB_RESTARTED)

        runner = Runner(context, MockProcessorManager())
        runner.run()
        signal.disconnect(verifier, signal.JOB_RESTARTED)

    def test_skipped_job_state(self):
        # Test that, if the first job fails and the bail parameter is set,
        # the remaining jobs have status SKIPPED
        job_spec = get_jobspec()
        context = get_context()

        context.cm.run_config.bail_on_job_failure = True

        job1 = Job_force_retry(3, job_spec, 1, context)
        job2 = Job(job_spec, 1, context)
        job1.workload = Mock()
        job2.workload = Mock()

        context.cm._joblist = [job1, job2]
        context.run_state.add_job(job1)
        context.run_state.add_job(job2)

        runner = Runner(context, MockProcessorManager())
        try:
            runner.run()
        except ExecutionError:
            assert_equal(job2.status, Status.SKIPPED)
        else:
            assert False, "ExecutionError not raised"

    def test_normal_job_finalized(self):
        # Test that a job is initialized then finalized normally
        job_spec = get_jobspec()
        context = get_context()

        job = Job_force_retry(0, job_spec, 1, context)
        job.workload = Mock()

        context.cm._joblist = [job]
        context.run_state.add_job(job)

        runner = Runner(context, MockProcessorManager())
        runner.run()

        assert_equal(job.initialized, True)
        assert_equal(job.finalized, True)

    def test_skipped_job_finalized(self):
        # Test that a skipped job has been finalized
        job_spec = get_jobspec()
        context = get_context()

        context.cm.run_config.bail_on_job_failure = True

        job1 = Job_force_retry(3, job_spec, 1, context)
        job2 = Job_force_retry(0, job_spec, 1, context)
        job1.workload = Mock()
        job2.workload = Mock()

        context.cm._joblist = [job1, job2]
        context.run_state.add_job(job1)
        context.run_state.add_job(job2)

        runner = Runner(context, MockProcessorManager())
        try:
            runner.run()
        except ExecutionError:
            assert_equal(job2.finalized, True)
        else:
            assert False, "ExecutionError not raised"

    def test_failed_job_finalized(self):
        # Test that a failed job, while the bail parameter is set,
        # is finalized
        job_spec = get_jobspec()
        context = get_context()

        context.cm.run_config.bail_on_job_failure = True

        job1 = Job_force_retry(3, job_spec, 1, context)
        job1.workload = Mock()

        context.cm._joblist = [job1]
        context.run_state.add_job(job1)

        runner = Runner(context, MockProcessorManager())
        try:
            runner.run()
        except ExecutionError:
            assert_equal(job1.finalized, True)
        else:
            assert False, "ExecutionError not raised"


def get_context(path=None):
    if not path:
        path = tempfile.mkstemp()[1]
        os.remove(path)

    config = MockConfigManager()
    output = init_run_output(path, config)

    return ExecutionContext(config, Mock(), output)


def get_jobspec():
    job_spec = JobSpec()
    job_spec.augmentations = {}
    job_spec.finalize()
    return job_spec
@@ -30,6 +30,27 @@ class Callable(object):
        return self.val


+class TestSignalDisconnect(unittest.TestCase):
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.callback_ctr = 0
+
+    def setUp(self):
+        signal.connect(self._call_me_once, 'first')
+        signal.connect(self._call_me_once, 'second')
+
+    def test_handler_disconnected(self):
+        signal.send('first')
+        signal.send('second')
+
+    def _call_me_once(self):
+        assert_equal(self.callback_ctr, 0)
+        self.callback_ctr += 1
+        signal.disconnect(self._call_me_once, 'first')
+        signal.disconnect(self._call_me_once, 'second')


class TestPriorityDispatcher(unittest.TestCase):

    def setUp(self):
@@ -61,12 +82,16 @@ class TestPriorityDispatcher(unittest.TestCase):

    def test_wrap_propagate(self):
        d = {'before': False, 'after': False, 'success': False}

        def before():
            d['before'] = True

        def after():
            d['after'] = True

+       def success():
+           d['success'] = True
+
        signal.connect(before, signal.BEFORE_WORKLOAD_SETUP)
        signal.connect(after, signal.AFTER_WORKLOAD_SETUP)
+       signal.connect(success, signal.SUCCESSFUL_WORKLOAD_SETUP)
@@ -76,7 +101,7 @@ class TestPriorityDispatcher(unittest.TestCase):
            with signal.wrap('WORKLOAD_SETUP'):
                raise RuntimeError()
        except RuntimeError:
-           caught=True
+           caught = True

        assert_true(d['before'])
        assert_true(d['after'])
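The test above pins down the contract of signal.wrap: the BEFORE event fires on entry, the AFTER event fires whether or not the body raises, the SUCCESSFUL event fires only on a clean exit, and any exception is propagated to the caller. A compact sketch of that contract (signal names are the ones used in the diff):

    import wa.framework.signal as signal

    events = []
    signal.connect(lambda: events.append('before'), signal.BEFORE_WORKLOAD_SETUP)
    signal.connect(lambda: events.append('after'), signal.AFTER_WORKLOAD_SETUP)
    signal.connect(lambda: events.append('success'), signal.SUCCESSFUL_WORKLOAD_SETUP)

    try:
        with signal.wrap('WORKLOAD_SETUP'):
            raise RuntimeError()     # simulated setup failure
    except RuntimeError:
        pass                         # wrap() propagates the exception

    print(events)  # ['before', 'after'] -- 'success' fires only on a clean exit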
@@ -190,3 +190,10 @@ class TestToggleSet(TestCase):

        ts6 = ts2.merge_into(ts3).merge_with(ts1)
        assert_equal(ts6, toggle_set(['one', 'two', 'three', 'four', 'five', '~~']))

+   def test_order_on_create(self):
+       ts1 = toggle_set(['one', 'two', 'three', '~one'])
+       assert_equal(ts1, toggle_set(['~one', 'two', 'three']))
+
+       ts1 = toggle_set(['~one', 'two', 'three', 'one'])
+       assert_equal(ts1, toggle_set(['one', 'two', 'three']))
@@ -23,7 +23,6 @@ import re
import uuid
import getpass
from collections import OrderedDict
-from distutils.dir_util import copy_tree  # pylint: disable=no-name-in-module, import-error

from devlib.utils.types import identifier
try:
@@ -43,6 +42,24 @@ from wa.utils.misc import (ensure_directory_exists as _d, capitalize,
from wa.utils.postgres import get_schema, POSTGRES_SCHEMA_DIR
from wa.utils.serializer import yaml

+if sys.version_info >= (3, 8):
+    def copy_tree(src, dst):
+        from shutil import copy, copytree  # pylint: disable=import-outside-toplevel
+        copytree(
+            src,
+            dst,
+            # dirs_exist_ok=True only exists in Python >= 3.8
+            dirs_exist_ok=True,
+            # Align with devlib and only copy the content without metadata
+            copy_function=copy
+        )
+else:
+    def copy_tree(src, dst):
+        # pylint: disable=import-outside-toplevel, redefined-outer-name
+        from distutils.dir_util import copy_tree
+        # Align with devlib and only copy the content without metadata
+        copy_tree(src, dst, preserve_mode=False, preserve_times=False)
+

TEMPLATES_DIR = os.path.join(os.path.dirname(__file__), 'templates')

@@ -106,8 +123,8 @@ class CreateDatabaseSubcommand(SubCommand):
    def execute(self, state, args):  # pylint: disable=too-many-branches
        if not psycopg2:
            raise CommandError(
-               'The module psycopg2 is required for the wa ' +
-               'create database command.')
+               'The module psycopg2 is required for the wa '
+               + 'create database command.')

        if args.dbname == 'postgres':
            raise ValueError('Databasename to create cannot be postgres.')
@@ -131,8 +148,8 @@ class CreateDatabaseSubcommand(SubCommand):
            config = yaml.load(config_file)
            if 'postgres' in config and not args.force_update_config:
                raise CommandError(
-                   "The entry 'postgres' already exists in the config file. " +
-                   "Please specify the -F flag to force an update.")
+                   "The entry 'postgres' already exists in the config file. "
+                   + "Please specify the -F flag to force an update.")

        possible_connection_errors = [
            (
@@ -261,8 +278,8 @@ class CreateDatabaseSubcommand(SubCommand):
        else:
            if not self.force:
                raise CommandError(
-                   "Database {} already exists. ".format(self.dbname) +
-                   "Please specify the -f flag to create it from afresh."
+                   "Database {} already exists. ".format(self.dbname)
+                   + "Please specify the -f flag to create it from afresh."
                )

    def _create_database_postgres(self):
@@ -400,14 +417,14 @@ class CreateWorkloadSubcommand(SubCommand):
        self.parser.add_argument('name', metavar='NAME',
                                 help='Name of the workload to be created')
        self.parser.add_argument('-p', '--path', metavar='PATH', default=None,
-                                help='The location at which the workload will be created. If not specified, ' +
-                                     'this defaults to "~/.workload_automation/plugins".')
+                                help='The location at which the workload will be created. If not specified, '
+                                     + 'this defaults to "~/.workload_automation/plugins".')
        self.parser.add_argument('-f', '--force', action='store_true',
-                                help='Create the new workload even if a workload with the specified ' +
-                                     'name already exists.')
+                                help='Create the new workload even if a workload with the specified '
+                                     + 'name already exists.')
        self.parser.add_argument('-k', '--kind', metavar='KIND', default='basic', choices=list(create_funcs.keys()),
-                                help='The type of workload to be created. The available options ' +
-                                     'are: {}'.format(', '.join(list(create_funcs.keys()))))
+                                help='The type of workload to be created. The available options '
+                                     + 'are: {}'.format(', '.join(list(create_funcs.keys()))))

    def execute(self, state, args):  # pylint: disable=R0201
        where = args.path or 'local'
@@ -430,8 +447,8 @@ class CreatePackageSubcommand(SubCommand):
        self.parser.add_argument('name', metavar='NAME',
                                 help='Name of the package to be created')
        self.parser.add_argument('-p', '--path', metavar='PATH', default=None,
-                                help='The location at which the new package will be created. If not specified, ' +
-                                     'current working directory will be used.')
+                                help='The location at which the new package will be created. If not specified, '
+                                     + 'current working directory will be used.')
        self.parser.add_argument('-f', '--force', action='store_true',
                                 help='Create the new package even if a file or directory with the same name '
                                      'already exists at the specified location.')
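Both branches of the copy_tree shim above deliberately tolerate an existing destination and drop file metadata. A small usage sketch (not from the diff) of the Python >= 3.8 branch, using only standard-library calls:

    import os
    import tempfile
    from shutil import copy, copytree

    src = tempfile.mkdtemp()
    dst = tempfile.mkdtemp()   # already exists, which plain copytree() would reject
    open(os.path.join(src, 'agenda.yaml'), 'w').close()

    # dirs_exist_ok merges into the existing directory; copy (rather than
    # copy2) skips metadata, matching devlib's behaviour.
    copytree(src, dst, dirs_exist_ok=True, copy_function=copy)
    print(os.listdir(dst))  # ['agenda.yaml']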
@@ -1,4 +1,4 @@
---!VERSION!1.5!ENDVERSION!
+--!VERSION!1.6!ENDVERSION!
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
CREATE EXTENSION IF NOT EXISTS "lo";

@@ -61,7 +61,7 @@ CREATE TABLE Runs (

CREATE TABLE Jobs (
    oid uuid NOT NULL,
-   run_oid uuid NOT NULL references Runs(oid),
+   run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
    status status_enum,
    retry int,
    label text,
@@ -76,7 +76,7 @@ CREATE TABLE Jobs (

CREATE TABLE Targets (
    oid uuid NOT NULL,
-   run_oid uuid NOT NULL references Runs(oid),
+   run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
    target text,
    modules text[],
    cpus text[],
@@ -103,7 +103,7 @@ CREATE TABLE Targets (

CREATE TABLE Events (
    oid uuid NOT NULL,
-   run_oid uuid NOT NULL references Runs(oid),
+   run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
    job_oid uuid references Jobs(oid),
    timestamp timestamp,
    message text,
@@ -114,28 +114,28 @@ CREATE TABLE Events (

CREATE TABLE Resource_Getters (
    oid uuid NOT NULL,
-   run_oid uuid NOT NULL references Runs(oid),
+   run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
    name text,
    PRIMARY KEY (oid)
);

CREATE TABLE Augmentations (
    oid uuid NOT NULL,
-   run_oid uuid NOT NULL references Runs(oid),
+   run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
    name text,
    PRIMARY KEY (oid)
);

CREATE TABLE Jobs_Augs (
    oid uuid NOT NULL,
-   job_oid uuid NOT NULL references Jobs(oid),
-   augmentation_oid uuid NOT NULL references Augmentations(oid),
+   job_oid uuid NOT NULL references Jobs(oid) ON DELETE CASCADE,
+   augmentation_oid uuid NOT NULL references Augmentations(oid) ON DELETE CASCADE,
    PRIMARY KEY (oid)
);

CREATE TABLE Metrics (
    oid uuid NOT NULL,
-   run_oid uuid NOT NULL references Runs(oid),
+   run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
    job_oid uuid references Jobs(oid),
    name text,
    value double precision,
@@ -158,7 +158,7 @@ CREATE TRIGGER t_raster BEFORE UPDATE OR DELETE ON LargeObjects

CREATE TABLE Artifacts (
    oid uuid NOT NULL,
-   run_oid uuid NOT NULL references Runs(oid),
+   run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
    job_oid uuid references Jobs(oid),
    name text,
    large_object_uuid uuid NOT NULL references LargeObjects(oid),
@@ -170,12 +170,18 @@ CREATE TABLE Artifacts (
    PRIMARY KEY (oid)
);

+CREATE RULE del_lo AS
+    ON DELETE TO Artifacts
+    DO DELETE FROM LargeObjects
+        WHERE LargeObjects.oid = old.large_object_uuid
+;
+
CREATE TABLE Classifiers (
    oid uuid NOT NULL,
-   artifact_oid uuid references Artifacts(oid),
-   metric_oid uuid references Metrics(oid),
-   job_oid uuid references Jobs(oid),
-   run_oid uuid references Runs(oid),
+   artifact_oid uuid references Artifacts(oid) ON DELETE CASCADE,
+   metric_oid uuid references Metrics(oid) ON DELETE CASCADE,
+   job_oid uuid references Jobs(oid) ON DELETE CASCADE,
+   run_oid uuid references Runs(oid) ON DELETE CASCADE,
    key text,
    value text,
    PRIMARY KEY (oid)
@@ -183,7 +189,7 @@ CREATE TABLE Classifiers (

CREATE TABLE Parameters (
    oid uuid NOT NULL,
-   run_oid uuid NOT NULL references Runs(oid),
+   run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
    job_oid uuid references Jobs(oid),
    augmentation_oid uuid references Augmentations(oid),
    resource_getter_oid uuid references Resource_Getters(oid),
wa/commands/postgres_schemas/postgres_schema_update_v1.6.sql (new file, 109 lines)
@@ -0,0 +1,109 @@
ALTER TABLE jobs
    DROP CONSTRAINT jobs_run_oid_fkey,
    ADD CONSTRAINT jobs_run_oid_fkey
        FOREIGN KEY (run_oid)
        REFERENCES runs(oid)
        ON DELETE CASCADE
;

ALTER TABLE targets
    DROP CONSTRAINT targets_run_oid_fkey,
    ADD CONSTRAINT targets_run_oid_fkey
        FOREIGN KEY (run_oid)
        REFERENCES runs(oid)
        ON DELETE CASCADE
;

ALTER TABLE events
    DROP CONSTRAINT events_run_oid_fkey,
    ADD CONSTRAINT events_run_oid_fkey
        FOREIGN KEY (run_oid)
        REFERENCES runs(oid)
        ON DELETE CASCADE
;

ALTER TABLE resource_getters
    DROP CONSTRAINT resource_getters_run_oid_fkey,
    ADD CONSTRAINT resource_getters_run_oid_fkey
        FOREIGN KEY (run_oid)
        REFERENCES runs(oid)
        ON DELETE CASCADE
;

ALTER TABLE augmentations
    DROP CONSTRAINT augmentations_run_oid_fkey,
    ADD CONSTRAINT augmentations_run_oid_fkey
        FOREIGN KEY (run_oid)
        REFERENCES runs(oid)
        ON DELETE CASCADE
;

ALTER TABLE jobs_augs
    DROP CONSTRAINT jobs_augs_job_oid_fkey,
    DROP CONSTRAINT jobs_augs_augmentation_oid_fkey,
    ADD CONSTRAINT jobs_augs_job_oid_fkey
        FOREIGN KEY (job_oid)
        REFERENCES Jobs(oid)
        ON DELETE CASCADE,
    ADD CONSTRAINT jobs_augs_augmentation_oid_fkey
        FOREIGN KEY (augmentation_oid)
        REFERENCES Augmentations(oid)
        ON DELETE CASCADE
;

ALTER TABLE metrics
    DROP CONSTRAINT metrics_run_oid_fkey,
    ADD CONSTRAINT metrics_run_oid_fkey
        FOREIGN KEY (run_oid)
        REFERENCES runs(oid)
        ON DELETE CASCADE
;

ALTER TABLE artifacts
    DROP CONSTRAINT artifacts_run_oid_fkey,
    ADD CONSTRAINT artifacts_run_oid_fkey
        FOREIGN KEY (run_oid)
        REFERENCES runs(oid)
        ON DELETE CASCADE
;

CREATE RULE del_lo AS
    ON DELETE TO Artifacts
    DO DELETE FROM LargeObjects
        WHERE LargeObjects.oid = old.large_object_uuid
;

ALTER TABLE classifiers
    DROP CONSTRAINT classifiers_artifact_oid_fkey,
    DROP CONSTRAINT classifiers_metric_oid_fkey,
    DROP CONSTRAINT classifiers_job_oid_fkey,
    DROP CONSTRAINT classifiers_run_oid_fkey,

    ADD CONSTRAINT classifiers_artifact_oid_fkey
        FOREIGN KEY (artifact_oid)
        REFERENCES artifacts(oid)
        ON DELETE CASCADE,

    ADD CONSTRAINT classifiers_metric_oid_fkey
        FOREIGN KEY (metric_oid)
        REFERENCES metrics(oid)
        ON DELETE CASCADE,

    ADD CONSTRAINT classifiers_job_oid_fkey
        FOREIGN KEY (job_oid)
        REFERENCES jobs(oid)
        ON DELETE CASCADE,

    ADD CONSTRAINT classifiers_run_oid_fkey
        FOREIGN KEY (run_oid)
        REFERENCES runs(oid)
        ON DELETE CASCADE
;

ALTER TABLE parameters
    DROP CONSTRAINT parameters_run_oid_fkey,
    ADD CONSTRAINT parameters_run_oid_fkey
        FOREIGN KEY (run_oid)
        REFERENCES runs(oid)
        ON DELETE CASCADE
;
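For an existing v1.5 database, the statements above have to be run against it once. A hedged sketch of applying the script by hand with psycopg2 (connection parameters are assumptions; `wa create database` is the supported path for fresh databases):

    import psycopg2

    # Connection details are placeholders for your own setup.
    with psycopg2.connect(dbname='wa', user='postgres') as conn:
        with conn.cursor() as cursor:
            with open('postgres_schema_update_v1.6.sql') as sql_file:
                # psycopg2 accepts multiple semicolon-separated statements
                # in a single execute(); the context manager commits on exit.
                cursor.execute(sql_file.read())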
@@ -17,6 +17,7 @@ import os

from wa import Command
from wa import discover_wa_outputs
+from wa.framework.configuration.core import Status
from wa.framework.exception import CommandError
from wa.framework.output import RunOutput
from wa.framework.output_processor import ProcessorManager
@@ -57,8 +58,9 @@ class ProcessCommand(Command):
                                 """)
        self.parser.add_argument('-f', '--force', action='store_true',
                                 help="""
-                                Run processors that have already been
-                                run. By default these will be skipped.
+                                Run processors that have already been run. By
+                                default these will be skipped. Also, forces
+                                processing of in-progress runs.
                                 """)
        self.parser.add_argument('-r', '--recursive', action='store_true',
                                 help="""
@@ -76,10 +78,15 @@ class ProcessCommand(Command):
        if not args.recursive:
            output_list = [RunOutput(process_directory)]
        else:
-           output_list = [output for output in discover_wa_outputs(process_directory)]
+           output_list = list(discover_wa_outputs(process_directory))

        pc = ProcessContext()
        for run_output in output_list:
+           if run_output.status < Status.OK and not args.force:
+               msg = 'Skipping {} as it has not completed -- {}'
+               self.logger.info(msg.format(run_output.basepath, run_output.status))
+               continue
+
            pc.run_output = run_output
            pc.target_info = run_output.target_info

@@ -112,6 +119,12 @@ class ProcessCommand(Command):
            pm.initialize(pc)

            for job_output in run_output.jobs:
+               if job_output.status < Status.OK or job_output.status in [Status.SKIPPED, Status.ABORTED]:
+                   msg = 'Skipping job {} {} iteration {} -- {}'
+                   self.logger.info(msg.format(job_output.id, job_output.label,
+                                               job_output.iteration, job_output.status))
+                   continue
+
                pc.job_output = job_output
                pm.enable_all()
                if not args.force:
@@ -142,5 +155,6 @@ class ProcessCommand(Command):
            pm.export_run_output(pc)
            pm.finalize(pc)

+           run_output.write_info()
            run_output.write_result()
        self.logger.info('Done.')
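The skip checks above rely on Status being an ordered enum: every state that precedes OK in the level sequence (NEW, PENDING, RUNNING, and so on) compares as "less than" OK and is treated as incomplete. A quick sketch of that ordering (Status is imported exactly as in the diff):

    from wa.framework.configuration.core import Status

    print(Status.RUNNING < Status.OK)   # True  -> skipped unless --force is given
    print(Status.PARTIAL < Status.OK)   # False -> PARTIAL runs are processed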
wa/commands/report.py (new file, 288 lines)
@@ -0,0 +1,288 @@
from collections import Counter
from datetime import datetime, timedelta
import logging
import os

from wa import Command, settings
from wa.framework.configuration.core import Status
from wa.framework.output import RunOutput, discover_wa_outputs
from wa.utils.doc import underline
from wa.utils.log import COLOR_MAP, RESET_COLOR
from wa.utils.terminalsize import get_terminal_size


class ReportCommand(Command):

    name = 'report'
    description = '''
    Monitor an ongoing run and provide information on its progress.

    Specify the output directory of the run you would like to monitor;
    alternatively report will attempt to discover wa output directories
    within the current directory. The output includes run information such as
    the UUID, start time, duration, project name and a short summary of the
    run's progress (number of completed jobs, the number of jobs in each
    different status).

    If verbose output is specified, the output includes a list of all events
    labelled as not specific to any job, followed by a list of the jobs in the
    order executed, with their retries (if any), current status and, if the job
    is finished, a list of events that occurred during that job's execution.

    This is an example of a job status line:

        wk1 (exoplayer) [1] - 2, PARTIAL

    It contains two entries delimited by a comma: the job's descriptor followed
    by its completion status (``PARTIAL``, in this case). The descriptor
    consists of the following elements:

    - the job ID (``wk1``)
    - the job label (which defaults to the workload name) in parentheses
    - job iteration number in square brackets (``1`` in this case)
    - a hyphen followed by the retry attempt number.
      (note: this will only be shown if the job has been retried at least
      once. If the job has not yet run, or if it completed on the first
      attempt, the hyphen and retry count -- which in that case would be
      zero -- will not appear).
    '''

    def initialize(self, context):
        self.parser.add_argument('-d', '--directory',
                                 help='''
                                 Specify the WA output path. report will
                                 otherwise attempt to discover output
                                 directories in the current directory.
                                 ''')

    def execute(self, state, args):
        if args.directory:
            output_path = args.directory
            run_output = RunOutput(output_path)
        else:
            possible_outputs = list(discover_wa_outputs(os.getcwd()))
            num_paths = len(possible_outputs)

            if num_paths > 1:
                print('More than one possible output directory found,'
                      ' please choose a path from the following:'
                      )

                for i in range(num_paths):
                    print("{}: {}".format(i, possible_outputs[i].basepath))

                while True:
                    try:
                        select = int(input())
                    except ValueError:
                        print("Please select a valid path number")
                        continue

                    if select not in range(num_paths):
                        print("Please select a valid path number")
                        continue
                    break

                run_output = possible_outputs[select]

            else:
                run_output = possible_outputs[0]

        rm = RunMonitor(run_output)
        print(rm.generate_output(args.verbose))


class RunMonitor:

    @property
    def elapsed_time(self):
        if self._elapsed is None:
            if self.ro.info.duration is None:
                self._elapsed = datetime.utcnow() - self.ro.info.start_time
            else:
                self._elapsed = self.ro.info.duration
        return self._elapsed

    @property
    def job_outputs(self):
        if self._job_outputs is None:
            self._job_outputs = {
                (j_o.id, j_o.label, j_o.iteration): j_o for j_o in self.ro.jobs
            }
        return self._job_outputs

    @property
    def projected_duration(self):
        elapsed = self.elapsed_time.total_seconds()
        proj = timedelta(seconds=elapsed * (len(self.jobs) / len(self.segmented['finished'])))
        return proj - self.elapsed_time

    def __init__(self, ro):
        self.ro = ro
        self._elapsed = None
        self._p_duration = None
        self._job_outputs = None
        self._termwidth = None
        self._fmt = _simple_formatter()
        self.get_data()

    def get_data(self):
        self.jobs = [state for label_id, state in self.ro.state.jobs.items()]
        if self.jobs:
            rc = self.ro.run_config
            self.segmented = segment_jobs_by_state(self.jobs,
                                                   rc.max_retries,
                                                   rc.retry_on_status
                                                   )

    def generate_run_header(self):
        info = self.ro.info

        header = underline('Run Info')
        header += "UUID: {}\n".format(info.uuid)
        if info.run_name:
            header += "Run name: {}\n".format(info.run_name)
        if info.project:
            header += "Project: {}\n".format(info.project)
        if info.project_stage:
            header += "Project stage: {}\n".format(info.project_stage)

        if info.start_time:
            duration = _seconds_as_smh(self.elapsed_time.total_seconds())
            header += ("Start time: {}\n"
                       "Duration: {:02}:{:02}:{:02}\n"
                       ).format(info.start_time,
                                duration[2], duration[1], duration[0],
                                )
            if self.segmented['finished'] and not info.end_time:
                p_duration = _seconds_as_smh(self.projected_duration.total_seconds())
                header += "Projected time remaining: {:02}:{:02}:{:02}\n".format(
                    p_duration[2], p_duration[1], p_duration[0]
                )

            elif self.ro.info.end_time:
                header += "End time: {}\n".format(info.end_time)

        return header + '\n'

    def generate_job_summary(self):
        total = len(self.jobs)
        num_fin = len(self.segmented['finished'])

        summary = underline('Job Summary')
        summary += 'Total: {}, Completed: {} ({}%)\n'.format(
            total, num_fin, (num_fin / total) * 100
        ) if total > 0 else 'No jobs created\n'

        ctr = Counter()
        for run_state, jobs in ((k, v) for k, v in self.segmented.items() if v):
            if run_state == 'finished':
                ctr.update([job.status.name.lower() for job in jobs])
            else:
                ctr[run_state] += len(jobs)

        return summary + ', '.join(
            [str(count) + ' ' + self._fmt.highlight_keyword(status) for status, count in ctr.items()]
        ) + '\n\n'

    def generate_job_detail(self):
        detail = underline('Job Detail')
        for job in self.jobs:
            detail += ('{} ({}) [{}]{}, {}\n').format(
                job.id,
                job.label,
                job.iteration,
                ' - ' + str(job.retries) if job.retries else '',
                self._fmt.highlight_keyword(str(job.status))
            )

            job_output = self.job_outputs[(job.id, job.label, job.iteration)]
            for event in job_output.events:
                detail += self._fmt.fit_term_width(
                    '\t{}\n'.format(event.summary)
                )
        return detail

    def generate_run_detail(self):
        detail = underline('Run Events') if self.ro.events else ''

        for event in self.ro.events:
            detail += '{}\n'.format(event.summary)

        return detail + '\n'

    def generate_output(self, verbose):
        if not self.jobs:
            return 'No jobs found in output directory\n'

        output = self.generate_run_header()
        output += self.generate_job_summary()

        if verbose:
            output += self.generate_run_detail()
            output += self.generate_job_detail()

        return output


def _seconds_as_smh(seconds):
    seconds = int(seconds)
    hours = seconds // 3600
    minutes = (seconds % 3600) // 60
    seconds = seconds % 60
    return seconds, minutes, hours


def segment_jobs_by_state(jobstates, max_retries, retry_status):
    finished_states = [
        Status.PARTIAL, Status.FAILED,
        Status.ABORTED, Status.OK, Status.SKIPPED
    ]

    segmented = {
        'finished': [], 'other': [], 'running': [],
        'pending': [], 'uninitialized': []
    }

    for jobstate in jobstates:
        if (jobstate.status in retry_status) and jobstate.retries < max_retries:
            segmented['running'].append(jobstate)
        elif jobstate.status in finished_states:
            segmented['finished'].append(jobstate)
        elif jobstate.status == Status.RUNNING:
            segmented['running'].append(jobstate)
        elif jobstate.status == Status.PENDING:
            segmented['pending'].append(jobstate)
        elif jobstate.status == Status.NEW:
            segmented['uninitialized'].append(jobstate)
        else:
            segmented['other'].append(jobstate)

    return segmented


class _simple_formatter:
    color_map = {
        'running': COLOR_MAP[logging.INFO],
        'partial': COLOR_MAP[logging.WARNING],
        'failed': COLOR_MAP[logging.CRITICAL],
        'aborted': COLOR_MAP[logging.ERROR]
    }

    def __init__(self):
        self.termwidth = get_terminal_size()[0]
        self.color = settings.logging['color']

    def fit_term_width(self, text):
        text = text.expandtabs()
        if len(text) <= self.termwidth:
            return text
        else:
            return text[0:self.termwidth - 4] + " ...\n"

    def highlight_keyword(self, kw):
        if not self.color or kw not in _simple_formatter.color_map:
            return kw

        color = _simple_formatter.color_map[kw.lower()]
        return '{}{}{}'.format(color, kw, RESET_COLOR)
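RunMonitor.projected_duration above extrapolates linearly from the finished jobs: projected total = elapsed * (total jobs / finished jobs), and the reported remainder subtracts the elapsed time again. A worked example of the same arithmetic:

    # With 4 of 16 jobs finished after 10 minutes, the projected total is
    # 10 * (16 / 4) = 40 minutes, so 30 minutes remain.
    from datetime import timedelta

    elapsed = timedelta(minutes=10).total_seconds()
    projected_total = timedelta(seconds=elapsed * (16 / 4))
    remaining = projected_total - timedelta(seconds=elapsed)
    print(remaining)  # 0:30:00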
@@ -25,10 +25,6 @@ from wa.framework.target.manager import TargetManager
from wa.utils.revent import ReventRecorder


-if sys.version_info[0] == 3:
-    raw_input = input  # pylint: disable=redefined-builtin
-
-
class RecordCommand(Command):

    name = 'record'
@@ -96,8 +92,8 @@ class RecordCommand(Command):
        if args.workload and args.output:
            self.logger.error("Output file cannot be specified with Workload")
            sys.exit()
-       if not args.workload and (args.setup or args.extract_results or
-                                 args.teardown or args.all):
+       if not args.workload and (args.setup or args.extract_results
+                                 or args.teardown or args.all):
            self.logger.error("Cannot specify a recording stage without a Workload")
            sys.exit()
        if args.workload and not any([args.all, args.teardown, args.extract_results, args.run, args.setup]):
@@ -137,11 +133,11 @@ class RecordCommand(Command):
    def record(self, revent_file, name, output_path):
        msg = 'Press Enter when you are ready to record {}...'
        self.logger.info(msg.format(name))
-       raw_input('')
+       input('')
        self.revent_recorder.start_record(revent_file)
        msg = 'Press Enter when you have finished recording {}...'
        self.logger.info(msg.format(name))
-       raw_input('')
+       input('')
        self.revent_recorder.stop_record()

        if not os.path.isdir(output_path):
@@ -22,3 +22,7 @@
  during the run.
## 1.5
- Change the type of the "hostid" in TargetInfo from Int to Bigint.
+## 1.6
+- Add cascading deletes to most tables to allow easy deletion of a run
+  and its associated data
+- Add rule to delete associated large object on deletion of artifact
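With the v1.6 cascades in place, deleting a run's row is enough to remove its jobs, events, metrics, artifacts (and, via the del_lo rule, their large objects). An illustrative sketch only, using the table and column names from the schema above; the connection parameters and the uuid are placeholders:

    import psycopg2

    run_oid = '00000000-0000-0000-0000-000000000000'  # placeholder run uuid

    conn = psycopg2.connect(dbname='wa', user='postgres')
    with conn, conn.cursor() as cursor:
        # Cascades take care of all dependent rows in one statement.
        cursor.execute('DELETE FROM runs WHERE oid = %s', (run_oid,))
    conn.close()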
@@ -73,11 +73,8 @@ class ShowCommand(Command):

        if which('pandoc'):
            p = Popen(['pandoc', '-f', 'rst', '-t', 'man'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
-           if sys.version_info[0] == 3:
-               output, _ = p.communicate(rst_output.encode(sys.stdin.encoding))
-               output = output.decode(sys.stdout.encoding)
-           else:
-               output, _ = p.communicate(rst_output)
+           output, _ = p.communicate(rst_output.encode(sys.stdin.encoding))
+           output = output.decode(sys.stdout.encoding)

            # Make sure to double escape back slashes
            output = output.replace('\\', '\\\\\\')
@@ -12,7 +12,7 @@ android {
    buildTypes {
        applicationVariants.all { variant ->
            variant.outputs.each { output ->
-               output.outputFile = file("$$project.buildDir/apk/${package_name}.apk")
+               output.outputFileName = "${package_name}.apk"
            }
        }
    }
@@ -31,8 +31,8 @@ fi

# If successful move APK file to workload folder (overwrite previous)
rm -f ../$package_name
-if [[ -f app/build/apk/$package_name.apk ]]; then
-    cp app/build/apk/$package_name.apk ../$package_name.apk
+if [[ -f app/build/outputs/apk/debug/$package_name.apk ]]; then
+    cp app/build/outputs/apk/debug/$package_name.apk ../$package_name.apk
else
    echo 'ERROR: UiAutomator apk could not be found!'
    exit 9
@@ -3,9 +3,10 @@
buildscript {
    repositories {
        jcenter()
+       google()
    }
    dependencies {
-       classpath 'com.android.tools.build:gradle:2.3.1'
+       classpath 'com.android.tools.build:gradle:7.2.1'

        // NOTE: Do not place your application dependencies here; they belong
        // in the individual module build.gradle files
@@ -15,6 +16,7 @@ buildscript {
allprojects {
    repositories {
        jcenter()
+       google()
    }
}

wa/commands/templates/uiauto/uiauto_workload_template/gradle/wrapper/gradle-wrapper.properties (vendored, 2 changes)
@@ -3,4 +3,4 @@ distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
-distributionUrl=https\://services.gradle.org/distributions/gradle-3.3-all.zip
+distributionUrl=https\://services.gradle.org/distributions/gradle-7.3.3-all.zip
@@ -65,7 +65,6 @@ class SubCommand(object):
        options to the command's parser). ``context`` is always ``None``.

        """
-       pass

    def execute(self, state, args):
        """
@@ -13,6 +13,7 @@
# limitations under the License.

import os
+import logging
from copy import copy, deepcopy
from collections import OrderedDict, defaultdict

@@ -36,6 +37,8 @@ Status = enum(['UNKNOWN', 'NEW', 'PENDING',
               'STARTED', 'CONNECTED', 'INITIALIZED', 'RUNNING',
               'OK', 'PARTIAL', 'FAILED', 'ABORTED', 'SKIPPED'])

+logger = logging.getLogger('config')
+

##########################
### CONFIG POINT TYPES ###
@@ -55,10 +58,11 @@ class RebootPolicy(object):
                   executing the first workload spec.
    :each_spec: The device will be rebooted before running a new workload spec.
    :each_iteration: The device will be rebooted before each new iteration.
+   :run_completion: The device will be rebooted after the run has been completed.

    """

-   valid_policies = ['never', 'as_needed', 'initial', 'each_spec', 'each_job']
+   valid_policies = ['never', 'as_needed', 'initial', 'each_spec', 'each_job', 'run_completion']

    @staticmethod
    def from_pod(pod):
@@ -89,6 +93,10 @@ class RebootPolicy(object):
    def reboot_on_each_spec(self):
        return self.policy == 'each_spec'

+   @property
+   def reboot_on_run_completion(self):
+       return self.policy == 'run_completion'
+
    def __str__(self):
        return self.policy

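A quick illustration of the new policy value, assuming the RebootPolicy constructor accepts a policy name as valid_policies suggests:

    from wa.framework.configuration.core import RebootPolicy

    policy = RebootPolicy('run_completion')
    print(policy.reboot_on_run_completion)  # True
    print(policy.reboot_on_each_spec)       # False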
@@ -192,7 +200,8 @@ class ConfigurationPoint(object):
                 constraint=None,
                 merge=False,
                 aliases=None,
-                global_alias=None):
+                global_alias=None,
+                deprecated=False):
        """
        Create a new Parameter object.

@@ -243,10 +252,12 @@ class ConfigurationPoint(object):
        :param global_alias: An alias for this parameter that can be specified at
                             the global level. A global_alias can map onto many
                             ConfigurationPoints.
+       :param deprecated: Specify that this parameter is deprecated and its
+                          config should be ignored. If supplied WA will display
+                          a warning to the user however will continue execution.
        """
        self.name = identifier(name)
-       if kind in KIND_MAP:
-           kind = KIND_MAP[kind]
+       kind = KIND_MAP.get(kind, kind)
        if kind is not None and not callable(kind):
            raise ValueError('Kind must be callable.')
        self.kind = kind
@@ -266,6 +277,7 @@ class ConfigurationPoint(object):
        self.merge = merge
        self.aliases = aliases or []
        self.global_alias = global_alias
+       self.deprecated = deprecated

        if self.default is not None:
            try:
@@ -281,6 +293,11 @@ class ConfigurationPoint(object):
            return False

    def set_value(self, obj, value=None, check_mandatory=True):
+       if self.deprecated:
+           if value is not None:
+               msg = 'Deprecated parameter supplied for "{}" in "{}". The value will be ignored.'
+               logger.warning(msg.format(self.name, obj.name))
+           return
        if value is None:
            if self.default is not None:
                value = self.kind(self.default)
@@ -302,6 +319,8 @@ class ConfigurationPoint(object):
        setattr(obj, self.name, value)

    def validate(self, obj, check_mandatory=True):
+       if self.deprecated:
+           return
        value = getattr(obj, self.name, None)
        if value is not None:
            self.validate_value(obj.name, value)
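A hedged sketch of the deprecation behaviour added above: a deprecated ConfigurationPoint warns when a value is supplied, discards the value, and skips validation entirely. The plugin class below is hypothetical, stand-in scaffolding only:

    from wa.framework.configuration.core import ConfigurationPoint

    legacy = ConfigurationPoint('old_knob', kind=int, deprecated=True)

    class MyPlugin(object):        # hypothetical stand-in for a real plugin
        name = 'my-plugin'

    plugin = MyPlugin()
    legacy.set_value(plugin, value=42)   # logs a warning; the value is ignored
    print(hasattr(plugin, 'old_knob'))   # False -- nothing was set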
@@ -450,6 +469,7 @@ class MetaConfiguration(Configuration):
            description="""
            The local mount point for the filer hosting WA assets.
            """,
+           default=''
        ),
        ConfigurationPoint(
            'logging',
@@ -466,7 +486,6 @@ class MetaConfiguration(Configuration):
            contain bash color escape codes. Set this to ``False`` if
            console output will be piped somewhere that does not know
            how to handle those.
-
            """,
        ),
        ConfigurationPoint(
@@ -523,6 +542,10 @@ class MetaConfiguration(Configuration):
    def target_info_cache_file(self):
        return os.path.join(self.cache_directory, 'targets.json')

+   @property
+   def apk_info_cache_file(self):
+       return os.path.join(self.cache_directory, 'apk_info.json')
+
    def __init__(self, environ=None):
        super(MetaConfiguration, self).__init__()
        if environ is None:
@@ -644,15 +667,18 @@ class RunConfiguration(Configuration):
            ``"each_spec"``
                The device will be rebooted before running a new workload spec.

-               .. note:: this acts the same as each_job when execution order
+               .. note:: This acts the same as ``each_job`` when execution order
                          is set to by_iteration

+           ``"run_completion"``
+               The device will be rebooted after the run has been completed.
            '''),
        ConfigurationPoint(
            'device',
            kind=str,
            default='generic_android',
            description='''
-           This setting defines what specific Device subclass will be used to
+           This setting defines what specific ``Device`` subclass will be used to
            interact with the connected device. Obviously, this must match your
            setup.
            ''',
@@ -706,6 +732,17 @@ class RunConfiguration(Configuration):
            failed, but continue attempting to run others.
            '''
        ),
+       ConfigurationPoint(
+           'bail_on_job_failure',
+           kind=bool,
+           default=False,
+           description='''
+           When a job fails during its run phase, WA will attempt to retry the
+           job, then continue with remaining jobs after. Setting this to
+           ``True`` means WA will skip remaining jobs and end the run if a job
+           has retried the maximum number of times, and still fails.
+           '''
+       ),
        ConfigurationPoint(
            'allow_phone_home',
            kind=bool, default=True,
@@ -793,12 +830,12 @@ class JobSpec(Configuration):
            description='''
            The name of the workload to run.
            '''),
-       ConfigurationPoint('workload_parameters', kind=obj_dict,
+       ConfigurationPoint('workload_parameters', kind=obj_dict, merge=True,
                           aliases=["params", "workload_params", "parameters"],
                           description='''
                           Parameter to be passed to the workload
                           '''),
-       ConfigurationPoint('runtime_parameters', kind=obj_dict,
+       ConfigurationPoint('runtime_parameters', kind=obj_dict, merge=True,
                           aliases=["runtime_params"],
                           description='''
                           Runtime parameters to be set prior to running
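Enabling the new bail_on_job_failure behaviour mirrors how the tests drive it (context.cm.run_config.bail_on_job_failure = True). A hedged sketch, assuming ConfigManager accepts run-config keys at the top level of a config source the same way it accepts workload_params in the test earlier in this change set; in an agenda or config file this would simply be `bail_on_job_failure: true`:

    from wa.framework.configuration.execution import ConfigManager

    config = ConfigManager()
    config.load_config({'bail_on_job_failure': True}, 'example_config')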
@@ -238,20 +238,47 @@ def _load_file(filepath, error_name):
    return raw, includes


+def _config_values_from_includes(filepath, include_path, error_name):
+    source_dir = os.path.dirname(filepath)
+    included_files = []
+
+    if isinstance(include_path, str):
+        include_path = os.path.expanduser(os.path.join(source_dir, include_path))
+
+        replace_value, includes = _load_file(include_path, error_name)
+
+        included_files.append(include_path)
+        included_files.extend(includes)
+    elif isinstance(include_path, list):
+        replace_value = {}
+
+        for path in include_path:
+            include_path = os.path.expanduser(os.path.join(source_dir, path))
+
+            sub_replace_value, includes = _load_file(include_path, error_name)
+            for key, val in sub_replace_value.items():
+                replace_value[key] = merge_config_values(val, replace_value.get(key, None))
+
+            included_files.append(include_path)
+            included_files.extend(includes)
+    else:
+        message = "{} does not contain a valid {} structure; value for 'include#' must be a string or a list"
+        raise ConfigError(message.format(filepath, error_name))
+
+    return replace_value, included_files
+
+
def _process_includes(raw, filepath, error_name):
    if not raw:
        return []

    source_dir = os.path.dirname(filepath)
    included_files = []
    replace_value = None

    if hasattr(raw, 'items'):
        for key, value in raw.items():
            if key == 'include#':
-               include_path = os.path.expanduser(os.path.join(source_dir, value))
-               included_files.append(include_path)
-               replace_value, includes = _load_file(include_path, error_name)
+               replace_value, includes = _config_values_from_includes(filepath, value, error_name)
                included_files.extend(includes)
            elif hasattr(value, 'items') or isiterable(value):
                includes = _process_includes(value, filepath, error_name)
@@ -297,7 +324,7 @@ def merge_augmentations(raw):
            raise ConfigError(msg.format(value, n, exc))

    # Make sure none of the specified aliases conflict with each other
-   to_check = [e for e in entries]
+   to_check = list(entries)
    while len(to_check) > 1:
        check_entry = to_check.pop()
        for e in to_check:
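The new list branch means an 'include#' entry in a config file can now name several files, merged key by key in order. A sketch of what that enables and of the merge the loop above performs (the example values are illustrative, not from the diff; the YAML shape is shown in the comment):

    # In a config file:
    #
    #   include#:
    #     - common.yaml
    #     - device.yaml
    #
    # Each later file's values are merged over what has accumulated so far:
    from wa.utils.misc import merge_config_values

    replace_value = {}
    for sub in ({'augmentations': ['cpufreq']}, {'augmentations': ['trace-cmd']}):
        for key, val in sub.items():
            replace_value[key] = merge_config_values(val, replace_value.get(key, None))
    print(replace_value)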
@@ -84,9 +84,9 @@ class PluginCache(object):
                  'defined in a config file, move the entry content into the top level'
            raise ConfigError(msg.format((plugin_name)))

-       if (not self.loader.has_plugin(plugin_name) and
-               plugin_name not in self.targets and
-               plugin_name not in GENERIC_CONFIGS):
+       if (not self.loader.has_plugin(plugin_name)
+               and plugin_name not in self.targets
+               and plugin_name not in GENERIC_CONFIGS):
            msg = 'configuration provided for unknown plugin "{}"'
            raise ConfigError(msg.format(plugin_name))

@@ -95,8 +95,8 @@ class PluginCache(object):
            raise ConfigError(msg.format(plugin_name, repr(values), type(values)))

        for name, value in values.items():
-           if (plugin_name not in GENERIC_CONFIGS and
-                   name not in self.get_plugin_parameters(plugin_name)):
+           if (plugin_name not in GENERIC_CONFIGS
+                   and name not in self.get_plugin_parameters(plugin_name)):
                msg = "'{}' is not a valid parameter for '{}'"
                raise ConfigError(msg.format(name, plugin_name))

@@ -33,6 +33,7 @@ class JobSpecSource(object):
    def id(self):
        return self.config['id']

+   @property
    def name(self):
        raise NotImplementedError()

@@ -30,60 +30,49 @@ class WAError(Exception):

class NotFoundError(WAError):
    """Raised when the specified item is not found."""
-   pass


class ValidationError(WAError):
    """Raised on failure to validate an extension."""
-   pass


class ExecutionError(WAError):
    """Error encountered by the execution framework."""
-   pass


class WorkloadError(WAError):
    """General Workload error."""
-   pass


class JobError(WAError):
    """Job execution error."""
-   pass


class InstrumentError(WAError):
    """General Instrument error."""
-   pass


class OutputProcessorError(WAError):
    """General OutputProcessor error."""
-   pass


class ResourceError(WAError):
    """General Resolver error."""
-   pass


class CommandError(WAError):
    """Raised by commands when they have encountered an error condition
    during execution."""
-   pass


class ToolError(WAError):
    """Raised by tools when they have encountered an error condition
    during execution."""
-   pass


class ConfigError(WAError):
    """Raised when configuration provided is invalid. This error suggests that
    the user should modify their config and try again."""
-   pass


class SerializerSyntaxError(Exception):
@ -25,7 +25,7 @@ from datetime import datetime
|
||||
import wa.framework.signal as signal
|
||||
from wa.framework import instrument as instrumentation
|
||||
from wa.framework.configuration.core import Status
|
||||
from wa.framework.exception import TargetError, HostError, WorkloadError
|
||||
from wa.framework.exception import TargetError, HostError, WorkloadError, ExecutionError
|
||||
from wa.framework.exception import TargetNotRespondingError, TimeoutError # pylint: disable=redefined-builtin
|
||||
from wa.framework.job import Job
|
||||
from wa.framework.output import init_job_output
|
||||
@ -128,8 +128,8 @@ class ExecutionContext(object):
|
||||
self.run_state.status = status
|
||||
self.run_output.status = status
|
||||
self.run_output.info.end_time = datetime.utcnow()
|
||||
self.run_output.info.duration = (self.run_output.info.end_time -
|
||||
self.run_output.info.start_time)
|
||||
self.run_output.info.duration = (self.run_output.info.end_time
|
||||
- self.run_output.info.start_time)
|
||||
self.write_output()
|
||||
|
||||
def finalize(self):
|
||||
@ -141,21 +141,24 @@ class ExecutionContext(object):
|
||||
self.current_job = self.job_queue.pop(0)
|
||||
job_output = init_job_output(self.run_output, self.current_job)
|
||||
self.current_job.set_output(job_output)
|
||||
self.update_job_state(self.current_job)
|
||||
return self.current_job
|
||||
|
||||
def end_job(self):
|
||||
if not self.current_job:
|
||||
raise RuntimeError('No jobs in progress')
|
||||
self.completed_jobs.append(self.current_job)
|
||||
self.update_job_state(self.current_job)
|
||||
self.output.write_result()
|
||||
self.current_job = None
|
||||
|
||||
def set_status(self, status, force=False):
|
||||
def set_status(self, status, force=False, write=True):
|
||||
if not self.current_job:
|
||||
raise RuntimeError('No jobs in progress')
|
||||
self.current_job.set_status(status, force)
|
||||
self.set_job_status(self.current_job, status, force, write)
|
||||
|
||||
def set_job_status(self, job, status, force=False, write=True):
|
||||
job.set_status(status, force)
|
||||
if write:
|
||||
self.run_output.write_state()
|
||||
|
||||
def extract_results(self):
|
||||
self.tm.extract_results(self)
|
||||
@@ -163,13 +166,8 @@ class ExecutionContext(object):
     def move_failed(self, job):
         self.run_output.move_failed(job.output)

-    def update_job_state(self, job):
-        self.run_state.update_job(job)
-        self.run_output.write_state()
-
     def skip_job(self, job):
-        job.status = Status.SKIPPED
-        self.run_state.update_job(job)
+        self.set_job_status(job, Status.SKIPPED, force=True)
         self.completed_jobs.append(job)

     def skip_remaining_jobs(self):

@@ -249,6 +247,11 @@ class ExecutionContext(object):
     def add_event(self, message):
         self.output.add_event(message)

+    def add_classifier(self, name, value, overwrite=False):
+        self.output.add_classifier(name, value, overwrite)
+        if self.current_job:
+            self.current_job.add_classifier(name, value, overwrite)
+
     def add_metadata(self, key, *args, **kwargs):
         self.output.add_metadata(key, *args, **kwargs)

@@ -288,7 +291,7 @@ class ExecutionContext(object):
         try:
             job.initialize(self)
         except WorkloadError as e:
-            job.set_status(Status.FAILED)
+            self.set_job_status(job, Status.FAILED, write=False)
             log.log_error(e, self.logger)
             failed_ids.append(job.id)

@@ -298,6 +301,7 @@ class ExecutionContext(object):
                 new_queue.append(job)

         self.job_queue = new_queue
+        self.write_state()

     def _load_resource_getters(self):
         self.logger.debug('Loading resource discoverers')

@@ -333,7 +337,7 @@ class Executor(object):
     returning.

     The initial context set up involves combining configuration from various
-    sources, loading of requided workloads, loading and installation of
+    sources, loading of required workloads, loading and installation of
     instruments and output processors, etc. Static validation of the combined
     configuration is also performed.

@@ -349,7 +353,7 @@ class Executor(object):
     def execute(self, config_manager, output):
         """
         Execute the run specified by an agenda. Optionally, selectors may be
-        used to only selecute a subset of the specified agenda.
+        used to only execute a subset of the specified agenda.

         Params::

@@ -399,7 +403,7 @@ class Executor(object):
         attempts = context.cm.run_config.max_retries
         while attempts:
             try:
-                self.target_manager.reboot()
+                self.target_manager.reboot(context)
             except TargetError as e:
                 if attempts:
                     attempts -= 1

@@ -445,7 +449,7 @@ class Executor(object):
         for status in reversed(Status.levels):
             if status in counter:
                 parts.append('{} {}'.format(counter[status], status))
-        self.logger.info(status_summary + ', '.join(parts))
+        self.logger.info('{}{}'.format(status_summary, ', '.join(parts)))

         self.logger.info('Results can be found in {}'.format(output.basepath))

@@ -533,6 +537,9 @@ class Runner(object):
         self.pm.process_run_output(self.context)
         self.pm.export_run_output(self.context)
         self.pm.finalize(self.context)
+        if self.context.reboot_policy.reboot_on_run_completion:
+            self.logger.info('Rebooting target on run completion.')
+            self.context.tm.reboot(self.context)
         signal.disconnect(self._error_signalled_callback, signal.ERROR_LOGGED)
         signal.disconnect(self._warning_signalled_callback, signal.WARNING_LOGGED)

@@ -552,15 +559,15 @@ class Runner(object):
             with signal.wrap('JOB', self, context):
                 context.tm.start()
                 self.do_run_job(job, context)
-                job.set_status(Status.OK)
+                context.set_job_status(job, Status.OK)
         except (Exception, KeyboardInterrupt) as e:  # pylint: disable=broad-except
             log.log_error(e, self.logger)
             if isinstance(e, KeyboardInterrupt):
                 context.run_interrupted = True
-                job.set_status(Status.ABORTED)
+                context.set_job_status(job, Status.ABORTED)
                 raise e
             else:
-                job.set_status(Status.FAILED)
+                context.set_job_status(job, Status.FAILED)
             if isinstance(e, TargetNotRespondingError):
                 raise e
             elif isinstance(e, TargetError):

@@ -583,7 +590,7 @@ class Runner(object):
             self.context.skip_job(job)
             return

-        job.set_status(Status.RUNNING)
+        context.set_job_status(job, Status.RUNNING)
         self.send(signal.JOB_STARTED)

         job.configure_augmentations(context, self.pm)

@@ -594,7 +601,7 @@ class Runner(object):
         try:
             job.setup(context)
         except Exception as e:
-            job.set_status(Status.FAILED)
+            context.set_job_status(job, Status.FAILED)
             log.log_error(e, self.logger)
             if isinstance(e, (TargetError, TimeoutError)):
                 context.tm.verify_target_responsive(context)

@@ -607,10 +614,10 @@ class Runner(object):
                 job.run(context)
             except KeyboardInterrupt:
                 context.run_interrupted = True
-                job.set_status(Status.ABORTED)
+                context.set_job_status(job, Status.ABORTED)
                 raise
             except Exception as e:
-                job.set_status(Status.FAILED)
+                context.set_job_status(job, Status.FAILED)
                 log.log_error(e, self.logger)
                 if isinstance(e, (TargetError, TimeoutError)):
                     context.tm.verify_target_responsive(context)

@@ -623,7 +630,7 @@ class Runner(object):
                 self.pm.process_job_output(context)
                 self.pm.export_job_output(context)
             except Exception as e:
-                job.set_status(Status.PARTIAL)
+                context.set_job_status(job, Status.PARTIAL)
                 if isinstance(e, (TargetError, TimeoutError)):
                     context.tm.verify_target_responsive(context)
                 self.context.record_ui_state('output-error')

@@ -631,7 +638,7 @@ class Runner(object):

         except KeyboardInterrupt:
             context.run_interrupted = True
-            job.set_status(Status.ABORTED)
+            context.set_status(Status.ABORTED)
             raise
         finally:
             # If setup was successfully completed, teardown must

@@ -653,6 +660,9 @@ class Runner(object):
                 self.logger.error(msg.format(job.id, job.iteration, job.status))
                 self.context.failed_jobs += 1
                 self.send(signal.JOB_FAILED)
+                if rc.bail_on_job_failure:
+                    raise ExecutionError('Job {} failed, bailing.'.format(job.id))

         else:  # status not in retry_on_status
             self.logger.info('Job completed with status {}'.format(job.status))
             if job.status != 'ABORTED':

@@ -664,8 +674,9 @@ class Runner(object):
     def retry_job(self, job):
         retry_job = Job(job.spec, job.iteration, self.context)
         retry_job.workload = job.workload
+        retry_job.state = job.state
         retry_job.retries = job.retries + 1
-        retry_job.set_status(Status.PENDING)
+        self.context.set_job_status(retry_job, Status.PENDING, force=True)
         self.context.job_queue.insert(0, retry_job)
         self.send(signal.JOB_RESTARTED)
@@ -31,7 +31,7 @@ import requests
 from wa import Parameter, settings, __file__ as _base_filepath
 from wa.framework.resource import ResourceGetter, SourcePriority, NO_ONE
 from wa.framework.exception import ResourceError
-from wa.utils.misc import (ensure_directory_exists as _d,
+from wa.utils.misc import (ensure_directory_exists as _d, atomic_write_path,
                            ensure_file_directory_exists as _f, sha256, urljoin)
 from wa.utils.types import boolean, caseless_string

@@ -78,15 +78,20 @@ def get_path_matches(resource, files):
     return matches


+# pylint: disable=too-many-return-statements
 def get_from_location(basepath, resource):
     if resource.kind == 'file':
         path = os.path.join(basepath, resource.path)
         if os.path.exists(path):
             return path
     elif resource.kind == 'executable':
-        path = os.path.join(basepath, 'bin', resource.abi, resource.filename)
-        if os.path.exists(path):
-            return path
+        bin_dir = os.path.join(basepath, 'bin', resource.abi)
+        if not os.path.exists(bin_dir):
+            return None
+        for entry in os.listdir(bin_dir):
+            path = os.path.join(bin_dir, entry)
+            if resource.match(path):
+                return path
     elif resource.kind == 'revent':
         path = os.path.join(basepath, 'revent_files')
         if os.path.exists(path):

@@ -234,21 +239,19 @@ class Http(ResourceGetter):
         index_url = urljoin(self.url, 'index.json')
         response = self.geturl(index_url)
         if response.status_code != http.client.OK:
-            message = 'Could not fetch "{}"; recieved "{} {}"'
+            message = 'Could not fetch "{}"; received "{} {}"'
             self.logger.error(message.format(index_url,
                                              response.status_code,
                                              response.reason))
             return {}
-        if sys.version_info[0] == 3:
-            content = response.content.decode('utf-8')
-        else:
-            content = response.content
+        content = response.content.decode('utf-8')
         return json.loads(content)

     def download_asset(self, asset, owner_name):
         url = urljoin(self.url, owner_name, asset['path'])
         local_path = _f(os.path.join(settings.dependencies_directory, '__remote',
                                      owner_name, asset['path'].replace('/', os.sep)))

         if os.path.exists(local_path) and not self.always_fetch:
             local_sha = sha256(local_path)
             if local_sha == asset['sha256']:
@@ -257,14 +260,15 @@ class Http(ResourceGetter):
         self.logger.debug('Downloading {}'.format(url))
         response = self.geturl(url, stream=True)
         if response.status_code != http.client.OK:
-            message = 'Could not download asset "{}"; recieved "{} {}"'
+            message = 'Could not download asset "{}"; received "{} {}"'
             self.logger.warning(message.format(url,
                                                response.status_code,
                                                response.reason))
             return
-        with open(local_path, 'wb') as wfh:
-            for chunk in response.iter_content(chunk_size=self.chunk_size):
-                wfh.write(chunk)
+        with atomic_write_path(local_path) as at_path:
+            with open(at_path, 'wb') as wfh:
+                for chunk in response.iter_content(chunk_size=self.chunk_size):
+                    wfh.write(chunk)
         return local_path

     def geturl(self, url, stream=False):
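The `atomic_write_path` helper used above presumably yields a temporary path that is moved over the destination only on success, so an interrupted download cannot leave a truncated asset behind. A sketch of that pattern under the stated assumption (not the actual wa.utils.misc implementation):

    import os
    import tempfile
    from contextlib import contextmanager

    @contextmanager
    def atomic_write_path_sketch(path):
        # Create the temp file alongside the destination so the final
        # rename stays on one filesystem (and is therefore atomic).
        fd, tmp = tempfile.mkstemp(dir=os.path.dirname(os.path.abspath(path)))
        os.close(fd)
        try:
            yield tmp
            os.replace(tmp, path)  # atomic on POSIX and Windows
        finally:
            if os.path.exists(tmp):
                os.remove(tmp)     # clean up if the body raised

    with atomic_write_path_sketch('asset.bin') as at_path:
        with open(at_path, 'wb') as wfh:
            wfh.write(b'payload')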
@@ -322,7 +326,8 @@ class Filer(ResourceGetter):

     """
     parameters = [
-        Parameter('remote_path', global_alias='remote_assets_path', default='',
+        Parameter('remote_path', global_alias='remote_assets_path',
+                  default=settings.assets_repository,
                   description="""
                   Path, on the local system, where the assets are located.
                   """),
@@ -50,6 +50,7 @@ def init_user_directory(overwrite_existing=False):  # pylint: disable=R0914
     # If running with sudo on POSIX, change the ownership to the real user.
     real_user = os.getenv('SUDO_USER')
     if real_user:
+        # pylint: disable=import-outside-toplevel
         import pwd  # done here as module won't import on win32
         user_entry = pwd.getpwnam(real_user)
         uid, gid = user_entry.pw_uid, user_entry.pw_gid
@@ -98,13 +98,12 @@ and the code to clear these files goes in teardown method. ::

 """

-import sys
 import logging
 import inspect
 from collections import OrderedDict

 from wa.framework import signal
-from wa.framework.plugin import Plugin
+from wa.framework.plugin import TargetedPlugin
 from wa.framework.exception import (TargetNotRespondingError, TimeoutError,  # pylint: disable=redefined-builtin
                                     WorkloadError, TargetError)
 from wa.utils.log import log_error

@@ -325,10 +324,7 @@ def install(instrument, context):
         if not callable(attr):
             msg = 'Attribute {} not callable in {}.'
             raise ValueError(msg.format(attr_name, instrument))
-        if sys.version_info[0] == 3:
-            argspec = inspect.getfullargspec(attr)
-        else:
-            argspec = inspect.getargspec(attr)  # pylint: disable=deprecated-method
+        argspec = inspect.getfullargspec(attr)
         arg_num = len(argspec.args)
         # Instrument callbacks will be passed exactly two arguments: self
         # (the instrument instance to which the callback is bound) and
@@ -421,14 +417,13 @@ def get_disabled():
     return [i for i in installed if not i.is_enabled]


-class Instrument(Plugin):
+class Instrument(TargetedPlugin):
     """
     Base class for instrument implementations.
     """
     kind = "instrument"

-    def __init__(self, target, **kwargs):
-        super(Instrument, self).__init__(**kwargs)
-        self.target = target
+    def __init__(self, *args, **kwargs):
+        super(Instrument, self).__init__(*args, **kwargs)
         self.is_enabled = True
         self.is_broken = False
@@ -23,6 +23,7 @@ from datetime import datetime
 from wa.framework import pluginloader, signal, instrument
 from wa.framework.configuration.core import Status
 from wa.utils.log import indentcontext
+from wa.framework.run import JobState


 class Job(object):

@@ -37,24 +38,29 @@ class Job(object):
     def label(self):
         return self.spec.label

-    @property
-    def classifiers(self):
-        return self.spec.classifiers
-
     @property
     def status(self):
-        return self._status
+        return self.state.status

     @property
     def has_been_initialized(self):
         return self._has_been_initialized

+    @property
+    def retries(self):
+        return self.state.retries
+
     @status.setter
     def status(self, value):
-        self._status = value
+        self.state.status = value
+        self.state.timestamp = datetime.utcnow()
         if self.output:
             self.output.status = value

+    @retries.setter
+    def retries(self, value):
+        self.state.retries = value
+
     def __init__(self, spec, iteration, context):
         self.logger = logging.getLogger('job')
         self.spec = spec
@@ -63,13 +69,13 @@ class Job(object):
         self.workload = None
         self.output = None
         self.run_time = None
-        self.retries = 0
+        self.classifiers = copy(self.spec.classifiers)
         self._has_been_initialized = False
-        self._status = Status.NEW
+        self.state = JobState(self.id, self.label, self.iteration, Status.NEW)

     def load(self, target, loader=pluginloader):
         self.logger.info('Loading job {}'.format(self))
-        if self.iteration == 1:
+        if self.id not in self._workload_cache:
             self.workload = loader.get_workload(self.spec.workload_name,
                                                 target,
                                                 **self.spec.workload_parameters)

@@ -91,7 +97,6 @@ class Job(object):
         self.workload.initialize(context)
         self.set_status(Status.PENDING)
         self._has_been_initialized = True
-        context.update_job_state(self)

     def configure_augmentations(self, context, pm):
         self.logger.info('Configuring augmentations')

@@ -181,6 +186,11 @@ class Job(object):
         if force or self.status < status:
             self.status = status

+    def add_classifier(self, name, value, overwrite=False):
+        if name in self.classifiers and not overwrite:
+            raise ValueError('Cannot overwrite "{}" classifier.'.format(name))
+        self.classifiers[name] = value
+
     def __str__(self):
         return '{} ({}) [{}]'.format(self.id, self.label, self.iteration)
@@ -39,7 +39,8 @@ from wa.framework.run import RunState, RunInfo
 from wa.framework.target.info import TargetInfo
 from wa.framework.version import get_wa_version_with_commit
 from wa.utils.doc import format_simple_table
-from wa.utils.misc import touch, ensure_directory_exists, isiterable, format_ordered_dict
+from wa.utils.misc import (touch, ensure_directory_exists, isiterable,
+                           format_ordered_dict, safe_extract)
 from wa.utils.postgres import get_schema_versions
 from wa.utils.serializer import write_pod, read_pod, Podable, json
 from wa.utils.types import enum, numeric

@@ -165,6 +166,9 @@ class Output(object):
         artifact = self.get_artifact(name)
         return self.get_path(artifact.path)

+    def add_classifier(self, name, value, overwrite=False):
+        self.result.add_classifier(name, value, overwrite)
+
     def add_metadata(self, key, *args, **kwargs):
         self.result.add_metadata(key, *args, **kwargs)

@@ -265,8 +269,8 @@ class RunOutput(Output, RunOutputCommon):
         self._combined_config = None
         self.jobs = []
         self.job_specs = []
-        if (not os.path.isfile(self.statefile) or
-                not os.path.isfile(self.infofile)):
+        if (not os.path.isfile(self.statefile)
+                or not os.path.isfile(self.infofile)):
             msg = '"{}" does not exist or is not a valid WA output directory.'
             raise ValueError(msg.format(self.basepath))
         self.reload()
@@ -410,6 +414,21 @@ class Result(Podable):
             return artifact
         raise HostError('Artifact "{}" not found'.format(name))

+    def add_classifier(self, name, value, overwrite=False):
+        if name in self.classifiers and not overwrite:
+            raise ValueError('Cannot overwrite "{}" classifier.'.format(name))
+        self.classifiers[name] = value
+
+        for metric in self.metrics:
+            if name in metric.classifiers and not overwrite:
+                raise ValueError('Cannot overwrite "{}" classifier; clashes with {}.'.format(name, metric))
+            metric.classifiers[name] = value
+
+        for artifact in self.artifacts:
+            if name in artifact.classifiers and not overwrite:
+                raise ValueError('Cannot overwrite "{}" classifier; clashes with {}.'.format(name, artifact))
+            artifact.classifiers[name] = value
+
     def add_metadata(self, key, *args, **kwargs):
         force = kwargs.pop('force', False)
         if kwargs:
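A toy illustration of the propagation rule implemented above: a classifier added at the result level is stamped onto every metric and artifact, and a clash with an existing value raises unless `overwrite` is passed. Plain dicts stand in for WA's objects here:

    result = {'tag': 'baseline'}
    metrics = [{'tag': 'baseline'}, {}]

    def add_classifier(name, value, overwrite=False):
        for holder in [result] + metrics:
            if name in holder and not overwrite:
                raise ValueError('Cannot overwrite "{}" classifier.'.format(name))
            holder[name] = value

    add_classifier('device', 'juno')          # new key: applied everywhere
    try:
        add_classifier('tag', 'patched')      # clashes with existing 'baseline'
    except ValueError as e:
        print(e)
    add_classifier('tag', 'patched', overwrite=True)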
@@ -758,9 +777,13 @@ def init_job_output(run_output, job):


 def discover_wa_outputs(path):
-    for root, dirs, _ in os.walk(path):
+    # Use topdown=True to allow pruning dirs
+    for root, dirs, _ in os.walk(path, topdown=True):
         if '__meta' in dirs:
             yield RunOutput(root)
+            # Avoid recursing into the artifact as it can be very lengthy if a
+            # large number of files is present (sysfs dump)
+            dirs.clear()


 def _save_raw_config(meta_dir, state):
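The pruning relies on an os.walk() property: with topdown=True, emptying the dirs list in place stops the walk descending into a directory's children. A self-contained sketch of the same idiom (illustrative paths only):

    import os

    def find_wa_outputs(path):
        for root, dirs, _ in os.walk(path, topdown=True):
            if '__meta' in dirs:
                yield root
                dirs.clear()  # prune: skip the run's (potentially huge) artifacts

    for run_dir in find_wa_outputs('.'):
        print(run_dir)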
@@ -832,7 +855,7 @@ class DatabaseOutput(Output):
     def _read_dir_artifact(self, artifact):
         artifact_path = tempfile.mkdtemp(prefix='wa_')
         with tarfile.open(fileobj=self.conn.lobject(int(artifact.path), mode='b'), mode='r|gz') as tar_file:
-            tar_file.extractall(artifact_path)
+            safe_extract(tar_file, artifact_path)
         self.conn.commit()
         return artifact_path
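`safe_extract` is presumably a guard against path-traversal entries in the archive (members named e.g. `../../etc/passwd`), which a plain `extractall` would happily write outside the destination. A minimal sketch of such a guard, under that assumed semantics (not the actual wa.utils.misc implementation):

    import os
    import tarfile

    def safe_extract_sketch(tar, dest):
        dest = os.path.realpath(dest)
        for member in tar:  # iterating works for streamed ('r|gz') archives too
            target = os.path.realpath(os.path.join(dest, member.name))
            if os.path.commonpath([dest, target]) != dest:
                raise RuntimeError('blocked path traversal: {}'.format(member.name))
            tar.extract(member, dest)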
@@ -18,8 +18,6 @@
 import os
 import sys
 import inspect
-import imp
-import string
 import logging
 from collections import OrderedDict, defaultdict
 from itertools import chain

@@ -32,16 +30,10 @@ from wa.framework.exception import (NotFoundError, PluginLoaderError, TargetError,
                                     ValidationError, ConfigError, HostError)
 from wa.utils import log
 from wa.utils.misc import (ensure_directory_exists as _d, walk_modules, load_class,
-                           merge_dicts_simple, get_article)
+                           merge_dicts_simple, get_article, import_path)
 from wa.utils.types import identifier


-if sys.version_info[0] == 3:
-    MODNAME_TRANS = str.maketrans(':/\\.', '____')
-else:
-    MODNAME_TRANS = string.maketrans(':/\\.', '____')
-
-
 class AttributeCollection(object):
     """
     Accumulator for plugin attribute objects (such as Parameters or Artifacts).

@@ -157,6 +149,7 @@ class Alias(object):
             raise ConfigError(msg.format(param, self.name, ext.name))


+# pylint: disable=bad-mcs-classmethod-argument
 class PluginMeta(type):
     """
     This basically adds some magic to plugins to make implementing new plugins,
@@ -246,7 +239,7 @@ class Plugin(with_metaclass(PluginMeta, object)):

     @classmethod
     def get_default_config(cls):
-        return {p.name: p.default for p in cls.parameters}
+        return {p.name: p.default for p in cls.parameters if not p.deprecated}

     @property
     def dependencies_directory(self):

@@ -367,7 +360,7 @@ class Plugin(with_metaclass(PluginMeta, object)):
         self._modules.append(module)

     def __str__(self):
-        return self.name
+        return str(self.name)

     def __repr__(self):
         params = []
@@ -383,12 +376,22 @@ class TargetedPlugin(Plugin):

     """

-    suppoted_targets = []
+    supported_targets = []
+    parameters = [
+        Parameter('cleanup_assets', kind=bool,
+                  global_alias='cleanup_assets',
+                  aliases=['clean_up'],
+                  default=True,
+                  description="""
+                  If ``True``, assets that are deployed or created by the
+                  plugin will be removed again from the device.
+                  """),
+    ]

     @classmethod
     def check_compatible(cls, target):
-        if cls.suppoted_targets:
-            if target.os not in cls.suppoted_targets:
+        if cls.supported_targets:
+            if target.os not in cls.supported_targets:
                 msg = 'Incompatible target OS "{}" for {}'
                 raise TargetError(msg.format(target.os, cls.name))
@@ -611,24 +614,30 @@ class PluginLoader(object):
         self.logger.debug('Checking path %s', path)
         if os.path.isfile(path):
             self._discover_from_file(path)
-        for root, _, files in os.walk(path, followlinks=True):
-            should_skip = False
-            for igpath in ignore_paths:
-                if root.startswith(igpath):
-                    should_skip = True
-                    break
-            if should_skip:
-                continue
-            for fname in files:
-                if os.path.splitext(fname)[1].lower() != '.py':
-                    continue
-                filepath = os.path.join(root, fname)
-                self._discover_from_file(filepath)
+        elif os.path.exists(path):
+            for root, _, files in os.walk(path, followlinks=True):
+                should_skip = False
+                for igpath in ignore_paths:
+                    if root.startswith(igpath):
+                        should_skip = True
+                        break
+                if should_skip:
+                    continue
+                for fname in files:
+                    if os.path.splitext(fname)[1].lower() != '.py':
+                        continue
+                    filepath = os.path.join(root, fname)
+                    self._discover_from_file(filepath)
+        elif not os.path.isabs(path):
+            try:
+                for module in walk_modules(path):
+                    self._discover_in_module(module)
+            except Exception:  # NOQA pylint: disable=broad-except
+                pass

     def _discover_from_file(self, filepath):
         try:
-            modname = os.path.splitext(filepath[1:])[0].translate(MODNAME_TRANS)
-            module = imp.load_source(modname, filepath)
+            module = import_path(filepath)
             self._discover_in_module(module)
         except (SystemExit, ImportError) as e:
             if self.keep_going:

@@ -35,6 +35,7 @@ class __LoaderWrapper(object):
     def reset(self):
         # These imports cannot be done at top level, because of
         # sys.modules manipulation below
+        # pylint: disable=import-outside-toplevel
         from wa.framework.plugin import PluginLoader
         from wa.framework.configuration.core import settings
         self._loader = PluginLoader(settings.plugin_packages,

@@ -16,13 +16,12 @@ import logging
 import os
 import re

-from devlib.utils.android import ApkInfo
-
 from wa.framework import pluginloader
 from wa.framework.plugin import Plugin
 from wa.framework.exception import ResourceError
 from wa.framework.configuration import settings
 from wa.utils import log
+from wa.utils.android import get_cacheable_apk_info
 from wa.utils.misc import get_object_name
 from wa.utils.types import enum, list_or_string, prioritylist, version_tuple
@@ -280,9 +279,9 @@ class ResourceResolver(object):

 def apk_version_matches(path, version):
     version = list_or_string(version)
-    info = ApkInfo(path)
+    info = get_cacheable_apk_info(path)
     for v in version:
-        if info.version_name == v or info.version_code == v:
+        if v in (info.version_name, info.version_code):
             return True
         if loose_version_matching(v, info.version_name):
             return True

@@ -290,7 +289,7 @@ def apk_version_matches(path, version):


 def apk_version_matches_range(path, min_version=None, max_version=None):
-    info = ApkInfo(path)
+    info = get_cacheable_apk_info(path)
     return range_version_matching(info.version_name, min_version, max_version)
@@ -333,18 +332,18 @@ def file_name_matches(path, pattern):


 def uiauto_test_matches(path, uiauto):
-    info = ApkInfo(path)
+    info = get_cacheable_apk_info(path)
     return uiauto == ('com.arm.wa.uiauto' in info.package)


 def package_name_matches(path, package):
-    info = ApkInfo(path)
+    info = get_cacheable_apk_info(path)
     return info.package == package


 def apk_abi_matches(path, supported_abi, exact_abi=False):
     supported_abi = list_or_string(supported_abi)
-    info = ApkInfo(path)
+    info = get_cacheable_apk_info(path)
     # If no native code present, suitable for all devices.
     if not info.native_code:
         return True
@@ -102,13 +102,7 @@ class RunState(Podable):
         self.timestamp = datetime.utcnow()

     def add_job(self, job):
-        job_state = JobState(job.id, job.label, job.iteration, job.status)
-        self.jobs[(job_state.id, job_state.iteration)] = job_state
-
-    def update_job(self, job):
-        state = self.jobs[(job.id, job.iteration)]
-        state.status = job.status
-        state.timestamp = datetime.utcnow()
+        self.jobs[(job.state.id, job.state.iteration)] = job.state

     def get_status_counts(self):
         counter = Counter()
@@ -163,7 +157,7 @@ class JobState(Podable):
         pod['label'] = self.label
         pod['iteration'] = self.iteration
         pod['status'] = self.status.to_pod()
-        pod['retries'] = 0
+        pod['retries'] = self.retries
         pod['timestamp'] = self.timestamp
         return pod
@@ -15,7 +15,7 @@


 """
-This module wraps louie signalling mechanism. It relies on modified version of loiue
+This module wraps louie signalling mechanism. It relies on modified version of louie
 that has prioritization added to handler invocation.

 """

@@ -23,8 +23,9 @@ import sys
 import logging
 from contextlib import contextmanager

+from louie import dispatcher, saferef  # pylint: disable=wrong-import-order
+from louie.dispatcher import _remove_receiver
 import wrapt
-from louie import dispatcher  # pylint: disable=wrong-import-order

 from wa.utils.types import prioritylist, enum
@@ -242,8 +243,8 @@ def connect(handler, signal, sender=dispatcher.Any, priority=0):
         receivers = signals[signal]
     else:
         receivers = signals[signal] = _prioritylist_wrapper()
-    receivers.add(handler, priority)
     dispatcher.connect(handler, signal, sender)
+    receivers.add(saferef.safe_ref(handler, on_delete=_remove_receiver), priority)


 def disconnect(handler, signal, sender=dispatcher.Any):

@@ -268,7 +269,7 @@ def send(signal, sender=dispatcher.Anonymous, *args, **kwargs):
     """
     Sends a signal, causing connected handlers to be invoked.

-    Paramters:
+    Parameters:

     :signal: Signal to be sent. This must be an instance of :class:`wa.core.signal.Signal`
              or its subclasses.
@@ -21,9 +21,11 @@ import tempfile
 import threading
 import time

-from wa.framework.plugin import Parameter
 from wa.framework.exception import WorkerThreadError
+from wa.framework.plugin import Parameter
+from wa.utils.android import LogcatParser
 from wa.utils.misc import touch
+import wa.framework.signal as signal


 class LinuxAssistant(object):
@@ -33,6 +35,9 @@ class LinuxAssistant(object):
     def __init__(self, target):
         self.target = target

+    def initialize(self):
+        pass
+
     def start(self):
         pass

@@ -42,6 +47,9 @@ class LinuxAssistant(object):
     def stop(self):
         pass

+    def finalize(self):
+        pass
+

 class AndroidAssistant(object):
@@ -66,40 +74,111 @@ class AndroidAssistant(object):
                   temporary location on the host. Setting the value of the poll
                   period enables this behavior.
                   """),
+        Parameter('stay_on_mode', kind=int,
+                  constraint=lambda x: 0 <= x <= 7,
+                  description="""
+                  Specify whether the screen should stay on while the device is
+                  charging:
+
+                      0: never stay on
+                      1: with AC charger
+                      2: with USB charger
+                      4: with wireless charger
+
+                  Values can be OR-ed together to produce combinations, for
+                  instance ``7`` will cause the screen to stay on when charging
+                  under any method.
+                  """),
     ]

-    def __init__(self, target, logcat_poll_period=None, disable_selinux=True):
+    def __init__(self, target, logcat_poll_period=None, disable_selinux=True, stay_on_mode=None):
         self.target = target
         self.logcat_poll_period = logcat_poll_period
         self.disable_selinux = disable_selinux
+        self.stay_on_mode = stay_on_mode
+        self.orig_stay_on_mode = self.target.get_stay_on_mode() if stay_on_mode is not None else None
         self.logcat_poller = None
+        self.logger = logging.getLogger('logcat')
+        self._logcat_marker_msg = None
+        self._logcat_marker_tag = None
+        signal.connect(self._before_workload, signal.BEFORE_WORKLOAD_EXECUTION)
+        if self.logcat_poll_period:
+            signal.connect(self._after_workload, signal.AFTER_WORKLOAD_EXECUTION)

     def initialize(self):
         if self.target.is_rooted and self.disable_selinux:
             self.do_disable_selinux()
+        if self.stay_on_mode is not None:
+            self.target.set_stay_on_mode(self.stay_on_mode)

     def start(self):
         if self.logcat_poll_period:
             self.logcat_poller = LogcatPoller(self.target, self.logcat_poll_period)
             self.logcat_poller.start()
+        else:
+            if not self._logcat_marker_msg:
+                self._logcat_marker_msg = 'WA logcat marker for wrap detection'
+                self._logcat_marker_tag = 'WAlog'

     def stop(self):
         if self.logcat_poller:
             self.logcat_poller.stop()

+    def finalize(self):
+        if self.stay_on_mode is not None:
+            self.target.set_stay_on_mode(self.orig_stay_on_mode)
+
     def extract_results(self, context):
         logcat_file = os.path.join(context.output_directory, 'logcat.log')
         self.dump_logcat(logcat_file)
         context.add_artifact('logcat', logcat_file, kind='log')
         self.clear_logcat()
+        if not self._check_logcat_nowrap(logcat_file):
+            self.logger.warning('The main logcat buffer wrapped and lost data;'
+                                ' results that rely on this buffer may be'
+                                ' inaccurate or incomplete.'
+                                )

     def dump_logcat(self, outfile):
         if self.logcat_poller:
             self.logcat_poller.write_log(outfile)
         else:
-            self.target.dump_logcat(outfile)
+            self.target.dump_logcat(outfile, logcat_format='threadtime')

     def clear_logcat(self):
         if self.logcat_poller:
             self.logcat_poller.clear_buffer()
+        else:
+            self.target.clear_logcat()
+
+    def _before_workload(self, _):
+        if self.logcat_poller:
+            self.logcat_poller.start_logcat_wrap_detect()
+        else:
+            self.insert_logcat_marker()
+
+    def _after_workload(self, _):
+        self.logcat_poller.stop_logcat_wrap_detect()
+
+    def _check_logcat_nowrap(self, outfile):
+        if self.logcat_poller:
+            return self.logcat_poller.check_logcat_nowrap(outfile)
+        else:
+            parser = LogcatParser()
+            for event in parser.parse(outfile):
+                if (event.tag == self._logcat_marker_tag
+                        and event.message == self._logcat_marker_msg):
+                    return True
+
+            return False
+
+    def insert_logcat_marker(self):
+        self.logger.debug('Inserting logcat marker')
+        self.target.execute(
+            'log -t "{}" "{}"'.format(
+                self._logcat_marker_tag, self._logcat_marker_msg
+            )
+        )

     def do_disable_selinux(self):
         # SELinux was added in Android 4.3 (API level 18). Trying to
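The `stay_on_mode` values form a bitmask (1, 2 and 4 are single bits), so charger types combine with bitwise OR; this mirrors how Android's stay-awake global setting is encoded, which is the assumed backing store here:

    AC, USB, WIRELESS = 1, 2, 4      # bit values from the description above

    assert AC | USB == 3             # stay on for AC or USB charging
    assert AC | USB | WIRELESS == 7  # stay on for any charger
    assert 6 & AC == 0               # mode 6 excludes AC charging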
@@ -119,15 +198,21 @@ class LogcatPoller(threading.Thread):
         self.period = period
         self.timeout = timeout
         self.stop_signal = threading.Event()
-        self.lock = threading.Lock()
+        self.lock = threading.RLock()
         self.buffer_file = tempfile.mktemp()
         self.last_poll = 0
         self.daemon = True
         self.exc = None
+        self._logcat_marker_tag = 'WALog'
+        self._logcat_marker_msg = 'WA logcat marker for wrap detection:{}'
+        self._marker_count = 0
+        self._start_marker = None
+        self._end_marker = None

     def run(self):
         self.logger.debug('Starting polling')
         try:
+            self.insert_logcat_marker()
             while True:
                 if self.stop_signal.is_set():
                     break

@@ -135,6 +220,7 @@ class LogcatPoller(threading.Thread):
             current_time = time.time()
             if (current_time - self.last_poll) >= self.period:
                 self.poll()
+                self.insert_logcat_marker()
             time.sleep(0.5)
         except Exception:  # pylint: disable=W0703
             self.exc = WorkerThreadError(self.name, sys.exc_info())
@@ -170,9 +256,49 @@ class LogcatPoller(threading.Thread):

     def poll(self):
         self.last_poll = time.time()
-        self.target.dump_logcat(self.buffer_file, append=True, timeout=self.timeout)
+        self.target.dump_logcat(self.buffer_file, append=True, timeout=self.timeout, logcat_format='threadtime')
         self.target.clear_logcat()

+    def insert_logcat_marker(self):
+        self.logger.debug('Inserting logcat marker')
+        with self.lock:
+            self.target.execute(
+                'log -t "{}" "{}"'.format(
+                    self._logcat_marker_tag,
+                    self._logcat_marker_msg.format(self._marker_count)
+                )
+            )
+            self._marker_count += 1
+
+    def check_logcat_nowrap(self, outfile):
+        parser = LogcatParser()
+        counter = self._start_marker
+        for event in parser.parse(outfile):
+            message = self._logcat_marker_msg.split(':')[0]
+            if not (event.tag == self._logcat_marker_tag
+                    and event.message.split(':')[0] == message):
+                continue
+
+            number = int(event.message.split(':')[1])
+            if number > counter:
+                return False
+            elif number == counter:
+                counter += 1
+
+            if counter == self._end_marker:
+                return True
+
+        return False
+
+    def start_logcat_wrap_detect(self):
+        with self.lock:
+            self._start_marker = self._marker_count
+            self.insert_logcat_marker()
+
+    def stop_logcat_wrap_detect(self):
+        with self.lock:
+            self._end_marker = self._marker_count
+

 class ChromeOsAssistant(LinuxAssistant):
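The wrap check works because markers carry a strictly increasing counter: replaying the captured log between the recorded start and end markers, each marker should be the next expected number, and a jump past the expected value means the circular logcat buffer overwrote some markers. A toy version of the same check (simplified: the real code also matches the marker tag, and returns as soon as the end marker is reached):

    def buffer_intact(marker_numbers, start, end):
        counter = start
        for number in marker_numbers:
            if number > counter:      # a marker was lost to buffer wrap
                return False
            elif number == counter:
                counter += 1
        return counter == end

    assert buffer_intact([0, 1, 2, 3], start=0, end=4)
    assert not buffer_intact([0, 1, 3], start=0, end=4)  # marker 2 missing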
@@ -14,14 +14,13 @@
 #

-import inspect
 from collections import OrderedDict
 from copy import copy

 from devlib import (LinuxTarget, AndroidTarget, LocalLinuxTarget,
                     ChromeOsTarget, Platform, Juno, TC2, Gem5SimulationPlatform,
                     AdbConnection, SshConnection, LocalConnection,
-                    Gem5Connection)
+                    TelnetConnection, Gem5Connection)
 from devlib.target import DEFAULT_SHELL_PROMPT
+from devlib.utils.ssh import DEFAULT_SSH_SUDO_COMMAND

 from wa.framework import pluginloader
 from wa.framework.configuration.core import get_config_point_map
@@ -69,11 +68,14 @@ def instantiate_target(tdesc, params, connect=None, extra_platform_params=None):

     for name, value in params.items():
         if name in target_params:
-            tp[name] = value
+            if not target_params[name].deprecated:
+                tp[name] = value
         elif name in platform_params:
-            pp[name] = value
+            if not platform_params[name].deprecated:
+                pp[name] = value
         elif name in conn_params:
-            cp[name] = value
+            if not conn_params[name].deprecated:
+                cp[name] = value
         elif name in assistant_params:
             pass
         else:
@@ -129,7 +131,8 @@ class TargetDescription(object):
         config = {}
         for pattr in param_attrs:
             for p in getattr(self, pattr):
-                config[p.name] = p.default
+                if not p.deprecated:
+                    config[p.name] = p.default
         return config

     def _set(self, attr, vals):
@@ -195,6 +198,12 @@ COMMON_TARGET_PARAMS = [
               description='''
               A regex that matches the shell prompt on the target.
               '''),
+
+    Parameter('max_async', kind=int, default=50,
+              description='''
+              The maximum number of concurrent asynchronous connections to the
+              target maintained at any time.
+              '''),
 ]

 COMMON_PLATFORM_PARAMS = [

@@ -262,7 +271,6 @@ VEXPRESS_PLATFORM_PARAMS = [

               ``dtr``: toggle the DTR line on the serial connection
               ``reboottxt``: create ``reboot.txt`` in the root of the VEMSD mount.
-
               '''),
 ]
@@ -300,6 +308,48 @@ CONNECTION_PARAMS = {
             description="""
             ADB server to connect to.
             """),
+        Parameter(
+            'adb_port', kind=int,
+            description="""
+            ADB port to connect to.
+            """),
+        Parameter(
+            'poll_transfers', kind=bool,
+            default=True,
+            description="""
+            File transfers will be polled for activity. Inactive
+            file transfers are cancelled.
+            """),
+        Parameter(
+            'start_transfer_poll_delay', kind=int,
+            default=30,
+            description="""
+            How long to wait (s) for a transfer to complete
+            before polling transfer activity. Requires ``poll_transfers``
+            to be set.
+            """),
+        Parameter(
+            'total_transfer_timeout', kind=int,
+            default=3600,
+            description="""
+            The total time to elapse before a transfer is cancelled, regardless
+            of its activity. Requires ``poll_transfers`` to be set.
+            """),
+        Parameter(
+            'transfer_poll_period', kind=int,
+            default=30,
+            description="""
+            The period at which transfer activity is sampled. Requires
+            ``poll_transfers`` to be set. Too small values may cause
+            the destination size to appear the same over one or more sample
+            periods, causing improper transfer cancellation.
+            """),
+        Parameter(
+            'adb_as_root', kind=bool,
+            default=False,
+            description="""
+            Specify whether the adb server should be started in root mode.
+            """)
     ],
     SshConnection: [
         Parameter(

@@ -316,6 +366,8 @@ CONNECTION_PARAMS = {
             'password', kind=str,
             description="""
             Password to use.
+            (When connecting to a passwordless machine set to an
+            empty string to prevent attempting ssh key authentication.)
             """),
         Parameter(
             'keyfile', kind=str,
|
||||
"""),
|
||||
Parameter(
|
||||
'port', kind=int,
|
||||
default=22,
|
||||
description="""
|
||||
The port SSH server is listening on on the target.
|
||||
"""),
|
||||
Parameter(
|
||||
'telnet', kind=bool, default=False,
|
||||
'strict_host_check', kind=bool, default=False,
|
||||
description="""
|
||||
If set to ``True``, a Telnet connection, rather than
|
||||
SSH will be used.
|
||||
Specify whether devices should be connected to if
|
||||
their host key does not match the systems known host keys. """),
|
||||
Parameter(
|
||||
'sudo_cmd', kind=str,
|
||||
default=DEFAULT_SSH_SUDO_COMMAND,
|
||||
description="""
|
||||
Sudo command to use. Must have ``{}`` specified
|
||||
somewhere in the string it indicate where the command
|
||||
to be run via sudo is to go.
|
||||
"""),
|
||||
Parameter(
|
||||
'use_scp', kind=bool,
|
||||
default=False,
|
||||
description="""
|
||||
Allow using SCP as method of file transfer instead
|
||||
of the default SFTP.
|
||||
"""),
|
||||
Parameter(
|
||||
'poll_transfers', kind=bool,
|
||||
default=True,
|
||||
description="""
|
||||
File transfers will be polled for activity. Inactive
|
||||
file transfers are cancelled.
|
||||
"""),
|
||||
Parameter(
|
||||
'start_transfer_poll_delay', kind=int,
|
||||
default=30,
|
||||
description="""
|
||||
How long to wait (s) for a transfer to complete
|
||||
before polling transfer activity. Requires ``poll_transfers``
|
||||
to be set.
|
||||
"""),
|
||||
Parameter(
|
||||
'total_transfer_timeout', kind=int,
|
||||
default=3600,
|
||||
description="""
|
||||
The total time to elapse before a transfer is cancelled, regardless
|
||||
of its activity. Requires ``poll_transfers`` to be set.
|
||||
"""),
|
||||
Parameter(
|
||||
'transfer_poll_period', kind=int,
|
||||
default=30,
|
||||
description="""
|
||||
The period at which transfer activity is sampled. Requires
|
||||
``poll_transfers`` to be set. Too small values may cause
|
||||
the destination size to appear the same over one or more sample
|
||||
periods, causing improper transfer cancellation.
|
||||
"""),
|
||||
# Deprecated Parameters
|
||||
Parameter(
|
||||
'telnet', kind=str,
|
||||
description="""
|
||||
Original shell prompt to expect.
|
||||
""",
|
||||
deprecated=True),
|
||||
Parameter(
|
||||
'password_prompt', kind=str,
|
||||
description="""
|
||||
Password prompt to expect
|
||||
""",
|
||||
deprecated=True),
|
||||
Parameter(
|
||||
'original_prompt', kind=str,
|
||||
description="""
|
||||
Original shell prompt to expect.
|
||||
""",
|
||||
deprecated=True),
|
||||
],
|
||||
TelnetConnection: [
|
||||
Parameter(
|
||||
'host', kind=str, mandatory=True,
|
||||
description="""
|
||||
Host name or IP address of the target.
|
||||
"""),
|
||||
Parameter(
|
||||
'username', kind=str, mandatory=True,
|
||||
description="""
|
||||
User name to connect with
|
||||
"""),
|
||||
Parameter(
|
||||
'password', kind=str,
|
||||
description="""
|
||||
Password to use.
|
||||
"""),
|
||||
Parameter(
|
||||
'port', kind=int,
|
||||
description="""
|
||||
The port SSH server is listening on on the target.
|
||||
"""),
|
||||
Parameter(
|
||||
'password_prompt', kind=str,
|
||||
@ -411,16 +550,16 @@ CONNECTION_PARAMS['ChromeOsConnection'] = \
|
||||
CONNECTION_PARAMS[AdbConnection] + CONNECTION_PARAMS[SshConnection]
|
||||
|
||||
|
||||
# name --> ((target_class, conn_class), params_list, defaults)
|
||||
# name --> ((target_class, conn_class, unsupported_platforms), params_list, defaults)
|
||||
TARGETS = {
|
||||
'linux': ((LinuxTarget, SshConnection), COMMON_TARGET_PARAMS, None),
|
||||
'android': ((AndroidTarget, AdbConnection), COMMON_TARGET_PARAMS +
|
||||
'linux': ((LinuxTarget, SshConnection, []), COMMON_TARGET_PARAMS, None),
|
||||
'android': ((AndroidTarget, AdbConnection, []), COMMON_TARGET_PARAMS +
|
||||
[Parameter('package_data_directory', kind=str, default='/data/data',
|
||||
description='''
|
||||
Directory containing Android data
|
||||
'''),
|
||||
], None),
|
||||
'chromeos': ((ChromeOsTarget, 'ChromeOsConnection'), COMMON_TARGET_PARAMS +
|
||||
'chromeos': ((ChromeOsTarget, 'ChromeOsConnection', []), COMMON_TARGET_PARAMS +
|
||||
[Parameter('package_data_directory', kind=str, default='/data/data',
|
||||
description='''
|
||||
Directory containing Android data
|
||||
@ -441,7 +580,8 @@ TARGETS = {
|
||||
the need for privilege elevation.
|
||||
'''),
|
||||
], None),
|
||||
'local': ((LocalLinuxTarget, LocalConnection), COMMON_TARGET_PARAMS, None),
|
||||
'local': ((LocalLinuxTarget, LocalConnection, [Juno, Gem5SimulationPlatform, TC2]),
|
||||
COMMON_TARGET_PARAMS, None),
|
||||
}
|
||||
|
||||
# name --> assistant
|
||||
@ -452,31 +592,87 @@ ASSISTANTS = {
|
||||
'chromeos': ChromeOsAssistant
|
||||
}
|
||||
|
||||
# name --> ((platform_class, conn_class), params_list, defaults, target_defaults)
|
||||
# Platform specific parameter overrides.
|
||||
JUNO_PLATFORM_OVERRIDES = [
|
||||
Parameter('baudrate', kind=int, default=115200,
|
||||
description='''
|
||||
Baud rate for the serial connection.
|
||||
'''),
|
||||
Parameter('vemsd_mount', kind=str, default='/media/JUNO',
|
||||
description='''
|
||||
VExpress MicroSD card mount location. This is a MicroSD card in
|
||||
the VExpress device that is mounted on the host via USB. The card
|
||||
contains configuration files for the platform and firmware and
|
||||
kernel images to be flashed.
|
||||
'''),
|
||||
Parameter('bootloader', kind=str, default='u-boot',
|
||||
allowed_values=['uefi', 'uefi-shell', 'u-boot', 'bootmon'],
|
||||
description='''
|
||||
Selects the bootloader mechanism used by the board. Depending on
|
||||
firmware version, a number of possible boot mechanisms may be use.
|
||||
|
||||
Please see ``devlib`` documentation for descriptions.
|
||||
'''),
|
||||
Parameter('hard_reset_method', kind=str, default='dtr',
|
||||
allowed_values=['dtr', 'reboottxt'],
|
||||
description='''
|
||||
There are a couple of ways to reset VersatileExpress board if the
|
||||
software running on the board becomes unresponsive. Both require
|
||||
configuration to be enabled (please see ``devlib`` documentation).
|
||||
|
||||
``dtr``: toggle the DTR line on the serial connection
|
||||
``reboottxt``: create ``reboot.txt`` in the root of the VEMSD mount.
|
||||
'''),
|
||||
]
|
||||
TC2_PLATFORM_OVERRIDES = [
|
||||
Parameter('baudrate', kind=int, default=38400,
|
||||
description='''
|
||||
Baud rate for the serial connection.
|
||||
'''),
|
||||
Parameter('vemsd_mount', kind=str, default='/media/VEMSD',
|
||||
description='''
|
||||
VExpress MicroSD card mount location. This is a MicroSD card in
|
||||
the VExpress device that is mounted on the host via USB. The card
|
||||
contains configuration files for the platform and firmware and
|
||||
kernel images to be flashed.
|
||||
'''),
|
||||
Parameter('bootloader', kind=str, default='bootmon',
|
||||
allowed_values=['uefi', 'uefi-shell', 'u-boot', 'bootmon'],
|
||||
description='''
|
||||
Selects the bootloader mechanism used by the board. Depending on
|
||||
firmware version, a number of possible boot mechanisms may be use.
|
||||
|
||||
Please see ``devlib`` documentation for descriptions.
|
||||
'''),
|
||||
Parameter('hard_reset_method', kind=str, default='reboottxt',
|
||||
allowed_values=['dtr', 'reboottxt'],
|
||||
description='''
|
||||
There are a couple of ways to reset VersatileExpress board if the
|
||||
software running on the board becomes unresponsive. Both require
|
||||
configuration to be enabled (please see ``devlib`` documentation).
|
||||
|
||||
``dtr``: toggle the DTR line on the serial connection
|
||||
``reboottxt``: create ``reboot.txt`` in the root of the VEMSD mount.
|
||||
'''),
|
||||
]
|
||||
|
||||
# name --> ((platform_class, conn_class, conn_overrides), params_list, defaults, target_overrides)
|
||||
# Note: normally, connection is defined by the Target name, but
|
||||
# platforms may choose to override it
|
||||
# Note: the target_defaults allows you to override common target_params for a
|
||||
# Note: the target_overrides allows you to override common target_params for a
|
||||
# particular platform. Parameters you can override are in COMMON_TARGET_PARAMS
|
||||
# Example of overriding one of the target parameters: Replace last None with:
|
||||
# {'shell_prompt': CUSTOM__SHELL_PROMPT}
|
||||
# Example of overriding one of the target parameters: Replace last `None` with
|
||||
# a list of `Parameter` objects to be used instead.
|
||||
PLATFORMS = {
|
||||
'generic': ((Platform, None), COMMON_PLATFORM_PARAMS, None, None),
|
||||
'juno': ((Juno, None), COMMON_PLATFORM_PARAMS + VEXPRESS_PLATFORM_PARAMS,
|
||||
{
|
||||
'vemsd_mount': '/media/JUNO',
|
||||
'baudrate': 115200,
|
||||
'bootloader': 'u-boot',
|
||||
'hard_reset_method': 'dtr',
|
||||
},
|
||||
None),
|
||||
'tc2': ((TC2, None), COMMON_PLATFORM_PARAMS + VEXPRESS_PLATFORM_PARAMS,
|
||||
{
|
||||
'vemsd_mount': '/media/VEMSD',
|
||||
'baudrate': 38400,
|
||||
'bootloader': 'bootmon',
|
||||
'hard_reset_method': 'reboottxt',
|
||||
}, None),
|
||||
'gem5': ((Gem5SimulationPlatform, Gem5Connection), GEM5_PLATFORM_PARAMS, None, None),
|
||||
'generic': ((Platform, None, None), COMMON_PLATFORM_PARAMS, None, None),
|
||||
'juno': ((Juno, None, [
|
||||
Parameter('host', kind=str, mandatory=False,
|
||||
description="Host name or IP address of the target."),
|
||||
]
|
||||
), COMMON_PLATFORM_PARAMS + VEXPRESS_PLATFORM_PARAMS, JUNO_PLATFORM_OVERRIDES, None),
|
||||
'tc2': ((TC2, None, None), COMMON_PLATFORM_PARAMS + VEXPRESS_PLATFORM_PARAMS,
|
||||
TC2_PLATFORM_OVERRIDES, None),
|
||||
'gem5': ((Gem5SimulationPlatform, Gem5Connection, None), GEM5_PLATFORM_PARAMS, None, None),
|
||||
}
|
||||
|
||||
|
||||
@@ -496,16 +692,17 @@ class DefaultTargetDescriptor(TargetDescriptor):
         # pylint: disable=attribute-defined-outside-init,too-many-locals
         result = []
         for target_name, target_tuple in TARGETS.items():
-            (target, conn), target_params = self._get_item(target_tuple)
+            (target, conn, unsupported_platforms), target_params = self._get_item(target_tuple)
             assistant = ASSISTANTS[target_name]
             conn_params = CONNECTION_PARAMS[conn]
             for platform_name, platform_tuple in PLATFORMS.items():
                 platform_target_defaults = platform_tuple[-1]
                 platform_tuple = platform_tuple[0:-1]
-                (platform, plat_conn), platform_params = self._get_item(platform_tuple)
+                (platform, plat_conn, conn_defaults), platform_params = self._get_item(platform_tuple)
+                if platform in unsupported_platforms:
+                    continue
                 # Add target defaults specified in the Platform tuple
-                target_params = self._apply_param_defaults(target_params,
-                                                           platform_target_defaults)
+                target_params = self._override_params(target_params, platform_target_defaults)
                 name = '{}_{}'.format(platform_name, target_name)
                 td = TargetDescription(name, self)
                 td.target = target
@@ -517,31 +714,31 @@ class DefaultTargetDescriptor(TargetDescriptor):

                 if plat_conn:
                     td.conn = plat_conn
-                    td.conn_params = CONNECTION_PARAMS[plat_conn]
+                    td.conn_params = self._override_params(CONNECTION_PARAMS[plat_conn],
+                                                           conn_defaults)
                 else:
                     td.conn = conn
-                    td.conn_params = conn_params
+                    td.conn_params = self._override_params(conn_params, conn_defaults)

                 result.append(td)
         return result

-    def _apply_param_defaults(self, params, defaults):  # pylint: disable=no-self-use
-        '''Adds parameters in the defaults dict to params list.
-        Return updated params as a list (idempotent function).'''
-        if not defaults:
+    def _override_params(self, params, overrides):  # pylint: disable=no-self-use
+        '''Returns a new list of parameters replacing any parameter with the
+        corresponding parameter in overrides'''
+        if not overrides:
             return params
-        param_map = OrderedDict((p.name, copy(p)) for p in params)
-        for name, value in defaults.items():
-            if name not in param_map:
-                raise ValueError('Unexpected default "{}"'.format(name))
-            param_map[name].default = value
-        # Convert the OrderedDict to a list to return the same type
+        param_map = {p.name: p for p in params}
+        for override in overrides:
+            if override.name in param_map:
+                param_map[override.name] = override
+        # Return the list of overridden parameters
         return list(param_map.values())

     def _get_item(self, item_tuple):
-        cls, params, defaults = item_tuple
-        updated_params = self._apply_param_defaults(params, defaults)
-        return cls, updated_params
+        cls_tuple, params, defaults = item_tuple
+        updated_params = self._override_params(params, defaults)
+        return cls_tuple, updated_params


 _adhoc_target_descriptions = []
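Unlike the old defaults dict, `_override_params` swaps in whole `Parameter` objects, so an override can change kind, description and constraints, not just the default value. A self-contained sketch of the merge semantics (a namedtuple stands in for WA's Parameter):

    from collections import namedtuple

    Param = namedtuple('Param', 'name default')

    def override_params(params, overrides):
        param_map = {p.name: p for p in params}
        for override in overrides or []:
            if override.name in param_map:      # unknown names are ignored
                param_map[override.name] = override
        return list(param_map.values())

    base = [Param('baudrate', 115200), Param('vemsd_mount', '/media/JUNO')]
    tc2 = [Param('baudrate', 38400), Param('vemsd_mount', '/media/VEMSD')]
    print(override_params(base, tc2))
    # [Param(name='baudrate', default=38400), Param(name='vemsd_mount', default='/media/VEMSD')]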
@@ -584,7 +781,7 @@ def _get_target_defaults(target):


 def add_description_for_target(target, description=None, **kwargs):
-    (base_name, ((_, base_conn), base_params, _)) = _get_target_defaults(target)
+    (base_name, ((_, base_conn, _), base_params, _)) = _get_target_defaults(target)

     if 'target_params' not in kwargs:
         kwargs['target_params'] = base_params

@@ -592,7 +789,7 @@ def add_description_for_target(target, description=None, **kwargs):
     if 'platform' not in kwargs:
         kwargs['platform'] = Platform
     if 'platform_params' not in kwargs:
-        for (plat, conn), params, _, _ in PLATFORMS.values():
+        for (plat, conn, _), params, _, _ in PLATFORMS.values():
             if plat == kwargs['platform']:
                 kwargs['platform_params'] = params
                 if conn is not None and kwargs['conn'] is None:
@@ -23,6 +23,7 @@ from devlib.utils.android import AndroidProperties
 from wa.framework.configuration.core import settings
 from wa.framework.exception import ConfigError
 from wa.utils.serializer import read_pod, write_pod, Podable
+from wa.utils.misc import atomic_write_path


 def cpuinfo_from_pod(pod):
@@ -229,16 +230,15 @@ def get_target_info(target):
     info.is_rooted = target.is_rooted
     info.kernel_version = target.kernel_version
     info.kernel_config = target.config
-    info.hostname = target.hostname
-    info.hostid = target.hostid

     try:
         info.sched_features = target.read_value('/sys/kernel/debug/sched_features').split()
     except TargetError:
         # best effort -- debugfs might not be mounted
         pass

+    hostid_string = target.execute('{} hostid'.format(target.busybox)).strip()
+    info.hostid = int(hostid_string, 16)
+    info.hostname = target.execute('{} hostname'.format(target.busybox)).strip()
+
     for i, name in enumerate(target.cpuinfo.cpu_names):
         cpu = CpuInfo()
         cpu.id = i

@@ -286,11 +286,13 @@ def read_target_info_cache():
 def write_target_info_cache(cache):
     if not os.path.exists(settings.cache_directory):
         os.makedirs(settings.cache_directory)
-    write_pod(cache, settings.target_info_cache_file)
+    with atomic_write_path(settings.target_info_cache_file) as at_path:
+        write_pod(cache, at_path)


-def get_target_info_from_cache(system_id):
-    cache = read_target_info_cache()
+def get_target_info_from_cache(system_id, cache=None):
+    if cache is None:
+        cache = read_target_info_cache()
     pod = cache.get(system_id, None)

     if not pod:

@@ -304,8 +306,9 @@ def get_target_info_from_cache(system_id):
     return TargetInfo.from_pod(pod)


-def cache_target_info(target_info, overwrite=False):
-    cache = read_target_info_cache()
+def cache_target_info(target_info, overwrite=False, cache=None):
+    if cache is None:
+        cache = read_target_info_cache()
     if target_info.system_id in cache and not overwrite:
         raise ValueError('TargetInfo for {} is already in cache.'.format(target_info.system_id))
     cache[target_info.system_id] = target_info.to_pod()

@@ -24,8 +24,10 @@ from wa.framework.plugin import Parameter
 from wa.framework.target.descriptor import (get_target_description,
                                             instantiate_target,
                                             instantiate_assistant)
-from wa.framework.target.info import get_target_info, get_target_info_from_cache, cache_target_info
+from wa.framework.target.info import (get_target_info, get_target_info_from_cache,
+                                      cache_target_info, read_target_info_cache)
 from wa.framework.target.runtime_parameter_manager import RuntimeParameterManager
+from wa.utils.types import module_name_set


 class TargetManager(object):

@@ -55,6 +57,7 @@ class TargetManager(object):

     def initialize(self):
         self._init_target()
+        self.assistant.initialize()

         # If target supports hotplugging, online all cpus before perform discovery
         # and restore original configuration after completed.

@@ -75,6 +78,8 @@ class TargetManager(object):
     def finalize(self):
         if not self.target:
             return
+        if self.assistant:
+            self.assistant.finalize()
         if self.disconnect or isinstance(self.target.platform, Gem5SimulationPlatform):
             self.logger.info('Disconnecting from the device')
             with signal.wrap('TARGET_DISCONNECT'):
|
||||
|
||||
@memoized
|
||||
def get_target_info(self):
|
||||
info = get_target_info_from_cache(self.target.system_id)
|
||||
cache = read_target_info_cache()
|
||||
info = get_target_info_from_cache(self.target.system_id, cache=cache)
|
||||
|
||||
if info is None:
|
||||
info = get_target_info(self.target)
|
||||
cache_target_info(info)
|
||||
cache_target_info(info, cache=cache)
|
||||
else:
|
||||
# If module configuration has changed form when the target info
|
||||
# was previously cached, it is possible additional info will be
|
||||
# available, so should re-generate the cache.
|
||||
if set(info.modules) != set(self.target.modules):
|
||||
if module_name_set(info.modules) != module_name_set(self.target.modules):
|
||||
info = get_target_info(self.target)
|
||||
cache_target_info(info, overwrite=True)
|
||||
cache_target_info(info, overwrite=True, cache=cache)
|
||||
|
||||
return info
|
||||
|
||||
|
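The move from set() to module_name_set() matters because module specs can mix plain name strings with one-entry mappings carrying module parameters; a raw set() over such a list either raises TypeError (dicts are unhashable) or treats the same module as different. A hedged illustration; the sample specs and the name_of() helper are invented for this sketch and are not WA's implementation:

    cached_modules = ['cpufreq', {'bl': {'cluster_sizes': [4, 4]}}]
    target_modules = [{'bl': {'cluster_sizes': [4, 4]}}, 'cpufreq']

    def name_of(spec):
        # a module spec is either a bare name or a single-entry {name: params} mapping
        return spec if isinstance(spec, str) else list(spec)[0]

    # set(cached_modules) would raise TypeError; comparing by *name* is robust
    assert {name_of(m) for m in cached_modules} == {name_of(m) for m in target_modules}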
@@ -178,7 +178,7 @@ class HotplugRuntimeConfig(RuntimeConfig):
             raise TargetError('Target does not appear to support hotplug')

     def validate_parameters(self):
-        if len(self.num_cores) == self.target.number_of_cpus:
+        if self.num_cores and len(self.num_cores) == self.target.number_of_cpus:
             if all(v is False for v in list(self.num_cores.values())):
                 raise ValueError('Cannot set number of all cores to 0')

@@ -694,7 +694,7 @@ class CpufreqRuntimeConfig(RuntimeConfig):
         else:
             common_freqs = common_freqs.intersection(self.supported_cpu_freqs.get(cpu) or set())
             all_freqs = all_freqs.union(self.supported_cpu_freqs.get(cpu) or set())
-            common_gov = common_gov.intersection(self.supported_cpu_governors.get(cpu))
+            common_gov = common_gov.intersection(self.supported_cpu_governors.get(cpu) or set())

         return all_freqs, common_freqs, common_gov

@@ -732,7 +732,7 @@ class IdleStateValue(object):
         '''Checks passed state and converts to its ID'''
         value = caseless_string(value)
         for s_id, s_name, s_desc in self.values:
-            if value == s_id or value == s_name or value == s_desc:
+            if value in (s_id, s_name, s_desc):
                 return s_id
         msg = 'Invalid IdleState: "{}"; Must be in {}'
         raise ValueError(msg.format(value, self.values))
@@ -878,6 +878,11 @@ class AndroidRuntimeConfig(RuntimeConfig):
         if value is not None:
             obj.config['screen_on'] = value

+    @staticmethod
+    def set_unlock_screen(obj, value):
+        if value is not None:
+            obj.config['unlock_screen'] = value
+
     def __init__(self, target):
         self.config = defaultdict(dict)
         super(AndroidRuntimeConfig, self).__init__(target)
@@ -930,6 +935,16 @@ class AndroidRuntimeConfig(RuntimeConfig):
             Specify whether the device screen should be on
             """)

+        param_name = 'unlock_screen'
+        self._runtime_params[param_name] = \
+            RuntimeParameter(
+                param_name, kind=str,
+                default=None,
+                setter=self.set_unlock_screen,
+                description="""
+                Specify how the device screen should be unlocked (e.g., vertical)
+                """)
+
     def check_target(self):
         if self.target.os != 'android' and self.target.os != 'chromeos':
             raise ConfigError('Target does not appear to be running Android')
@@ -940,6 +955,7 @@ class AndroidRuntimeConfig(RuntimeConfig):
             pass

     def commit(self):
+        # pylint: disable=too-many-branches
         if 'airplane_mode' in self.config:
             new_airplane_mode = self.config['airplane_mode']
             old_airplane_mode = self.target.get_airplane_mode()
@@ -964,13 +980,20 @@ class AndroidRuntimeConfig(RuntimeConfig):

         if 'brightness' in self.config:
             self.target.set_brightness(self.config['brightness'])

         if 'rotation' in self.config:
             self.target.set_rotation(self.config['rotation'])

         if 'screen_on' in self.config:
             if self.config['screen_on']:
                 self.target.ensure_screen_is_on()
             else:
                 self.target.ensure_screen_is_off()

+        if self.config.get('unlock_screen'):
+            self.target.ensure_screen_is_on()
+            if self.target.is_screen_locked():
+                self.target.swipe_to_unlock(self.config['unlock_screen'])
+
     def clear(self):
         self.config = {}
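The unlock_screen value set through the new runtime parameter drives the swipe direction used in commit() above. A small sketch of the resulting target interaction, assuming a connected devlib Android target (the helper function is hypothetical):

    def apply_unlock(target, direction='vertical'):
        # mirrors the unlock_screen branch of AndroidRuntimeConfig.commit()
        target.ensure_screen_is_on()
        if target.is_screen_locked():
            target.swipe_to_unlock(direction)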
@@ -22,6 +22,7 @@ from wa.framework.target.runtime_config import (SysfileValuesRuntimeConfig,
                                                 CpuidleRuntimeConfig,
                                                 AndroidRuntimeConfig)
 from wa.utils.types import obj_dict, caseless_string
+from wa.framework import pluginloader


 class RuntimeParameterManager(object):
@@ -37,9 +38,16 @@ class RuntimeParameterManager(object):

     def __init__(self, target):
         self.target = target
-        self.runtime_configs = [cls(self.target) for cls in self.runtime_config_cls]
         self.runtime_params = {}

+        try:
+            for rt_cls in pluginloader.list_plugins(kind='runtime-config'):
+                if rt_cls not in self.runtime_config_cls:
+                    self.runtime_config_cls.append(rt_cls)
+        except ValueError:
+            pass
+        self.runtime_configs = [cls(self.target) for cls in self.runtime_config_cls]
+
         runtime_parameter = namedtuple('RuntimeParameter', 'cfg_point, rt_config')
         for cfg in self.runtime_configs:
             for param in cfg.supported_parameters:
@@ -11,8 +11,8 @@ android {
 }

 dependencies {
-    compile fileTree(include: ['*.jar'], dir: 'libs')
-    compile 'com.android.support.test:runner:0.5'
-    compile 'com.android.support.test:rules:0.5'
-    compile 'com.android.support.test.uiautomator:uiautomator-v18:2.1.2'
+    implementation fileTree(include: ['*.jar'], dir: 'libs')
+    implementation 'com.android.support.test:runner:0.5'
+    implementation 'com.android.support.test:rules:0.5'
+    implementation 'com.android.support.test.uiautomator:uiautomator-v18:2.1.2'
 }
@@ -573,9 +573,29 @@ public class BaseUiAutomation {
         }
     }

+    // If an an app is not designed for running on the latest version of android
+    // (currently Q) an additional screen can popup asking to confirm permissions.
+    public void dismissAndroidPermissionPopup() throws Exception {
+        UiObject permissionAccess =
+            mDevice.findObject(new UiSelector().textMatches(
+                ".*Choose what to allow .* to access"));
+        UiObject continueButton =
+            mDevice.findObject(new UiSelector().resourceId("com.android.permissioncontroller:id/continue_button")
+                                               .textContains("Continue"));
+        if (permissionAccess.exists() && continueButton.exists()) {
+            continueButton.click();
+        }
+    }
+
     // If an an app is not designed for running on the latest version of android
     // (currently Q) dissmiss the warning popup if present.
     public void dismissAndroidVersionPopup() throws Exception {
+
+        // Ensure we have dissmied any permission screens before looking for the version popup
+        dismissAndroidPermissionPopup();
+
         UiObject warningText =
             mDevice.findObject(new UiSelector().textContains(
                 "This app was built for an older version of Android"));
@@ -588,6 +608,29 @@
         }
     }

+    // If Chrome is a fresh install then these popups may be presented
+    // dismiss them if visible.
+    public void dismissChromePopup() throws Exception {
+        UiObject accept =
+            mDevice.findObject(new UiSelector().resourceId("com.android.chrome:id/terms_accept")
+                                               .className("android.widget.Button"));
+        if (accept.waitForExists(3000)){
+            accept.click();
+            UiObject negative =
+                mDevice.findObject(new UiSelector().resourceId("com.android.chrome:id/negative_button")
+                                                   .className("android.widget.Button"));
+            if (negative.waitForExists(10000)) {
+                negative.click();
+            }
+        }
+        UiObject lite =
+            mDevice.findObject(new UiSelector().resourceId("com.android.chrome:id/button_secondary")
+                                               .className("android.widget.Button"));
+        if (lite.exists()){
+            lite.click();
+        }
+    }
+
     // Override getParams function to decode a url encoded parameter bundle before
     // passing it to workloads.
     public Bundle getParams() {
@@ -3,9 +3,10 @@
 buildscript {
     repositories {
         jcenter()
+        google()
     }
     dependencies {
-        classpath 'com.android.tools.build:gradle:2.3.2'
+        classpath 'com.android.tools.build:gradle:7.2.1'


         // NOTE: Do not place your application dependencies here; they belong
@@ -16,6 +17,7 @@ buildscript {
 allprojects {
     repositories {
         jcenter()
+        google()
     }
 }
@@ -3,4 +3,4 @@ distributionBase=GRADLE_USER_HOME
 distributionPath=wrapper/dists
 zipStoreBase=GRADLE_USER_HOME
 zipStorePath=wrapper/dists
-distributionUrl=https\://services.gradle.org/distributions/gradle-3.3-all.zip
+distributionUrl=https\://services.gradle.org/distributions/gradle-7.3.3-all.zip
Binary file not shown.
@@ -21,9 +21,9 @@ from subprocess import Popen, PIPE

 VersionTuple = namedtuple('Version', ['major', 'minor', 'revision', 'dev'])

-version = VersionTuple(3, 2, 0, '')
+version = VersionTuple(3, 4, 0, 'dev1')

-required_devlib_version = VersionTuple(1, 2, 0, '')
+required_devlib_version = VersionTuple(1, 4, 0, 'dev3')


 def format_version(v):
@@ -48,13 +48,13 @@ def get_wa_version_with_commit():


 def get_commit():
-    p = Popen(['git', 'rev-parse', 'HEAD'],
-              cwd=os.path.dirname(__file__), stdout=PIPE, stderr=PIPE)
+    try:
+        p = Popen(['git', 'rev-parse', 'HEAD'],
+                  cwd=os.path.dirname(__file__), stdout=PIPE, stderr=PIPE)
+    except FileNotFoundError:
+        return None
     std, _ = p.communicate()
     p.wait()
     if p.returncode:
         return None
-    if sys.version_info[0] == 3 and isinstance(std, bytes):
-        return std[:8].decode(sys.stdout.encoding or 'utf-8')
-    else:
-        return std[:8]
+    return std[:8].decode(sys.stdout.encoding or 'utf-8')
@@ -22,8 +22,8 @@ try:
 except ImportError:
     from pipes import quote

-from devlib.utils.android import ApkInfo
+from wa.utils.android import get_cacheable_apk_info, build_apk_launch_command

 from wa.framework.plugin import TargetedPlugin, Parameter
 from wa.framework.resource import (ApkFile, ReventFile,
                                    File, loose_version_matching,
@@ -32,6 +32,7 @@ from wa.framework.exception import WorkloadError, ConfigError
 from wa.utils.types import ParameterDict, list_or_string, version_tuple
 from wa.utils.revent import ReventRecorder
 from wa.utils.exec_control import once_per_instance
+from wa.utils.misc import atomic_write_path


 class Workload(TargetedPlugin):
@@ -44,14 +45,6 @@ class Workload(TargetedPlugin):
     kind = 'workload'

     parameters = [
-        Parameter('cleanup_assets', kind=bool,
-                  global_alias='cleanup_assets',
-                  aliases=['clean_up'],
-                  default=True,
-                  description="""
-                  If ``True``, assets that are deployed or created as part of the
-                  workload will be removed again from the device.
-                  """),
         Parameter('uninstall', kind=bool,
                   default=True,
                   description="""
@@ -131,13 +124,11 @@ class Workload(TargetedPlugin):
         Execute the workload. This is the method that performs the actual
         "work" of the workload.
         """
-        pass

     def extract_results(self, context):
         """
         Extract results on the target
         """
-        pass

     def update_output(self, context):
         """
@@ -145,11 +136,9 @@ class Workload(TargetedPlugin):
         metrics and artifacts for this workload iteration.

         """
-        pass

     def teardown(self, context):
         """ Perform any final clean up for the Workload. """
-        pass

     @once_per_instance
     def finalize(self, context):
@@ -191,6 +180,7 @@ class ApkWorkload(Workload):
     activity = None
     view = None
     clear_data_on_reset = True
+    apk_arguments = {}

     # Set this to True to mark that this workload requires the target apk to be run
     # for initialisation purposes before the main run is performed.
@@ -300,7 +290,8 @@ class ApkWorkload(Workload):
                 clear_data_on_reset=self.clear_data_on_reset,
                 activity=self.activity,
                 min_version=self.min_version,
-                max_version=self.max_version)
+                max_version=self.max_version,
+                apk_arguments=self.apk_arguments)

     def validate(self):
         if self.min_version and self.max_version:
@@ -332,7 +323,6 @@ class ApkWorkload(Workload):
         Perform the setup necessary to rerun the workload. Only called if
         ``requires_rerun`` is set.
         """
-        pass

     def teardown(self, context):
         super(ApkWorkload, self).teardown(context)
@@ -530,7 +520,7 @@ class UiAutomatorGUI(object):
     def init_resources(self, resolver):
         self.uiauto_file = resolver.get(ApkFile(self.owner, uiauto=True))
         if not self.uiauto_package:
-            uiauto_info = ApkInfo(self.uiauto_file)
+            uiauto_info = get_cacheable_apk_info(self.uiauto_file)
             self.uiauto_package = uiauto_info.package

     def init_commands(self):
@@ -698,7 +688,7 @@ class PackageHandler(object):
     def __init__(self, owner, install_timeout=300, version=None, variant=None,
                  package_name=None, strict=False, force_install=False, uninstall=False,
                  exact_abi=False, prefer_host_package=True, clear_data_on_reset=True,
-                 activity=None, min_version=None, max_version=None):
+                 activity=None, min_version=None, max_version=None, apk_arguments=None):
         self.logger = logging.getLogger('apk')
         self.owner = owner
         self.target = self.owner.target
@@ -721,6 +711,7 @@ class PackageHandler(object):
         self.apk_version = None
         self.logcat_log = None
         self.error_msg = None
+        self.apk_arguments = apk_arguments

     def initialize(self, context):
         self.resolve_package(context)
@@ -749,7 +740,7 @@ class PackageHandler(object):
             self.resolve_package_from_host(context)

         if self.apk_file:
-            self.apk_info = ApkInfo(self.apk_file)
+            self.apk_info = get_cacheable_apk_info(self.apk_file)
         else:
             if self.error_msg:
                 raise WorkloadError(self.error_msg)
@@ -865,11 +856,10 @@ class PackageHandler(object):
             self.apk_version = host_version

     def start_activity(self):
-        if not self.activity:
-            cmd = 'am start -W {}'.format(self.apk_info.package)
-        else:
-            cmd = 'am start -W -n {}/{}'.format(self.apk_info.package,
-                                                self.activity)
+
+        cmd = build_apk_launch_command(self.apk_info.package, self.activity,
+                                       self.apk_arguments)

         output = self.target.execute(cmd)
         if 'Error:' in output:
             # this will dismiss any error dialogs
@@ -904,16 +894,21 @@ class PackageHandler(object):
             message = 'Cannot retrieve "{}" as not installed on Target'
             raise WorkloadError(message.format(package))
         package_info = self.target.get_package_info(package)
-        self.target.pull(package_info.apk_path, self.owner.dependencies_directory,
-                         timeout=self.install_timeout)
-        apk_name = self.target.path.basename(package_info.apk_path)
-        return os.path.join(self.owner.dependencies_directory, apk_name)
+        apk_name = self._get_package_name(package_info.apk_path)
+        host_path = os.path.join(self.owner.dependencies_directory, apk_name)
+        with atomic_write_path(host_path) as at_path:
+            self.target.pull(package_info.apk_path, at_path,
+                             timeout=self.install_timeout)
+        return host_path

     def teardown(self):
         self.target.execute('am force-stop {}'.format(self.apk_info.package))
         if self.uninstall:
             self.target.uninstall_package(self.apk_info.package)

+    def _get_package_name(self, apk_path):
+        return self.target.path.basename(apk_path)
+
     def _get_package_error_msg(self, location):
         if self.version:
             msg = 'Multiple matches for "{version}" found on {location}.'
@@ -950,7 +945,7 @@ class TestPackageHandler(PackageHandler):
     def setup(self, context):
         self.initialize_package(context)

-        words = ['am', 'instrument']
+        words = ['am', 'instrument', '--user', '0']
         if self.raw:
             words.append('-r')
         if self.wait:
@@ -981,6 +976,9 @@ class TestPackageHandler(PackageHandler):
         self._instrument_output = self.target.execute(self.cmd)
         self.logger.debug(self._instrument_output)

+    def _get_package_name(self, apk_path):
+        return 'test_{}'.format(self.target.path.basename(apk_path))
+
     @property
     def instrument_output(self):
         if self.instrument_thread.is_alive():
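The new apk_arguments class attribute flows from ApkWorkload into PackageHandler, which hands it to build_apk_launch_command() when the activity is started. A sketch of a workload opting in; the class, name, and values are hypothetical:

    from wa.framework.workload import ApkWorkload

    class MyBenchmark(ApkWorkload):
        name = 'my_benchmark'
        package_names = ['com.example.benchmark']
        # extras appended to the 'am start' command line as --ei/--ez/--es flags
        apk_arguments = {'iterations': 10, 'auto_close': True}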
@@ -201,16 +201,16 @@ class DelayInstrument(Instrument):
         reading = self.target.read_int(self.temperature_file)

     def validate(self):
-        if (self.temperature_between_specs is not None and
-                self.fixed_between_specs is not None):
+        if (self.temperature_between_specs is not None
+                and self.fixed_between_specs is not None):
             raise ConfigError('Both fixed delay and thermal threshold specified for specs.')

-        if (self.temperature_between_jobs is not None and
-                self.fixed_between_jobs is not None):
+        if (self.temperature_between_jobs is not None
+                and self.fixed_between_jobs is not None):
             raise ConfigError('Both fixed delay and thermal threshold specified for jobs.')

-        if (self.temperature_before_start is not None and
-                self.fixed_before_start is not None):
+        if (self.temperature_before_start is not None
+                and self.fixed_before_start is not None):
             raise ConfigError('Both fixed delay and thermal threshold specified before start.')

         if not any([self.temperature_between_specs, self.fixed_between_specs,
@@ -32,16 +32,16 @@ import tarfile
 from subprocess import CalledProcessError

 from devlib.exception import TargetError
-from devlib.utils.android import ApkInfo

 from wa import Instrument, Parameter, very_fast
 from wa.framework.exception import ConfigError
 from wa.framework.instrument import slow
 from wa.utils.diff import diff_sysfs_dirs, diff_interrupt_files
-from wa.utils.misc import as_relative
+from wa.utils.misc import as_relative, safe_extract
 from wa.utils.misc import ensure_file_directory_exists as _f
 from wa.utils.misc import ensure_directory_exists as _d
 from wa.utils.types import list_of_strings
+from wa.utils.android import get_cacheable_apk_info


 logger = logging.getLogger(__name__)
@@ -162,16 +162,16 @@ class SysfsExtractor(Instrument):
         self.target.execute('chmod 0777 {}'.format(on_device_tarball), as_root=True)
         self.target.pull(on_device_tarball, on_host_tarball)
         with tarfile.open(on_host_tarball, 'r:gz') as tf:
-            tf.extractall(context.output_directory)
+            safe_extract(tf, context.output_directory)
         self.target.remove(on_device_tarball)
         os.remove(on_host_tarball)

         for paths in self.device_and_host_paths:
             after_dir = paths[self.AFTER_PATH]
             dev_dir = paths[self.DEVICE_PATH].strip('*')  # remove potential trailing '*'
-            if (not os.listdir(after_dir) and
-                    self.target.file_exists(dev_dir) and
-                    self.target.list_directory(dev_dir)):
+            if (not os.listdir(after_dir)
+                    and self.target.file_exists(dev_dir)
+                    and self.target.list_directory(dev_dir)):
                 self.logger.error('sysfs files were not pulled from the device.')
                 self.device_and_host_paths.remove(paths)  # Path is removed to skip diffing it
         for dev_dir, before_dir, after_dir, diff_dir in self.device_and_host_paths:
@@ -244,7 +244,7 @@ class ApkVersion(Instrument):

     def setup(self, context):
         if hasattr(context.workload, 'apk_file'):
-            self.apk_info = ApkInfo(context.workload.apk_file)
+            self.apk_info = get_cacheable_apk_info(context.workload.apk_file)
         else:
             self.apk_info = None
@@ -21,7 +21,7 @@ import re

 from devlib.collector.perf import PerfCollector

-from wa import Instrument, Parameter
+from wa import Instrument, Parameter, ConfigError
 from wa.utils.types import list_or_string, list_of_strs, numeric

 PERF_COUNT_REGEX = re.compile(r'^(CPU\d+)?\s*(\d+)\s*(.*?)\s*(\[\s*\d+\.\d+%\s*\])?\s*$')
@@ -31,7 +31,7 @@ class PerfInstrument(Instrument):

     name = 'perf'
     description = """
-    Perf is a Linux profiling with performance counters.
+    Perf is a Linux profiling tool with performance counters.
     Simpleperf is an Android profiling tool with performance counters.

     It is highly recomended to use perf_type = simpleperf when using this instrument
@@ -95,6 +95,11 @@ class PerfInstrument(Instrument):
                   description="""Specifies options to be used to gather report when record command
                   is used. It's highly recommended to use perf_type simpleperf when running on
                   android devices as reporting options are unstable with perf"""),
+        Parameter('run_report_sample', kind=bool, default=False, description="""If true, run
+                  'perf/simpleperf report-sample'. It only works with the record command."""),
+        Parameter('report_sample_options', kind=str, default=None,
+                  description="""Specifies options to pass to report-samples when run_report_sample
+                  is true."""),
         Parameter('labels', kind=list_of_strs, default=None,
                   global_alias='perf_labels',
                   description="""Provides labels for perf/simpleperf output for each optionstring.
@@ -104,6 +109,10 @@ class PerfInstrument(Instrument):
                   description="""
                   always install perf binary even if perf is already present on the device.
                   """),
+        Parameter('validate_pmu_events', kind=bool, default=True,
+                  description="""
+                  Query the hardware capabilities to verify the specified PMU events.
+                  """),
     ]

     def __init__(self, target, **kwargs):
@@ -111,15 +120,29 @@ class PerfInstrument(Instrument):
         self.collector = None
         self.outdir = None

+    def validate(self):
+        if self.report_option_string and (self.command != "record"):
+            raise ConfigError("report_option_string only works with perf/simpleperf record. Set command to record or remove report_option_string")
+        if self.report_sample_options and (self.command != "record"):
+            raise ConfigError("report_sample_options only works with perf/simpleperf record. Set command to record or remove report_sample_options")
+        if self.run_report_sample and (self.command != "record"):
+            raise ConfigError("run_report_sample only works with perf/simpleperf record. Set command to record or remove run_report_sample")
+
     def initialize(self, context):
+        if self.report_sample_options:
+            self.run_report_sample = True
+
         self.collector = PerfCollector(self.target,
                                        self.perf_type,
                                        self.command,
                                        self.events,
                                        self.optionstring,
                                        self.report_option_string,
+                                       self.run_report_sample,
+                                       self.report_sample_options,
                                        self.labels,
-                                       self.force_install)
+                                       self.force_install,
+                                       self.validate_pmu_events)

     def setup(self, context):
         self.outdir = os.path.join(context.output_directory, self.perf_type)
@@ -240,8 +263,10 @@ class PerfInstrument(Instrument):
             readCSV = csv.reader(csv_file, delimiter=',')
             line_num = 0
             for row in readCSV:
-                if line_num > 0 and 'Total test time' not in row:
-                    classifiers = {'scaled from(%)': row[len(row) - 2].replace('(', '').replace(')', '').replace('%', '')}
+                if 'Performance counter statistics' not in row and 'Total test time' not in row:
+                    classifiers = {}
+                    if '%' in row:
+                        classifiers['scaled from(%)'] = row[len(row) - 2].replace('(', '').replace(')', '').replace('%', '')
                     context.add_metric('{}_{}'.format(label, row[1]), row[0], 'count', classifiers=classifiers)
                 line_num += 1

@@ -249,15 +274,21 @@ class PerfInstrument(Instrument):
     def _process_simpleperf_stat_from_raw(stat_file, context, label):
         with open(stat_file) as fh:
             for line in fh:
-                if '#' in line:
+                if '#' in line and not line.startswith('#'):
+                    units = 'count'
+                    if "(ms)" in line:
+                        line = line.replace("(ms)", "")
+                        units = 'ms'
                     tmp_line = line.split('#')[0]
+                    tmp_line = line.strip()
                     count, metric = tmp_line.split(' ')[0], tmp_line.split(' ')[2]
-                    count = int(count.replace(',', ''))
-                    scaled_percentage = line.split('(')[1].strip().replace(')', '').replace('%', '')
-                    scaled_percentage = int(scaled_percentage)
+                    count = float(count) if "." in count else int(count.replace(',', ''))
+                    classifiers = {}
+                    if '%' in line:
+                        scaled_percentage = line.split('(')[1].strip().replace(')', '').replace('%', '')
+                        classifiers['scaled from(%)'] = int(scaled_percentage)
                     metric = '{}_{}'.format(label, metric)
-                    context.add_metric(metric, count, 'count', classifiers={'scaled from(%)': scaled_percentage})
+                    context.add_metric(metric, count, units, classifiers=classifiers)

     def _process_simpleperf_record_output(self, context):
         for host_file in os.listdir(self.outdir):
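A walk-through of the reworked raw-stat parsing on one line of assumed 'simpleperf stat' output; the sample line is an assumption for illustration, not taken from the diff:

    line = '  1,278,692  cpu-cycles   # 1.274 GHz  (100%)'   # assumed format

    if '#' in line and not line.startswith('#'):
        units = 'count'
        if '(ms)' in line:
            line = line.replace('(ms)', '')
            units = 'ms'
        tmp_line = line.strip()
        # split(' ') keeps empty strings for the double space, hence index 2
        count, metric = tmp_line.split(' ')[0], tmp_line.split(' ')[2]
        count = float(count) if '.' in count else int(count.replace(',', ''))
        classifiers = {}
        if '%' in line:
            scaled = line.split('(')[1].strip().replace(')', '').replace('%', '')
            classifiers['scaled from(%)'] = int(scaled)
        print(count, metric, units, classifiers)
        # -> 1278692 cpu-cycles count {'scaled from(%)': 100}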
wa/instruments/perfetto.py (new file)
@@ -0,0 +1,101 @@
#    Copyright 2023 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os

from devlib import PerfettoCollector

from wa import Instrument, Parameter
from wa.framework.instrument import very_slow, is_installed
from wa.framework.exception import InstrumentError

OUTPUT_PERFETTO_TRACE = 'devlib-trace.perfetto-trace'
PERFETTO_CONFIG_FILE = 'config.pbtx'


class PerfettoInstrument(Instrument):
    name = 'perfetto'
    description = """
    perfetto is an instrument that interacts with Google's Perfetto tracing
    infrastructure.

    From Perfetto's website:
    Perfetto is a production-grade open-source stack for performance instrumentation and trace analysis.
    It offers services and libraries for recording system-level and app-level traces, native + java heap profiling,
    a library for analyzing traces using SQL and a web-based UI to visualize and explore multi-GB traces.

    The instrument either requires Perfetto to be present on the target device or the standalone tracebox binary
    to be built from source and included in devlib's Package Bin directory.
    For more information, consult the PerfettoCollector documentation in devlib.

    More information can be found on https://perfetto.dev/
    """

    parameters = [
        Parameter('config', kind=str, mandatory=True,
                  description="""
                  Path to the Perfetto trace config file.

                  All the Perfetto-specific tracing configuration should be done inside
                  that file. This config option should just take a full
                  filesystem path to where the config can be found.
                  """),
        Parameter('force_tracebox', kind=bool, default=False,
                  description="""
                  Install tracebox even if traced is already running on the target device.
                  If set to true, the tracebox binary needs to be placed in devlib's Package Bin directory.
                  """)
    ]

    def __init__(self, target, **kwargs):
        super(PerfettoInstrument, self).__init__(target, **kwargs)
        self.collector = None

    def initialize(self, context):  # pylint: disable=unused-argument
        self.target_config = self.target.path.join(self.target.working_directory, PERFETTO_CONFIG_FILE)
        # push the config file to target
        self.target.push(self.config, self.target_config)
        collector_params = dict(
            config=self.target_config,
            force_tracebox=self.force_tracebox
        )
        self.collector = PerfettoCollector(self.target, **collector_params)

    @very_slow
    def start(self, context):  # pylint: disable=unused-argument
        self.collector.start()

    @very_slow
    def stop(self, context):  # pylint: disable=unused-argument
        self.collector.stop()

    def update_output(self, context):
        self.logger.info('Extracting Perfetto trace from target...')
        outfile = os.path.join(context.output_directory, OUTPUT_PERFETTO_TRACE)
        self.collector.set_output(outfile)
        self.collector.get_data()
        context.add_artifact('perfetto-bin', outfile, 'data')

    def teardown(self, context):  # pylint: disable=unused-argument
        self.target.remove(self.collector.target_output_file)

    def finalize(self, context):  # pylint: disable=unused-argument
        self.target.remove(self.target_config)

    def validate(self):
        if is_installed('trace-cmd'):
            raise InstrumentError('perfetto cannot be used at the same time as trace-cmd')
        if not os.path.isfile(self.config):
            raise InstrumentError('perfetto config file not found at "{}"'.format(self.config))
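Outside WA, the same collector lifecycle the instrument wraps can be driven directly from devlib. A hedged sketch; the device serial and host-side paths are assumptions:

    from devlib import AndroidTarget, PerfettoCollector

    target = AndroidTarget(connection_settings={'device': 'emulator-5554'})  # assumed device
    target_config = target.path.join(target.working_directory, 'config.pbtx')
    target.push('/home/user/perfetto/config.pbtx', target_config)            # assumed host path

    collector = PerfettoCollector(target, config=target_config)
    collector.start()
    # ... run the workload of interest ...
    collector.stop()
    collector.set_output('wa_output/devlib-trace.perfetto-trace')
    collector.get_data()   # pulls the trace from the target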
@@ -59,6 +59,12 @@ class FilePoller(Instrument):
                  Whether or not the poller will be run as root. This should be
                  used when the file you need to poll can only be accessed by root.
                  """),
+        Parameter('reopen', kind=bool, default=False,
+                  description="""
+                  When enabled files will be re-opened with each read. This is
+                  useful for some sysfs/debugfs entries that only generate a
+                  value when opened.
+                  """),
     ]

     def validate(self):
@@ -91,13 +97,17 @@ class FilePoller(Instrument):
         if self.align_with_ftrace:
             marker_option = '-m'
             signal.connect(self._adjust_timestamps, signal.AFTER_JOB_OUTPUT_PROCESSED)
-        self.command = '{} -t {} {} -l {} {} > {} 2>{}'.format(target_poller,
-                                                               self.sample_interval * 1000,
-                                                               marker_option,
-                                                               ','.join(self.labels),
-                                                               ' '.join(self.files),
-                                                               self.target_output_path,
-                                                               self.target_log_path)
+        reopen_option = ''
+        if self.reopen:
+            reopen_option = '-r'
+        self.command = '{} {} -t {} {} -l {} {} > {} 2>{}'.format(target_poller,
+                                                                  reopen_option,
+                                                                  self.sample_interval * 1000,
+                                                                  marker_option,
+                                                                  ','.join(self.labels),
+                                                                  ' '.join(self.files),
+                                                                  self.target_output_path,
+                                                                  self.target_log_path)

     def start(self, context):
         self.target.kick_off(self.command, as_root=self.as_root)
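A worked example of the command string the updated setup() assembles; target paths and labels are hypothetical, and sample_interval is taken to be in milliseconds, which is why the code multiplies by 1000:

    target_poller = '/data/local/tmp/wa-bin/poller'   # hypothetical install path
    command = '{} {} -t {} {} -l {} {} > {} 2>{}'.format(
        target_poller,
        '-r',                  # reopen_option, present when reopen=True
        1000 * 1000,           # 1000 ms sample_interval, in microseconds
        '-m',                  # marker_option, present when align_with_ftrace=True
        ','.join(['tz0', 'tz1']),
        ' '.join(['/sys/class/thermal/thermal_zone0/temp',
                  '/sys/class/thermal/thermal_zone1/temp']),
        '/data/local/tmp/wa/poller.csv',
        '/data/local/tmp/wa/poller.log',
    )
    # -> .../poller -r -t 1000000 -m -l tz0,tz1 <files> > poller.csv 2>poller.log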
Binary file not shown.
Binary file not shown.
@@ -77,9 +77,10 @@ int main(int argc, char ** argv) {
     char *labels;
     int labelCount = 0;
     int should_write_marker = 0;
+    int reopen_files = 0;
     int ret;

-    static char usage[] = "usage: %s [-h] [-m] [-t INTERVAL] FILE [FILE ...]\n"
+    static char usage[] = "usage: %s [-h] [-m] [-r] [-t INTERVAL] FILE [FILE ...]\n"
                           "polls FILE(s) every INTERVAL microseconds and outputs\n"
                           "the results in CSV format including a timestamp to STDOUT\n"
                           "\n"
@@ -87,6 +88,7 @@ int main(int argc, char ** argv) {
                           "    -m    Insert a marker into ftrace at the time of the first\n"
                           "          sample. This marker may be used to align the timestamps\n"
                           "          produced by the poller with those of ftrace events.\n"
+                          "    -r    Reopen files on each read (needed for some sysfs/debugfs files)\n"
                           "    -t    The polling sample interval in microseconds\n"
                           "          Defaults to 1000000 (1 second)\n"
                           "    -l    Comma separated list of labels to use in the CSV\n"
@@ -94,7 +96,7 @@ int main(int argc, char ** argv) {


     //Handling command line arguments
-    while ((c = getopt(argc, argv, "hmt:l:")) != -1)
+    while ((c = getopt(argc, argv, "hmrt:l:")) != -1)
     {
         switch(c) {
             case 'h':
@@ -104,7 +106,10 @@ int main(int argc, char ** argv) {
                 break;
             case 'm':
                 should_write_marker = 1;
                 break;
+            case 'r':
+                reopen_files = 1;
+                break;
             case 't':
                 interval = (useconds_t)atoi(optarg);
                 break;
@@ -184,7 +189,20 @@ int main(int argc, char ** argv) {
         time_float += ((double)current_time.tv_nsec)/1000/1000/1000;
         printf("%f", time_float);
         for (i = 0; i < num_files; i++) {
-            lseek(files_to_poll[i].fd, 0, SEEK_SET);
+            if (reopen_files) {
+                // Close and reopen the file to get fresh data
+                close(files_to_poll[i].fd);
+                files_to_poll[i].fd = open(files_to_poll[i].path, O_RDONLY);
+                if (files_to_poll[i].fd == -1) {
+                    fprintf(stderr, "WARNING: Could not reopen \"%s\", got: %s\n",
+                            files_to_poll[i].path, strerror(errno));
+                    printf(",");
+                    continue;
+                }
+            } else {
+                lseek(files_to_poll[i].fd, 0, SEEK_SET);
+            }

            bytes_read = read(files_to_poll[i].fd, buf, 1024);

            if (bytes_read < 0) {
wa/instruments/proc_stat/__init__.py (new file)
@@ -0,0 +1,94 @@
#    Copyright 2020 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import time
from datetime import datetime, timedelta

import pandas as pd

from wa import Instrument, Parameter, File, InstrumentError


class ProcStatCollector(Instrument):

    name = 'proc_stat'
    description = '''
    Collect CPU load information from /proc/stat.
    '''

    parameters = [
        Parameter('period', int, default=5,
                  constraint=lambda x: x > 0,
                  description='''
                  Time (in seconds) between collections.
                  '''),
    ]

    def initialize(self, context):  # pylint: disable=unused-argument
        self.host_script = context.get_resource(File(self, 'gather-load.sh'))
        self.target_script = self.target.install(self.host_script)
        self.target_output = self.target.get_workpath('proc-stat-raw.csv')
        self.stop_file = self.target.get_workpath('proc-stat-stop.signal')

    def setup(self, context):  # pylint: disable=unused-argument
        self.command = '{} sh {} {} {} {} {}'.format(
            self.target.busybox,
            self.target_script,
            self.target.busybox,
            self.target_output,
            self.period,
            self.stop_file,
        )
        self.target.remove(self.target_output)
        self.target.remove(self.stop_file)

    def start(self, context):  # pylint: disable=unused-argument
        self.target.kick_off(self.command)

    def stop(self, context):  # pylint: disable=unused-argument
        self.target.execute('{} touch {}'.format(self.target.busybox, self.stop_file))

    def update_output(self, context):
        self.logger.debug('Waiting for collector script to terminate...')
        self._wait_for_script()
        self.logger.debug('Waiting for collector script to terminate...')
        host_output = os.path.join(context.output_directory, 'proc-stat-raw.csv')
        self.target.pull(self.target_output, host_output)
        context.add_artifact('proc-stat-raw', host_output, kind='raw')

        df = pd.read_csv(host_output)
        no_ts = df[df.columns[1:]]
        deltas = (no_ts - no_ts.shift())
        total = deltas.sum(axis=1)
        util = (total - deltas.idle) / total * 100
        out_df = pd.concat([df.timestamp, util], axis=1).dropna()
        out_df.columns = ['timestamp', 'cpu_util']

        util_file = os.path.join(context.output_directory, 'proc-stat.csv')
        out_df.to_csv(util_file, index=False)
        context.add_artifact('proc-stat', util_file, kind='data')

    def finalize(self, context):  # pylint: disable=unused-argument
        if self.cleanup_assets and getattr(self, 'target_output'):
            self.target.remove(self.target_output)
            self.target.remove(self.target_script)

    def _wait_for_script(self):
        start_time = datetime.utcnow()
        timeout = timedelta(seconds=300)
        while self.target.file_exists(self.stop_file):
            delta = datetime.utcnow() - start_time
            if delta > timeout:
                raise InstrumentError('Timed out wating for /proc/stat collector to terminate..')
wa/instruments/proc_stat/gather-load.sh (new executable file)
@@ -0,0 +1,23 @@
#!/bin/sh
BUSYBOX=$1
OUTFILE=$2
PERIOD=$3
STOP_SIGNAL_FILE=$4

if [ "$#" != "4" ]; then
    echo "USAGE: gather-load.sh BUSYBOX OUTFILE PERIOD STOP_SIGNAL_FILE"
    exit 1
fi

echo "timestamp,user,nice,system,idle,iowait,irq,softirq,steal,guest,guest_nice" > $OUTFILE
while true; do
    echo -n $(${BUSYBOX} date -Iseconds) >> $OUTFILE
    ${BUSYBOX} cat /proc/stat | ${BUSYBOX} head -n 1 | \
        ${BUSYBOX} cut -d ' ' -f 2- | ${BUSYBOX} sed 's/ /,/g' >> $OUTFILE
    if [ -f $STOP_SIGNAL_FILE ]; then
        rm $STOP_SIGNAL_FILE
        break
    else
        sleep $PERIOD
    fi
done
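The CSV this script produces is post-processed by update_output() in __init__.py above; utilisation is just the per-period delta of the /proc/stat jiffy counters with the idle column subtracted out. A self-contained sketch with made-up numbers:

    import pandas as pd

    df = pd.DataFrame({
        'timestamp': ['2020-01-01T00:00:00', '2020-01-01T00:00:05'],
        'user': [100, 160], 'nice': [0, 0], 'system': [50, 70],
        'idle': [800, 1120], 'iowait': [10, 10], 'irq': [0, 0],
        'softirq': [0, 0], 'steal': [0, 0], 'guest': [0, 0], 'guest_nice': [0, 0],
    })
    no_ts = df[df.columns[1:]]
    deltas = no_ts - no_ts.shift()               # jiffies spent in each state per period
    total = deltas.sum(axis=1)                   # all jiffies in the period: 400
    util = (total - deltas.idle) / total * 100   # non-idle share: (400-320)/400 -> 20.0%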
@@ -62,7 +62,7 @@ class SerialMon(Instrument):
         if self._collector.collecting:
             self._collector.stop()
             data = self._collector.get_data()
-            for l in data:
+            for l in data:  # noqa: E741
                 context.add_artifact("{}_serial_log".format(identifier),
                                      l.path, kind="log")
@@ -22,7 +22,7 @@ from devlib import FtraceCollector

 from wa import Instrument, Parameter
 from wa.framework import signal
-from wa.framework.instrument import very_slow
+from wa.framework.instrument import very_slow, is_installed
 from wa.framework.exception import InstrumentError
 from wa.utils.types import list_of_strings
 from wa.utils.misc import which
@@ -125,6 +125,13 @@ class TraceCmdInstrument(Instrument):
                   value by going down from the specified size in
                   ``buffer_size_step`` intervals.
                   """),
+        Parameter('top_buffer_size', kind=int, default=None,
+                  global_alias='trace_top_buffer_size',
+                  description="""
+                  The same as buffer_size except it sets the size of the
+                  top-level buffer instead of the devlib one. If left unset,
+                  it will default to the same as the devlib buffer size.
+                  """),
         Parameter('buffer_size_step', kind=int, default=1000,
                   global_alias='trace_buffer_size_step',
                   description="""
@@ -155,6 +162,13 @@ class TraceCmdInstrument(Instrument):
                   installed on the host (the one in your
                   distribution's repos may be too old).
                   """),
+        Parameter('mode', kind=str, default='write-to-memory',
+                  allowed_values=['write-to-disk', 'write-to-memory'],
+                  description="""
+                  Specifies whether collected traces should be saved in memory or disk.
+                  Extensive workloads may hit out of memory issue. Hence, write-to-disk
+                  mode can help in such cases.
+                  """),
     ]

     def __init__(self, target, **kwargs):
@@ -168,6 +182,7 @@ class TraceCmdInstrument(Instrument):
             events=self.events,
             functions=self.functions,
             buffer_size=self.buffer_size,
+            top_buffer_size=self.top_buffer_size,
             buffer_size_step=1000,
             automark=False,
             autoreport=True,
@@ -175,6 +190,7 @@ class TraceCmdInstrument(Instrument):
             no_install=self.no_install,
             strict=False,
             report_on_target=False,
+            mode=self.mode,
         )
         if self.report and self.report_on_target:
             collector_params['autoreport'] = True
@@ -190,24 +206,31 @@ class TraceCmdInstrument(Instrument):
         signal.connect(self.mark_stop, signal.AFTER_WORKLOAD_EXECUTION, priority=11)

     def setup(self, context):
-        self.collector.reset()
+        if self.collector:
+            self.collector.reset()

     @very_slow
     def start(self, context):
-        self.collector.start()
+        if self.collector:
+            self.collector.start()

     @very_slow
     def stop(self, context):
-        self.collector.stop()
+        if self.collector:
+            self.collector.stop()

     def update_output(self, context):  # NOQA pylint: disable=R0912
+        if not self.collector:
+            return
         self.logger.info('Extracting trace from target...')
-        outfile = os.path.join(context.output_directory, 'trace.dat')
+        outfile = os.path.join(context.output_directory, OUTPUT_TRACE_FILE)
         self.collector.set_output(outfile)
         self.collector.get_data()
         context.add_artifact('trace-cmd-bin', outfile, 'data')
         if self.report:
-            textfile = os.path.join(context.output_directory, 'trace.txt')
+            textfile = os.path.join(context.output_directory, OUTPUT_TEXT_FILE)
             if not self.report_on_target:
                 self.collector.report(outfile, textfile)
                 context.add_artifact('trace-cmd-txt', textfile, 'export')
@@ -222,6 +245,8 @@ class TraceCmdInstrument(Instrument):
     def validate(self):
         if self.report and not self.report_on_target and not which('trace-cmd'):
             raise InstrumentError('trace-cmd is not in PATH; is it installed?')
+        if is_installed('perfetto'):
+            raise InstrumentError('trace-cmd cannot be used at the same time as perfetto')

     def mark_start(self, context):
         if self.is_enabled:
@@ -134,8 +134,8 @@ class CpuStatesProcessor(OutputProcessor):
             parallel_rows.append([job_id, workload, iteration] + record)
         for state in sorted(powerstate_report.state_stats):
             stats = powerstate_report.state_stats[state]
-            powerstate_rows.append([job_id, workload, iteration, state] +
-                                   ['{:.3f}'.format(s if s is not None else 0)
+            powerstate_rows.append([job_id, workload, iteration, state]
+                                   + ['{:.3f}'.format(s if s is not None else 0)
                                     for s in stats])

     outpath = output.get_path('parallel-stats.csv')
@@ -90,8 +90,8 @@ class CsvReportProcessor(OutputProcessor):

         outfile = output.get_path('results.csv')
         with csvwriter(outfile) as writer:
-            writer.writerow(['id', 'workload', 'iteration', 'metric', ] +
-                            extra_columns + ['value', 'units'])
+            writer.writerow(['id', 'workload', 'iteration', 'metric', ]
+                            + extra_columns + ['value', 'units'])

         for o in outputs:
             if o.kind == 'job':
@@ -106,8 +106,8 @@ class CsvReportProcessor(OutputProcessor):
                 'Output of kind "{}" unrecognised by csvproc'.format(o.kind))

             for metric in o.result.metrics:
-                row = (header + [metric.name] +
-                       [str(metric.classifiers.get(c, ''))
-                        for c in extra_columns] +
-                       [str(metric.value), metric.units or ''])
+                row = (header + [metric.name]
+                       + [str(metric.classifiers.get(c, ''))
+                          for c in extra_columns]
+                       + [str(metric.value), metric.units or ''])
                 writer.writerow(row)
@@ -92,9 +92,9 @@ class PostgresqlResultProcessor(OutputProcessor):
         "create_job": "INSERT INTO Jobs (oid, run_oid, status, retry, label, job_id, iterations, workload_name, metadata, _pod_version, _pod_serialization_version) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);",
         "create_target": "INSERT INTO Targets (oid, run_oid, target, modules, cpus, os, os_version, hostid, hostname, abi, is_rooted, kernel_version, kernel_release, kernel_sha1, kernel_config, sched_features, page_size_kb, system_id, screen_resolution, prop, android_id, _pod_version, _pod_serialization_version) "
                          "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)",
-        "create_event": "INSERT INTO Events (oid, run_oid, job_oid, timestamp, message, _pod_version, _pod_serialization_version) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s",
+        "create_event": "INSERT INTO Events (oid, run_oid, job_oid, timestamp, message, _pod_version, _pod_serialization_version) VALUES (%s, %s, %s, %s, %s, %s, %s)",
         "create_artifact": "INSERT INTO Artifacts (oid, run_oid, job_oid, name, large_object_uuid, description, kind, is_dir, _pod_version, _pod_serialization_version) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)",
-        "create_metric": "INSERT INTO Metrics (oid, run_oid, job_oid, name, value, units, lower_is_better, _pod_version, _pod_serialization_version) VALUES (%s, %s, %s, %s, %s, %s , %s, %s, %s)",
+        "create_metric": "INSERT INTO Metrics (oid, run_oid, job_oid, name, value, units, lower_is_better, _pod_version, _pod_serialization_version) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)",
         "create_augmentation": "INSERT INTO Augmentations (oid, run_oid, name) VALUES (%s, %s, %s)",
         "create_classifier": "INSERT INTO Classifiers (oid, artifact_oid, metric_oid, job_oid, run_oid, key, value) VALUES (%s, %s, %s, %s, %s, %s, %s)",
         "create_parameter": "INSERT INTO Parameters (oid, run_oid, job_oid, augmentation_oid, resource_getter_oid, name, value, value_type, type) "
@@ -124,8 +124,8 @@ class PostgresqlResultProcessor(OutputProcessor):

         if not psycopg2:
             raise ImportError(
-                'The psycopg2 module is required for the ' +
-                'Postgresql Output Processor: {}'.format(import_error_msg))
+                'The psycopg2 module is required for the '
+                + 'Postgresql Output Processor: {}'.format(import_error_msg))
         # N.B. Typecasters are for postgres->python and adapters the opposite
         self.connect_to_database()

@@ -515,8 +515,8 @@ class PostgresqlResultProcessor(OutputProcessor):
             self.conn = connect(dsn=dsn)
         except Psycopg2Error as e:
             raise OutputProcessorError(
-                "Database error, if the database doesn't exist, " +
-                "please use 'wa create database' to create the database: {}".format(e))
+                "Database error, if the database doesn't exist, "
+                + "please use 'wa create database' to create the database: {}".format(e))
         self.cursor = self.conn.cursor()
         self.verify_schema_versions()
|
@ -14,16 +14,26 @@
|
||||
#
|
||||
|
||||
import logging
|
||||
import os
|
||||
from datetime import datetime
|
||||
from shlex import quote
|
||||
|
||||
from devlib.utils.android import ApkInfo as _ApkInfo
|
||||
|
||||
from wa.framework.configuration import settings
|
||||
from wa.utils.serializer import read_pod, write_pod, Podable
|
||||
from wa.utils.types import enum
|
||||
from wa.utils.misc import atomic_write_path
|
||||
|
||||
|
||||
LogcatLogLevel = enum(['verbose', 'debug', 'info', 'warn', 'error', 'assert'], start=2)
|
||||
|
||||
log_level_map = ''.join(n[0].upper() for n in LogcatLogLevel.names)
|
||||
|
||||
logger = logging.getLogger('logcat')
|
||||
logcat_logger = logging.getLogger('logcat')
|
||||
apk_info_cache_logger = logging.getLogger('apk_info_cache')
|
||||
|
||||
apk_info_cache = None
|
||||
|
||||
|
||||
class LogcatEvent(object):
|
||||
@ -51,7 +61,7 @@ class LogcatEvent(object):
|
||||
class LogcatParser(object):
|
||||
|
||||
def parse(self, filepath):
|
||||
with open(filepath) as fh:
|
||||
with open(filepath, errors='replace') as fh:
|
||||
for line in fh:
|
||||
event = self.parse_line(line)
|
||||
if event:
|
||||
@ -74,7 +84,142 @@ class LogcatParser(object):
|
||||
tag = (parts.pop(0) if parts else '').strip()
|
||||
except Exception as e: # pylint: disable=broad-except
|
||||
message = 'Invalid metadata for line:\n\t{}\n\tgot: "{}"'
|
||||
logger.warning(message.format(line, e))
|
||||
logcat_logger.warning(message.format(line, e))
|
||||
return None
|
||||
|
||||
return LogcatEvent(timestamp, pid, tid, level, tag, message)
|
||||
|
||||
|
||||
# pylint: disable=protected-access,attribute-defined-outside-init
|
||||
class ApkInfo(_ApkInfo, Podable):
|
||||
'''Implement ApkInfo as a Podable class.'''
|
||||
|
||||
_pod_serialization_version = 1
|
||||
|
||||
@staticmethod
|
||||
def from_pod(pod):
|
||||
instance = ApkInfo()
|
||||
instance.path = pod['path']
|
||||
instance.package = pod['package']
|
||||
instance.activity = pod['activity']
|
||||
instance.label = pod['label']
|
||||
instance.version_name = pod['version_name']
|
||||
instance.version_code = pod['version_code']
|
||||
instance.native_code = pod['native_code']
|
||||
instance.permissions = pod['permissions']
|
||||
instance._apk_path = pod['_apk_path']
|
||||
instance._activities = pod['_activities']
|
||||
instance._methods = pod['_methods']
|
||||
return instance
|
||||
|
||||
def __init__(self, path=None):
|
||||
super().__init__(path)
|
||||
self._pod_version = self._pod_serialization_version
|
||||
|
||||
def to_pod(self):
|
||||
pod = super().to_pod()
|
||||
pod['path'] = self.path
|
||||
pod['package'] = self.package
|
||||
pod['activity'] = self.activity
|
||||
pod['label'] = self.label
|
||||
pod['version_name'] = self.version_name
|
||||
pod['version_code'] = self.version_code
|
||||
pod['native_code'] = self.native_code
|
||||
pod['permissions'] = self.permissions
|
||||
pod['_apk_path'] = self._apk_path
|
||||
pod['_activities'] = self.activities # Force extraction
|
||||
pod['_methods'] = self.methods # Force extraction
|
||||
return pod
|
||||
|
||||
@staticmethod
|
||||
def _pod_upgrade_v1(pod):
|
||||
pod['_pod_version'] = pod.get('_pod_version', 1)
|
||||
return pod
|
||||
|
||||
|
||||
class ApkInfoCache:
|
||||
|
||||
@staticmethod
|
||||
def _check_env():
|
||||
if not os.path.exists(settings.cache_directory):
|
||||
os.makedirs(settings.cache_directory)
|
||||
|
||||
def __init__(self, path=settings.apk_info_cache_file):
|
||||
self._check_env()
|
||||
self.path = path
|
||||
self.last_modified = None
|
||||
self.cache = {}
|
||||
self._update_cache()
|
||||
|
||||
def store(self, apk_info, apk_id, overwrite=True):
|
||||
self._update_cache()
|
||||
if apk_id in self.cache and not overwrite:
|
||||
raise ValueError('ApkInfo for {} is already in cache.'.format(apk_info.path))
|
||||
self.cache[apk_id] = apk_info.to_pod()
|
||||
with atomic_write_path(self.path) as at_path:
|
||||
write_pod(self.cache, at_path)
|
||||
self.last_modified = os.stat(self.path)
|
||||
|
||||
def get_info(self, key):
|
||||
self._update_cache()
|
||||
pod = self.cache.get(key)
|
||||
|
||||
info = ApkInfo.from_pod(pod) if pod else None
|
||||
return info
|
||||
|
||||
def _update_cache(self):
|
||||
if not os.path.exists(self.path):
|
||||
return
|
||||
if self.last_modified != os.stat(self.path):
|
||||
apk_info_cache_logger.debug('Updating cache {}'.format(self.path))
|
||||
self.cache = read_pod(self.path)
|
||||
self.last_modified = os.stat(self.path)
|
||||
|
||||
|
||||
def get_cacheable_apk_info(path):
|
||||
# pylint: disable=global-statement
|
||||
global apk_info_cache
|
||||
if not path:
|
||||
return
|
||||
stat = os.stat(path)
|
||||
modified = stat.st_mtime
|
||||
apk_id = '{}-{}'.format(path, modified)
|
||||
info = apk_info_cache.get_info(apk_id)
|
||||
|
||||
if info:
|
||||
msg = 'Using ApkInfo ({}) from cache'.format(info.package)
|
||||
else:
|
||||
info = ApkInfo(path)
|
||||
apk_info_cache.store(info, apk_id, overwrite=True)
|
||||
msg = 'Storing ApkInfo ({}) in cache'.format(info.package)
|
||||
apk_info_cache_logger.debug(msg)
|
||||
return info
|
||||
|
||||
|
||||
apk_info_cache = ApkInfoCache()
|
||||
|
||||
|
||||
def build_apk_launch_command(package, activity=None, apk_args=None):
|
||||
args_string = ''
|
||||
if apk_args:
|
||||
for k, v in apk_args.items():
|
||||
if isinstance(v, str):
|
||||
arg = '--es'
|
||||
v = quote(v)
|
||||
elif isinstance(v, float):
|
||||
arg = '--ef'
|
||||
elif isinstance(v, bool):
|
||||
arg = '--ez'
|
||||
elif isinstance(v, int):
|
||||
arg = '--ei'
|
||||
else:
|
||||
raise ValueError('Unable to encode {} {}'.format(v, type(v)))
|
||||
|
||||
args_string = '{} {} {} {}'.format(args_string, arg, k, v)
|
||||
|
||||
if not activity:
|
||||
cmd = 'am start -W {} {}'.format(package, args_string)
|
||||
else:
|
||||
cmd = 'am start -W -n {}/{} {}'.format(package, activity, args_string)
|
||||
|
||||
return cmd
|
||||
|
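A worked example of build_apk_launch_command() as defined above; the package, activity, and extras are hypothetical:

    from wa.utils.android import build_apk_launch_command

    cmd = build_apk_launch_command(
        'com.example.benchmark',          # package
        '.MainActivity',                  # activity
        {'iterations': 10, 'auto_close': True, 'scene': 'car chase'})
    print(cmd)
    # am start -W -n com.example.benchmark/.MainActivity \
    #   --ei iterations 10 --ez auto_close True --es scene 'car chase'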
@@ -151,7 +151,7 @@ class PowerStateProcessor(object):

     def __init__(self, cpus, wait_for_marker=True, no_idle=None):
         if no_idle is None:
-            no_idle = False if cpus[0].cpuidle and cpus[0].cpuidle.states else True
+            no_idle = not (cpus[0].cpuidle and cpus[0].cpuidle.states)
         self.power_state = SystemPowerState(len(cpus), no_idle=no_idle)
         self.requested_states = {}  # cpu_id -> requeseted state
         self.wait_for_marker = wait_for_marker
@@ -368,8 +368,10 @@ class PowerStateTimeline(object):
         if frequency is None:
             if idle_state == -1:
                 row.append('Running (unknown kHz)')
-            elif idle_state is None or not self.idle_state_names[cpu_idx]:
+            elif idle_state is None:
                 row.append('unknown')
+            elif not self.idle_state_names[cpu_idx]:
+                row.append('idle[{}]'.format(idle_state))
             else:
                 row.append(self.idle_state_names[cpu_idx][idle_state])
         else:  # frequency is not None
@@ -403,7 +405,7 @@ class ParallelStats(object):

         for i, clust in enumerate(clusters):
             self.clusters[str(i)] = set(clust)
-        self.clusters['all'] = set([cpu.id for cpu in cpus])
+        self.clusters['all'] = {cpu.id for cpu in cpus}

         self.first_timestamp = None
         self.last_timestamp = None
@@ -95,8 +95,8 @@ def diff_sysfs_dirs(before, after, result):  # pylint: disable=R0914
                 logger.debug('Token length mismatch in {} on line {}'.format(bfile, i))
                 dfh.write('xxx ' + bline)
                 continue
-            if ((len([c for c in bchunks if c.strip()]) == len([c for c in achunks if c.strip()]) == 2) and
-                    (bchunks[0] == achunks[0])):
+            if ((len([c for c in bchunks if c.strip()]) == len([c for c in achunks if c.strip()]) == 2)
+                    and (bchunks[0] == achunks[0])):
                 # if there are only two columns and the first column is the
                 # same, assume it's a "header" column and do not diff it.
                 dchunks = [bchunks[0]] + [diff_tokens(b, a) for b, a in zip(bchunks[1:], achunks[1:])]
@@ -62,7 +62,7 @@ def get_type_name(obj):
     elif match.group(1) == 'function':
         text = str(obj)
         name = text.split()[1]
-        if name == '<lambda>':
+        if name.endswith('<lambda>'):
             source = inspect.getsource(obj).strip().replace('\n', ' ')
             match = re.search(r'lambda\s+(\w+)\s*:\s*(.*?)\s*[\n,]', source)
             if not match:
@@ -285,7 +285,7 @@ def get_params_rst(parameters):
         text += indent('\nallowed values: {}\n'.format(', '.join(map(format_literal, param.allowed_values))))
     elif param.constraint:
         text += indent('\nconstraint: ``{}``\n'.format(get_type_name(param.constraint)))
-    if param.default:
+    if param.default is not None:
         value = param.default
         if isinstance(value, str) and value.startswith(USER_HOME):
             value = value.replace(USER_HOME, '~')
@@ -105,6 +105,30 @@ def once_per_class(method):
     return wrapper


+def once_per_attribute_value(attr_name):
+    """
+    The specified method will be invoked once for all instances that share the
+    same value for the specified attribute (sameness is established by comparing
+    repr() of the values).
+    """
+    def wrapped_once_per_attribute_value(method):
+        def wrapper(*args, **kwargs):
+            if __active_environment is None:
+                activate_environment('default')
+
+            attr_value = getattr(args[0], attr_name)
+            func_id = repr(method.__name__) + repr(args[0].__class__) + repr(attr_value)
+
+            if func_id in __environments[__active_environment]:
+                return
+            else:
+                __environments[__active_environment].append(func_id)
+
+            return method(*args, **kwargs)
+
+        return wrapper
+    return wrapped_once_per_attribute_value
+
+
 def once(method):
     """
     The specified method will be invoked only once within the
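A usage sketch for the new decorator; the class is hypothetical, and target is a plain string here so the repr()-based sameness check is easy to see:

    from wa.utils.exec_control import once_per_attribute_value

    class TargetHelper(object):
        def __init__(self, target):
            self.target = target

        @once_per_attribute_value('target')
        def initialize(self):
            print('initializing {}'.format(self.target))

    a, b = TargetHelper('device-1'), TargetHelper('device-1')
    a.initialize()   # runs
    b.initialize()   # skipped: same repr() of .target as a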
Some files were not shown because too many files have changed in this diff.