Mirror of https://github.com/ARM-software/devlib.git
Synced 2025-09-23 04:11:54 +01:00

Compare commits (433 commits)
@@ -17,7 +17,7 @@ Installation

Usage
-----

Please refer to the "Overview" section of the documentation.
Please refer to the "Overview" section of the `documentation <http://devlib.readthedocs.io/en/latest/>`_.


License
@@ -1,6 +1,21 @@
from devlib.target import Target, LinuxTarget, AndroidTarget, LocalLinuxTarget
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from devlib.target import Target, LinuxTarget, AndroidTarget, LocalLinuxTarget, ChromeOsTarget
from devlib.host import PACKAGE_BIN_DIRECTORY
from devlib.exception import DevlibError, TargetError, HostError, TargetNotRespondingError
from devlib.exception import DevlibError, DevlibTransientError, DevlibStableError, TargetError, TargetTransientError, TargetStableError, TargetNotRespondingError, HostError

from devlib.module import Module, HardRestModule, BootModule, FlashModule
from devlib.module import get_module, register_module
@@ -13,12 +28,39 @@ from devlib.instrument import Instrument, InstrumentChannel, Measurement, Measur
from devlib.instrument import MEASUREMENT_TYPES, INSTANTANEOUS, CONTINUOUS
from devlib.instrument.daq import DaqInstrument
from devlib.instrument.energy_probe import EnergyProbeInstrument
from devlib.instrument.arm_energy_probe import ArmEnergyProbeInstrument
from devlib.instrument.frames import GfxInfoFramesInstrument, SurfaceFlingerFramesInstrument
from devlib.instrument.hwmon import HwmonInstrument
from devlib.instrument.monsoon import MonsoonInstrument
from devlib.instrument.netstats import NetstatsInstrument
from devlib.instrument.gem5power import Gem5PowerInstrument
from devlib.instrument.baylibre_acme import (
    BaylibreAcmeNetworkInstrument,
    BaylibreAcmeXMLInstrument,
    BaylibreAcmeLocalInstrument,
    BaylibreAcmeInstrument,
)

from devlib.derived import DerivedMeasurements, DerivedMetric
from devlib.derived.energy import DerivedEnergyMeasurements
from devlib.derived.fps import DerivedGfxInfoStats, DerivedSurfaceFlingerStats

from devlib.trace.ftrace import FtraceCollector
from devlib.trace.perf import PerfCollector
from devlib.trace.serial_trace import SerialTraceCollector

from devlib.host import LocalConnection
from devlib.utils.android import AdbConnection
from devlib.utils.ssh import SshConnection, TelnetConnection, Gem5Connection

from devlib.utils.version import (get_devlib_version as __get_devlib_version,
                                  get_commit as __get_commit)


__version__ = __get_devlib_version()

__commit = __get_commit()
if __commit:
    __full_version__ = '{}+{}'.format(__version__, __commit)
else:
    __full_version__ = __version__
BIN  devlib/bin/arm64/perf (new file; binary file not shown)
BIN  devlib/bin/armeabi/perf (new file; binary file not shown)
BIN  devlib/bin/ppc64le/busybox (new executable file; binary file not shown)
BIN  devlib/bin/ppc64le/trace-cmd (new executable file; binary file not shown)
@@ -47,6 +47,37 @@ cpufreq_trace_all_frequencies() {
    done
}

################################################################################
# DevFrequency Utility Functions
################################################################################

devfreq_set_all_frequencies() {
    FREQ=$1
    for DEV in /sys/class/devfreq/*; do
        echo $FREQ > $DEV/min_freq
        echo $FREQ > $DEV/max_freq
    done
}

devfreq_get_all_frequencies() {
    for DEV in /sys/class/devfreq/*; do
        echo "`basename $DEV` `cat $DEV/cur_freq`"
    done
}

devfreq_set_all_governors() {
    GOV=$1
    for DEV in /sys/class/devfreq/*; do
        echo $GOV > $DEV/governor
    done
}

devfreq_get_all_governors() {
    for DEV in /sys/class/devfreq/*; do
        echo "`basename $DEV` `cat $DEV/governor`"
    done
}

################################################################################
# CPUIdle Utility Functions
################################################################################

@@ -124,14 +155,14 @@ cgroups_run_into() {

    # Check if the required CGroup exists
    $FIND $CGMOUNT -type d -mindepth 1 | \
        $GREP "$CGP" &>/dev/null
        $GREP -E "^$CGMOUNT/devlib_cgh[0-9]{1,2}$CGP" &>/dev/null
    if [ $? -ne 0 ]; then
        echo "ERROR: could not find any $CGP cgroup under $CGMOUNT"
        exit 1
    fi

    $FIND $CGMOUNT -type d -mindepth 1 | \
        $GREP "$CGP" | \
        $GREP -E "^$CGMOUNT/devlib_cgh[0-9]{1,2}$CGP$" | \
    while read CGPATH; do
        # Move this shell into that control group
        echo $$ > $CGPATH/cgroup.procs
@@ -177,6 +208,116 @@ cgroups_tasks_in() {
    exit 0
}

cgroups_freezer_set_state() {
    STATE=${1}
    SYSFS_ENTRY=${2}/freezer.state

    # Set the state of the freezer
    echo $STATE > $SYSFS_ENTRY

    # And check it applied cleanly
    for i in `seq 1 10`; do
        [ $($CAT $SYSFS_ENTRY) = $STATE ] && exit 0
        sleep 1
    done

    # We have an issue
    echo "ERROR: Freezer stalled while changing state to \"$STATE\"." >&2
    exit 1
}

################################################################################
# Hotplug
################################################################################

hotplug_online_all() {
    for path in /sys/devices/system/cpu/cpu[0-9]*; do
        if [ $(cat $path/online) -eq 0 ]; then
            echo 1 > $path/online
        fi
    done
}


################################################################################
# Scheduler
################################################################################

sched_get_kernel_attributes() {
    MATCH=${1:-'.*'}
    [ -d /proc/sys/kernel/ ] || exit 1
    $GREP '' /proc/sys/kernel/sched_* | \
        $SED -e 's|/proc/sys/kernel/sched_||' | \
        $GREP -e "$MATCH"
}

################################################################################
# Misc
################################################################################

read_tree_values() {
    BASEPATH=$1
    MAXDEPTH=$2

    if [ ! -e $BASEPATH ]; then
        echo "ERROR: $BASEPATH does not exist"
        exit 1
    fi

    PATHS=$($BUSYBOX find $BASEPATH -follow -maxdepth $MAXDEPTH)
    i=0
    for path in $PATHS; do
        i=$(expr $i + 1)
        if [ $i -gt 1 ]; then
            break;
        fi
    done
    if [ $i -gt 1 ]; then
        $BUSYBOX grep -s '' $PATHS
    fi
}

read_tree_tgz_b64() {
    BASEPATH=$1
    MAXDEPTH=$2
    TMPBASE=$3

    if [ ! -e $BASEPATH ]; then
        echo "ERROR: $BASEPATH does not exist"
        exit 1
    fi

    cd $TMPBASE
    TMP_FOLDER=$($BUSYBOX realpath $($BUSYBOX mktemp -d XXXXXX))

    # 'tar' doesn't work as expected on debugfs, so copy the tree first to
    # workaround the issue
    cd $BASEPATH
    for CUR_FILE in $($BUSYBOX find . -follow -type f -maxdepth $MAXDEPTH); do
        $BUSYBOX cp --parents $CUR_FILE $TMP_FOLDER/ 2> /dev/null
    done

    cd $TMP_FOLDER
    $BUSYBOX tar cz * 2>/dev/null | $BUSYBOX base64

    # Clean-up the tmp folder since we won't need it any more
    cd $TMPBASE
    rm -rf $TMP_FOLDER
}

get_linux_system_id() {
    kernel=$($BUSYBOX uname -r)
    hardware=$($BUSYBOX ip a | $BUSYBOX grep 'link/ether' | $BUSYBOX sed 's/://g' | $BUSYBOX awk '{print $2}' | $BUSYBOX tr -d '\n')
    filesystem=$(ls /dev/disk/by-uuid | $BUSYBOX tr '\n' '-' | $BUSYBOX sed 's/-$//')
    echo "$hardware/$kernel/$filesystem"
}

get_android_system_id() {
    kernel=$($BUSYBOX uname -r)
    hardware=$($BUSYBOX ip a | $BUSYBOX grep 'link/ether' | $BUSYBOX sed 's/://g' | $BUSYBOX awk '{print $2}' | $BUSYBOX tr -d '\n')
    filesystem=$(content query --uri content://settings/secure --projection value --where "name='android_id'" | $BUSYBOX cut -f2 -d=)
    echo "$hardware/$kernel/$filesystem"
}

################################################################################
# Main Function Dispatcher
@@ -198,6 +339,18 @@ cpufreq_get_all_governors)
cpufreq_trace_all_frequencies)
    cpufreq_trace_all_frequencies $*
    ;;
devfreq_set_all_frequencies)
    devfreq_set_all_frequencies $*
    ;;
devfreq_get_all_frequencies)
    devfreq_get_all_frequencies
    ;;
devfreq_set_all_governors)
    devfreq_set_all_governors $*
    ;;
devfreq_get_all_governors)
    devfreq_get_all_governors
    ;;
cpuidle_wake_all_cpus)
    cpuidle_wake_all_cpus $*
    ;;
@@ -213,9 +366,30 @@ cgroups_tasks_move)
cgroups_tasks_in)
    cgroups_tasks_in $*
    ;;
cgroups_freezer_set_state)
    cgroups_freezer_set_state $*
    ;;
ftrace_get_function_stats)
    ftrace_get_function_stats
    ;;
hotplug_online_all)
    hotplug_online_all
    ;;
read_tree_values)
    read_tree_values $*
    ;;
read_tree_tgz_b64)
    read_tree_tgz_b64 $*
    ;;
get_linux_system_id)
    get_linux_system_id $*
    ;;
get_android_system_id)
    get_android_system_id $*
    ;;
sched_get_kernel_attributes)
    sched_get_kernel_attributes $*
    ;;
*)
    echo "Command [$CMD] not supported"
    exit -1
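The read_tree_tgz_b64 helper above streams the copied tree back as a base64-encoded gzipped tarball, so the receiving side has to undo both encodings. A minimal host-side sketch of that unpacking (illustrative only; function and argument names here are made up, not devlib API):

    import base64
    import io
    import tarfile

    def unpack_tree_dump(b64_text, dest_dir):
        # Undo the base64 layer added so the archive survives the shell transport...
        tar_bytes = base64.b64decode(b64_text)
        # ...then extract the gzipped tarball produced by `tar cz`.
        with tarfile.open(fileobj=io.BytesIO(tar_bytes), mode='r:gz') as tar:
            tar.extractall(dest_dir)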
BIN  devlib/bin/x86_64/trace-cmd (new executable file; binary file not shown)
devlib/derived/__init__.py (new file, 63 lines)
@@ -0,0 +1,63 @@
# Copyright 2015-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from devlib.instrument import MeasurementType, MEASUREMENT_TYPES


class DerivedMetric(object):

    __slots__ = ['name', 'value', 'measurement_type']

    @property
    def units(self):
        return self.measurement_type.units

    def __init__(self, name, value, measurement_type):
        self.name = name
        self.value = value
        if isinstance(measurement_type, MeasurementType):
            self.measurement_type = measurement_type
        else:
            try:
                self.measurement_type = MEASUREMENT_TYPES[measurement_type]
            except KeyError:
                msg = 'Unknown measurement type: {}'
                raise ValueError(msg.format(measurement_type))

    def __str__(self):
        if self.units:
            return '{}: {} {}'.format(self.name, self.value, self.units)
        else:
            return '{}: {}'.format(self.name, self.value)

    # pylint: disable=undefined-variable
    def __cmp__(self, other):
        if hasattr(other, 'value'):
            return cmp(self.value, other.value)
        else:
            return cmp(self.value, other)

    __repr__ = __str__


class DerivedMeasurements(object):

    # pylint: disable=no-self-use,unused-argument
    def process(self, measurements_csv):
        return []

    # pylint: disable=no-self-use
    def process_raw(self, *args):
        return []
devlib/derived/energy.py (new file, 98 lines)
@@ -0,0 +1,98 @@
# Copyright 2013-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import division
from collections import defaultdict

from devlib.derived import DerivedMeasurements, DerivedMetric
from devlib.instrument import MEASUREMENT_TYPES


class DerivedEnergyMeasurements(DerivedMeasurements):

    # pylint: disable=too-many-locals,too-many-branches
    @staticmethod
    def process(measurements_csv):

        should_calculate_energy = []
        use_timestamp = False

        # Determine sites to calculate energy for
        channel_map = defaultdict(list)
        for channel in measurements_csv.channels:
            channel_map[channel.site].append(channel.kind)
            if channel.site == 'timestamp':
                use_timestamp = True
                time_measurment = channel.measurement_type
        for site, kinds in channel_map.items():
            if 'power' in kinds and not 'energy' in kinds:
                should_calculate_energy.append(site)

        if measurements_csv.sample_rate_hz is None and not use_timestamp:
            msg = 'Timestamp data is unavailable, please provide a sample rate'
            raise ValueError(msg)

        if use_timestamp:
            # Find index of timestamp column
            ts_index = [i for i, chan in enumerate(measurements_csv.channels)
                        if chan.site == 'timestamp']
            if len(ts_index) > 1:
                raise ValueError('Multiple timestamps detected')
            ts_index = ts_index[0]

        row_ts = 0
        last_ts = 0
        energy_results = defaultdict(dict)
        power_results = defaultdict(float)

        # Process data
        for count, row in enumerate(measurements_csv.iter_measurements()):
            if use_timestamp:
                last_ts = row_ts
                row_ts = time_measurment.convert(float(row[ts_index].value), 'time')
            for entry in row:
                channel = entry.channel
                site = channel.site
                if channel.kind == 'energy':
                    if count == 0:
                        energy_results[site]['start'] = entry.value
                    else:
                        energy_results[site]['end'] = entry.value

                if channel.kind == 'power':
                    power_results[site] += entry.value

                    if site in should_calculate_energy:
                        if count == 0:
                            energy_results[site]['start'] = 0
                            energy_results[site]['end'] = 0
                        elif use_timestamp:
                            energy_results[site]['end'] += entry.value * (row_ts - last_ts)
                        else:
                            energy_results[site]['end'] += entry.value * (1 /
                                                           measurements_csv.sample_rate_hz)

        # Calculate final measurements
        derived_measurements = []
        for site in energy_results:
            total_energy = energy_results[site]['end'] - energy_results[site]['start']
            name = '{}_total_energy'.format(site)
            derived_measurements.append(DerivedMetric(name, total_energy, MEASUREMENT_TYPES['energy']))

        for site in power_results:
            power = power_results[site] / (count + 1)  # pylint: disable=undefined-loop-variable
            name = '{}_average_power'.format(site)
            derived_measurements.append(DerivedMetric(name, power, MEASUREMENT_TYPES['power']))

        return derived_measurements
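The integration above multiplies each power sample by its sample period (or by the timestamp delta), so at a fixed 100 Hz a 2 W reading contributes 2 * (1/100) = 0.02 J to a site's running total. A minimal usage sketch, assuming a capture CSV without a timestamp column (file name and channel labels are hypothetical):

    from devlib.instrument import MeasurementsCsv
    from devlib.derived.energy import DerivedEnergyMeasurements

    # Channels are inferred from the CSV header; a sample rate is required
    # because this example capture carries no 'timestamp' column.
    csv = MeasurementsCsv('energy_probe.csv', sample_rate_hz=1000)
    for metric in DerivedEnergyMeasurements.process(csv):
        print(metric)   # e.g. 'VDD_SYS_total_energy: ... joules'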
devlib/derived/fps.py (new file, 236 lines)
@@ -0,0 +1,236 @@
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from __future__ import division
import os

try:
    import pandas as pd
except ImportError:
    pd = None

from past.builtins import basestring

from devlib.derived import DerivedMeasurements, DerivedMetric
from devlib.exception import HostError
from devlib.instrument import MeasurementsCsv
from devlib.utils.csvutil import csvwriter
from devlib.utils.rendering import gfxinfo_get_last_dump, VSYNC_INTERVAL
from devlib.utils.types import numeric


class DerivedFpsStats(DerivedMeasurements):

    def __init__(self, drop_threshold=5, suffix=None, filename=None, outdir=None):
        self.drop_threshold = drop_threshold
        self.suffix = suffix
        self.filename = filename
        self.outdir = outdir
        if (filename is None) and (suffix is None):
            self.suffix = '-fps'
        elif (filename is not None) and (suffix is not None):
            raise ValueError('suffix and filename cannot be specified at the same time.')
        if filename is not None and os.sep in filename:
            raise ValueError('filename cannot be a path (cannot countain "{}"'.format(os.sep))

    # pylint: disable=no-member
    def process(self, measurements_csv):
        if isinstance(measurements_csv, basestring):
            measurements_csv = MeasurementsCsv(measurements_csv)
        if pd is not None:
            return self._process_with_pandas(measurements_csv)
        return self._process_without_pandas(measurements_csv)

    def _get_csv_file_name(self, frames_file):
        outdir = self.outdir or os.path.dirname(frames_file)
        if self.filename:
            return os.path.join(outdir, self.filename)

        frames_basename = os.path.basename(frames_file)
        rest, ext = os.path.splitext(frames_basename)
        csv_basename = rest + self.suffix + ext
        return os.path.join(outdir, csv_basename)


class DerivedGfxInfoStats(DerivedFpsStats):

    #pylint: disable=arguments-differ
    @staticmethod
    def process_raw(filepath, *args):
        metrics = []
        dump = gfxinfo_get_last_dump(filepath)
        seen_stats = False
        for line in dump.split('\n'):
            if seen_stats and not line.strip():
                break
            elif line.startswith('Janky frames:'):
                text = line.split(': ')[-1]
                val_text, pc_text = text.split('(')
                metrics.append(DerivedMetric('janks', numeric(val_text.strip()), 'count'))
                metrics.append(DerivedMetric('janks_pc', numeric(pc_text[:-3]), 'percent'))
            elif ' percentile: ' in line:
                ptile, val_text = line.split(' percentile: ')
                name = 'render_time_{}_ptile'.format(ptile)
                value = numeric(val_text.strip()[:-2])
                metrics.append(DerivedMetric(name, value, 'time_ms'))
            elif line.startswith('Number '):
                name_text, val_text = line.strip().split(': ')
                name = name_text[7:].lower().replace(' ', '_')
                value = numeric(val_text)
                metrics.append(DerivedMetric(name, value, 'count'))
            else:
                continue
            seen_stats = True
        return metrics

    def _process_without_pandas(self, measurements_csv):
        per_frame_fps = []
        start_vsync, end_vsync = None, None
        frame_count = 0

        for frame_data in measurements_csv.iter_values():
            if frame_data.Flags_flags != 0:
                continue
            frame_count += 1

            if start_vsync is None:
                start_vsync = frame_data.Vsync_time_ns
            end_vsync = frame_data.Vsync_time_ns

            frame_time = frame_data.FrameCompleted_time_ns - frame_data.IntendedVsync_time_ns
            pff = 1e9 / frame_time
            if pff > self.drop_threshold:
                per_frame_fps.append([pff])

        if frame_count:
            duration = end_vsync - start_vsync
            fps = (1e9 * frame_count) / float(duration)
        else:
            duration = 0
            fps = 0

        csv_file = self._get_csv_file_name(measurements_csv.path)
        with csvwriter(csv_file) as writer:
            writer.writerow(['fps'])
            writer.writerows(per_frame_fps)

        return [DerivedMetric('fps', fps, 'fps'),
                DerivedMetric('total_frames', frame_count, 'frames'),
                MeasurementsCsv(csv_file)]

    def _process_with_pandas(self, measurements_csv):
        data = pd.read_csv(measurements_csv.path)
        data = data[data.Flags_flags == 0]
        frame_time = data.FrameCompleted_time_ns - data.IntendedVsync_time_ns
        per_frame_fps = (1e9 / frame_time)
        keep_filter = per_frame_fps > self.drop_threshold
        per_frame_fps = per_frame_fps[keep_filter]
        per_frame_fps.name = 'fps'

        frame_count = data.index.size
        if frame_count > 1:
            duration = data.Vsync_time_ns.iloc[-1] - data.Vsync_time_ns.iloc[0]
            fps = (1e9 * frame_count) / float(duration)
        else:
            duration = 0
            fps = 0

        csv_file = self._get_csv_file_name(measurements_csv.path)
        per_frame_fps.to_csv(csv_file, index=False, header=True)

        return [DerivedMetric('fps', fps, 'fps'),
                DerivedMetric('total_frames', frame_count, 'frames'),
                MeasurementsCsv(csv_file)]


class DerivedSurfaceFlingerStats(DerivedFpsStats):

    # pylint: disable=too-many-locals
    def _process_with_pandas(self, measurements_csv):
        data = pd.read_csv(measurements_csv.path)

        # fiter out bogus frames.
        bogus_frames_filter = data.actual_present_time_us != 0x7fffffffffffffff
        actual_present_times = data.actual_present_time_us[bogus_frames_filter]
        actual_present_time_deltas = actual_present_times.diff().dropna()

        vsyncs_to_compose = actual_present_time_deltas.div(VSYNC_INTERVAL)
        vsyncs_to_compose.apply(lambda x: int(round(x, 0)))

        # drop values lower than drop_threshold FPS as real in-game frame
        # rate is unlikely to drop below that (except on loading screens
        # etc, which should not be factored in frame rate calculation).
        per_frame_fps = (1.0 / (vsyncs_to_compose.multiply(VSYNC_INTERVAL / 1e9)))
        keep_filter = per_frame_fps > self.drop_threshold
        filtered_vsyncs_to_compose = vsyncs_to_compose[keep_filter]
        per_frame_fps.name = 'fps'

        csv_file = self._get_csv_file_name(measurements_csv.path)
        per_frame_fps.to_csv(csv_file, index=False, header=True)

        if not filtered_vsyncs_to_compose.empty:
            fps = 0
            total_vsyncs = filtered_vsyncs_to_compose.sum()
            frame_count = filtered_vsyncs_to_compose.size

            if total_vsyncs:
                fps = 1e9 * frame_count / (VSYNC_INTERVAL * total_vsyncs)

            janks = self._calc_janks(filtered_vsyncs_to_compose)
            not_at_vsync = self._calc_not_at_vsync(vsyncs_to_compose)
        else:
            fps = 0
            frame_count = 0
            janks = 0
            not_at_vsync = 0

        janks_pc = 0 if frame_count == 0 else janks * 100 / frame_count

        return [DerivedMetric('fps', fps, 'fps'),
                DerivedMetric('total_frames', frame_count, 'frames'),
                MeasurementsCsv(csv_file),
                DerivedMetric('janks', janks, 'count'),
                DerivedMetric('janks_pc', janks_pc, 'percent'),
                DerivedMetric('missed_vsync', not_at_vsync, 'count')]

    # pylint: disable=unused-argument,no-self-use
    def _process_without_pandas(self, measurements_csv):
        # Given that SurfaceFlinger has been deprecated in favor of GfxInfo,
        # it does not seem worth it implementing this.
        raise HostError('Please install "pandas" Python package to process SurfaceFlinger frames')

    @staticmethod
    def _calc_janks(filtered_vsyncs_to_compose):
        """
        Internal method for calculating jank frames.
        """
        pause_latency = 20
        vtc_deltas = filtered_vsyncs_to_compose.diff().dropna()
        vtc_deltas = vtc_deltas.abs()
        janks = vtc_deltas.apply(lambda x: (pause_latency > x > 1.5) and 1 or 0).sum()

        return janks

    @staticmethod
    def _calc_not_at_vsync(vsyncs_to_compose):
        """
        Internal method for calculating the number of frames that did not
        render in a single vsync cycle.
        """
        epsilon = 0.0001
        func = lambda x: (abs(x - 1.0) > epsilon) and 1 or 0
        not_at_vsync = vsyncs_to_compose.apply(func).sum()

        return not_at_vsync
@@ -1,4 +1,4 @@
# Copyright 2013-2015 ARM Limited
# Copyright 2013-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,9 +13,30 @@
# limitations under the License.
#


class DevlibError(Exception):
    """Base class for all Devlib exceptions."""
    @property
    def message(self):
        if self.args:
            return self.args[0]
        return str(self)


class DevlibStableError(DevlibError):
    """Non transient target errors, that are not subject to random variations
    in the environment and can be reliably linked to for example a missing
    feature on a target."""
    pass


class DevlibTransientError(DevlibError):
    """Exceptions inheriting from ``DevlibTransientError`` represent random
    transient events that are usually related to issues in the environment, as
    opposed to programming errors, for example network failures or
    timeout-related exceptions. When the error could come from
    indistinguishable transient or non-transient issue, it can generally be
    assumed that the configuration is correct and therefore, a transient
    exception is raised."""
    pass


@@ -24,11 +45,22 @@ class TargetError(DevlibError):
    pass


class TargetNotRespondingError(DevlibError):
    """The target is unresponsive."""
class TargetTransientError(TargetError, DevlibTransientError):
    """Transient target errors that can happen randomly when everything is
    properly configured."""
    pass

    def __init__(self, target):
        super(TargetNotRespondingError, self).__init__('Target {} is not responding.'.format(target))

class TargetStableError(TargetError, DevlibStableError):
    """Non-transient target errors that can be linked to a programming error or
    a configuration issue, and is not influenced by non-controllable parameters
    such as network issues."""
    pass


class TargetNotRespondingError(TargetTransientError):
    """The target is unresponsive."""
    pass


class HostError(DevlibError):
@@ -36,7 +68,8 @@ class HostError(DevlibError):
    pass


class TimeoutError(DevlibError):
# pylint: disable=redefined-builtin
class TimeoutError(DevlibTransientError):
    """Raised when a subprocess command times out. This is basically a ``DevlibError``-derived version
    of ``subprocess.CalledProcessError``, the thinking being that while a timeout could be due to
    programming error (e.g. not setting long enough timers), it is often due to some failure in the
@@ -49,3 +82,52 @@ class TimeoutError(DevlibError):

    def __str__(self):
        return '\n'.join([self.message, 'OUTPUT:', self.output or ''])


class WorkerThreadError(DevlibError):
    """
    This should get raised in the main thread if a non-WAError-derived
    exception occurs on a worker/background thread. If a WAError-derived
    exception is raised in the worker, then it that exception should be
    re-raised on the main thread directly -- the main point of this is to
    preserve the backtrace in the output, and backtrace doesn't get output for
    WAErrors.

    """

    def __init__(self, thread, exc_info):
        self.thread = thread
        self.exc_info = exc_info
        orig = self.exc_info[1]
        orig_name = type(orig).__name__
        message = 'Exception of type {} occured on thread {}:\n'.format(orig_name, thread)
        message += '{}\n{}: {}'.format(get_traceback(self.exc_info), orig_name, orig)
        super(WorkerThreadError, self).__init__(message)


class KernelConfigKeyError(KeyError, IndexError, DevlibError):
    """
    Exception raised when a kernel config option cannot be found.

    It inherits from :exc:`IndexError` for backward compatibility, and
    :exc:`KeyError` to behave like a regular mapping.
    """
    pass


def get_traceback(exc=None):
    """
    Returns the string with the traceback for the specifiec exc
    object, or for the current exception exc is not specified.

    """
    import io, traceback, sys  # pylint: disable=multiple-imports
    if exc is None:
        exc = sys.exc_info()
    if not exc:
        return None
    tb = exc[2]
    sio = io.BytesIO()
    traceback.print_tb(tb, file=sio)
    del tb  # needs to be done explicitly see: http://docs.python.org/2/library/sys.html#sys.exc_info
    return sio.getvalue()
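The transient/stable split introduced above is what lets callers decide whether retrying is worthwhile. A minimal sketch of that pattern (the retry loop is illustrative, not devlib API; it only assumes a devlib Target-like object with an execute() method):

    from devlib.exception import TargetTransientError, TargetStableError

    def run_with_retries(target, command, attempts=3):
        for _ in range(attempts):
            try:
                return target.execute(command)
            except TargetTransientError:
                # Environment hiccup (e.g. adb or network); worth trying again.
                continue
            except TargetStableError:
                # Misconfiguration or missing feature; retrying will not help.
                raise
        raise TargetTransientError('Giving up after {} attempts'.format(attempts))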
@@ -1,4 +1,4 @@
# Copyright 2015 ARM Limited
# Copyright 2015-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,21 +14,31 @@
#
from glob import iglob
import os
import signal
import shutil
import subprocess
import logging
from distutils.dir_util import copy_tree
from getpass import getpass
from pipes import quote

from devlib.exception import TargetError
from devlib.exception import TargetTransientError, TargetStableError
from devlib.utils.misc import check_output

PACKAGE_BIN_DIRECTORY = os.path.join(os.path.dirname(__file__), 'bin')

# pylint: disable=redefined-outer-name
def kill_children(pid, signal=signal.SIGKILL):
    with open('/proc/{0}/task/{0}/children'.format(pid), 'r') as fd:
        for cpid in map(int, fd.read().strip().split()):
            kill_children(cpid, signal)
            os.kill(cpid, signal)

class LocalConnection(object):

    name = 'local'

    # pylint: disable=unused-argument
    def __init__(self, platform=None, keep_password=True, unrooted=False,
                 password=None, timeout=None):
        self.logger = logging.getLogger('local_connection')
@@ -47,30 +57,38 @@ class LocalConnection(object):
            for each_source in iglob(source):
                shutil.copy(each_source, dest)
        else:
            shutil.copy(source, dest)
            if os.path.isdir(source):
                # Use distutils to allow copying into an existing directory structure.
                copy_tree(source, dest)
            else:
                shutil.copy(source, dest)

    # pylint: disable=unused-argument
    def execute(self, command, timeout=None, check_exit_code=True,
                as_root=False, strip_colors=True):
                as_root=False, strip_colors=True, will_succeed=False):
        self.logger.debug(command)
        if as_root:
            if self.unrooted:
                raise TargetError('unrooted')
                raise TargetStableError('unrooted')
            password = self._get_password()
            command = 'echo \'{}\' | sudo -S '.format(password) + command
            command = 'echo {} | sudo -S '.format(quote(password)) + command
        ignore = None if check_exit_code else 'all'
        try:
            return check_output(command, shell=True, timeout=timeout, ignore=ignore)[0]
        except subprocess.CalledProcessError as e:
            message = 'Got exit code {}\nfrom: {}\nOUTPUT: {}'.format(
                e.returncode, command, e.output)
            raise TargetError(message)
            if will_succeed:
                raise TargetTransientError(message)
            else:
                raise TargetStableError(message)

    def background(self, command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, as_root=False):
        if as_root:
            if self.unrooted:
                raise TargetError('unrooted')
                raise TargetStableError('unrooted')
            password = self._get_password()
            command = 'echo \'{}\' | sudo -S '.format(password) + command
            command = 'echo {} | sudo -S '.format(quote(password)) + command
        return subprocess.Popen(command, stdout=stdout, stderr=stderr, shell=True)

    def close(self):
@@ -1,4 +1,4 @@
# Copyright 2015 ARM Limited
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,11 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import csv
from __future__ import division
import logging
import collections

from past.builtins import basestring

from devlib.utils.csvutil import csvreader
from devlib.utils.types import numeric
from devlib.utils.types import identifier


# Channel modes describe what sort of measurement the instrument supports.
@@ -24,29 +28,37 @@ from devlib.utils.types import numeric
INSTANTANEOUS = 1
CONTINUOUS = 2

MEASUREMENT_TYPES = {}  # populated further down

class MeasurementType(tuple):

    __slots__ = []
class MeasurementType(object):

    def __new__(cls, name, units, category=None):
        return tuple.__new__(cls, (name, units, category))
    def __init__(self, name, units, category=None, conversions=None):
        self.name = name
        self.units = units
        self.category = category
        self.conversions = {}
        if conversions is not None:
            for key, value in conversions.items():
                if not callable(value):
                    msg = 'Converter must be callable; got {} "{}"'
                    raise ValueError(msg.format(type(value), value))
                self.conversions[key] = value

    @property
    def name(self):
        return tuple.__getitem__(self, 0)

    @property
    def units(self):
        return tuple.__getitem__(self, 1)

    @property
    def category(self):
        return tuple.__getitem__(self, 2)

    def __getitem__(self, item):
        raise TypeError()
    def convert(self, value, to):
        if isinstance(to, basestring) and to in MEASUREMENT_TYPES:
            to = MEASUREMENT_TYPES[to]
        if not isinstance(to, MeasurementType):
            msg = 'Unexpected conversion target: "{}"'
            raise ValueError(msg.format(to))
        if to.name == self.name:
            return value
        if not to.name in self.conversions:
            msg = 'No conversion from {} to {} available'
            raise ValueError(msg.format(self.name, to.name))
        return self.conversions[to.name](value)

    # pylint: disable=undefined-variable
    def __cmp__(self, other):
        if isinstance(other, MeasurementType):
            other = other.name
@@ -55,24 +67,83 @@ class MeasurementType(tuple):
    def __str__(self):
        return self.name

    __repr__ = __str__
    def __repr__(self):
        if self.category:
            text = 'MeasurementType({}, {}, {})'
            return text.format(self.name, self.units, self.category)
        else:
            text = 'MeasurementType({}, {})'
            return text.format(self.name, self.units)


# Standard measures
# Standard measures. In order to make sure that downstream data processing is not tied
# to particular insturments (e.g. a particular method of mearuing power), instruments
# must, where possible, resport their measurments formatted as on of the standard types
# defined here.
_measurement_types = [
    MeasurementType('time', 'seconds'),
    MeasurementType('temperature', 'degrees'),
    # For whatever reason, the type of measurement could not be established.
    MeasurementType('unknown', None),

    # Generic measurements
    MeasurementType('count', 'count'),
    MeasurementType('percent', 'percent'),

    # Time measurement. While there is typically a single "canonical" unit
    # used for each type of measurmenent, time may be measured to a wide variety
    # of events occuring at a wide range of scales. Forcing everying into a
    # single scale will lead to inefficient and awkward to work with result tables.
    # Coversion functions between the formats are specified, so that downstream
    # processors that expect all times time be at a particular scale can automatically
    # covert without being familar with individual instruments.
    MeasurementType('time', 'seconds', 'time',
        conversions={
            'time_us': lambda x: x * 1e6,
            'time_ms': lambda x: x * 1e3,
            'time_ns': lambda x: x * 1e9,
        }
    ),
    MeasurementType('time_us', 'microseconds', 'time',
        conversions={
            'time': lambda x: x / 1e6,
            'time_ms': lambda x: x / 1e3,
            'time_ns': lambda x: x * 1e3,
        }
    ),
    MeasurementType('time_ms', 'milliseconds', 'time',
        conversions={
            'time': lambda x: x / 1e3,
            'time_us': lambda x: x * 1e3,
            'time_ns': lambda x: x * 1e6,
        }
    ),
    MeasurementType('time_ns', 'nanoseconds', 'time',
        conversions={
            'time': lambda x: x / 1e9,
            'time_ms': lambda x: x / 1e6,
            'time_us': lambda x: x / 1e3,
        }
    ),

    # Measurements related to thermals.
    MeasurementType('temperature', 'degrees', 'thermal'),

    # Measurements related to power end energy consumption.
    MeasurementType('power', 'watts', 'power/energy'),
    MeasurementType('voltage', 'volts', 'power/energy'),
    MeasurementType('current', 'amps', 'power/energy'),
    MeasurementType('energy', 'joules', 'power/energy'),

    # Measurments realted to data transfer, e.g. neworking,
    # memory, or backing storage.
    MeasurementType('tx', 'bytes', 'data transfer'),
    MeasurementType('rx', 'bytes', 'data transfer'),
    MeasurementType('tx/rx', 'bytes', 'data transfer'),

    MeasurementType('fps', 'fps', 'ui render'),
    MeasurementType('frames', 'frames', 'ui render'),
]
MEASUREMENT_TYPES = {m.name: m for m in _measurement_types}
for m in _measurement_types:
    MEASUREMENT_TYPES[m.name] = m
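The conversion table registered above can be exercised directly; a small illustrative sketch (not part of the diff):

    from devlib.instrument import MEASUREMENT_TYPES

    ms = MEASUREMENT_TYPES['time_ms']
    # 1500 ms expressed in the canonical 'time' (seconds) type: 1500 / 1e3 = 1.5
    print(ms.convert(1500, 'time'))      # 1.5
    # ...and in nanoseconds: 1500 * 1e6
    print(ms.convert(1500, 'time_ns'))   # 1500000000.0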
class Measurement(object):
@@ -91,8 +162,9 @@ class Measurement(object):
        self.value = value
        self.channel = channel

    # pylint: disable=undefined-variable
    def __cmp__(self, other):
        if isinstance(other, Measurement):
        if hasattr(other, 'value'):
            return cmp(self.value, other.value)
        else:
            return cmp(self.value, other)
@@ -108,28 +180,73 @@

class MeasurementsCsv(object):

    def __init__(self, path, channels):
    def __init__(self, path, channels=None, sample_rate_hz=None):
        self.path = path
        self.channels = channels
        self._fh = open(path, 'rb')
        self.sample_rate_hz = sample_rate_hz
        if self.channels is None:
            self._load_channels()
        headings = [chan.label for chan in self.channels]
        self.data_tuple = collections.namedtuple('csv_entry',
                                                 map(identifier, headings))

    def measurements(self):
        return list(self.itermeasurements())
        return list(self.iter_measurements())

    def itermeasurements(self):
        self._fh.seek(0)
        reader = csv.reader(self._fh)
        reader.next()  # headings
        for row in reader:
    def iter_measurements(self):
        for row in self._iter_rows():
            values = map(numeric, row)
            yield [Measurement(v, c) for (v, c) in zip(values, self.channels)]

    def values(self):
        return list(self.iter_values())

    def iter_values(self):
        for row in self._iter_rows():
            values = list(map(numeric, row))
            yield self.data_tuple(*values)

    def _load_channels(self):
        header = []
        with csvreader(self.path) as reader:
            header = next(reader)

        self.channels = []
        for entry in header:
            for mt in MEASUREMENT_TYPES:
                suffix = '_{}'.format(mt)
                if entry.endswith(suffix):
                    site = entry[:-len(suffix)]
                    measure = mt
                    break
            else:
                if entry in MEASUREMENT_TYPES:
                    site = None
                    measure = entry
                else:
                    site = entry
                    measure = 'unknown'

            chan = InstrumentChannel(site, measure)
            self.channels.append(chan)

    # pylint: disable=stop-iteration-return
    def _iter_rows(self):
        with csvreader(self.path) as reader:
            next(reader)  # headings
            for row in reader:
                yield row


class InstrumentChannel(object):

    @property
    def label(self):
        return '{}_{}'.format(self.site, self.kind)
        if self.site is not None:
            return '{}_{}'.format(self.site, self.kind)
        return self.kind

    name = label

    @property
    def kind(self):
@@ -139,8 +256,7 @@ class InstrumentChannel(object):
    def units(self):
        return self.measurement_type.units

    def __init__(self, name, site, measurement_type, **attrs):
        self.name = name
    def __init__(self, site, measurement_type, **attrs):
        self.site = site
        if isinstance(measurement_type, MeasurementType):
            self.measurement_type = measurement_type
@@ -149,7 +265,7 @@ class InstrumentChannel(object):
            self.measurement_type = MEASUREMENT_TYPES[measurement_type]
        except KeyError:
            raise ValueError('Unknown measurement type: {}'.format(measurement_type))
        for atname, atvalue in attrs.iteritems():
        for atname, atvalue in attrs.items():
            setattr(self, atname, atvalue)

    def __str__(self):
@@ -175,17 +291,15 @@ class Instrument(object):
    # channel management

    def list_channels(self):
        return self.channels.values()
        return list(self.channels.values())

    def get_channels(self, measure):
        if hasattr(measure, 'name'):
            measure = measure.name
        return [c for c in self.list_channels() if c.kind == measure]

    def add_channel(self, site, measure, name=None, **attrs):
        if name is None:
            name = '{}_{}'.format(site, measure)
        chan = InstrumentChannel(name, site, measure, **attrs)
    def add_channel(self, site, measure, **attrs):
        chan = InstrumentChannel(site, measure, **attrs)
        self.channels[chan.label] = chan

    # initialization and teardown
@@ -197,24 +311,26 @@ class Instrument(object):
        pass

    def reset(self, sites=None, kinds=None, channels=None):
        if kinds is None and sites is None and channels is None:
        if channels is not None:
            if sites is not None or kinds is not None:
                raise ValueError('sites and kinds should not be set if channels is set')

            try:
                self.active_channels = [self.channels[ch] for ch in channels]
            except KeyError as e:
                msg = 'Unexpected channel "{}"; must be in {}'
                raise ValueError(msg.format(e, self.channels.keys()))
        elif sites is None and kinds is None:
            self.active_channels = sorted(self.channels.values(), key=lambda x: x.label)
        else:
            if isinstance(sites, basestring):
                sites = [sites]
            if isinstance(kinds, basestring):
                kinds = [kinds]
            self.active_channels = []
            for chan_name in (channels or []):
                try:
                    self.active_channels.append(self.channels[chan_name])
                except KeyError:
                    msg = 'Unexpected channel "{}"; must be in {}'
                    raise ValueError(msg.format(chan_name, self.channels.keys()))
            for chan in self.channels.values():
                if (kinds is None or chan.kind in kinds) and \
                   (sites is None or chan.site in sites):
                    self.active_channels.append(chan)

            wanted = lambda ch: ((kinds is None or ch.kind in kinds) and
                                 (sites is None or ch.site in sites))
            self.active_channels = list(filter(wanted, self.channels.values()))

    # instantaneous

@@ -229,5 +345,9 @@ class Instrument(object):
    def stop(self):
        pass

    # pylint: disable=no-self-use
    def get_data(self, outfile):
        pass

    def get_raw(self):
        return []
devlib/instrument/acmecape.py (new file, 161 lines)
@@ -0,0 +1,161 @@
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

#pylint: disable=attribute-defined-outside-init
from __future__ import division
import os
import sys
import time
import tempfile
import shlex
from fcntl import fcntl, F_GETFL, F_SETFL
from string import Template
from subprocess import Popen, PIPE, STDOUT
from pipes import quote

from devlib import Instrument, CONTINUOUS, MeasurementsCsv
from devlib.exception import HostError
from devlib.utils.csvutil import csvreader, csvwriter
from devlib.utils.misc import which

OUTPUT_CAPTURE_FILE = 'acme-cape.csv'
IIOCAP_CMD_TEMPLATE = Template("""
${iio_capture} -n ${host} -b ${buffer_size} -c -f ${outfile} ${iio_device}
""")

def _read_nonblock(pipe, size=1024):
    fd = pipe.fileno()
    flags = fcntl(fd, F_GETFL)
    flags |= os.O_NONBLOCK
    fcntl(fd, F_SETFL, flags)

    output = ''
    try:
        while True:
            output += pipe.read(size)
    except IOError:
        pass
    return output


class AcmeCapeInstrument(Instrument):

    mode = CONTINUOUS

    def __init__(self, target,
                 iio_capture=which('iio-capture'),
                 host='baylibre-acme.local',
                 iio_device='iio:device0',
                 buffer_size=256):
        super(AcmeCapeInstrument, self).__init__(target)
        self.iio_capture = iio_capture
        self.host = host
        self.iio_device = iio_device
        self.buffer_size = buffer_size
        self.sample_rate_hz = 100
        if self.iio_capture is None:
            raise HostError('Missing iio-capture binary')
        self.command = None
        self.process = None

        self.add_channel('shunt', 'voltage')
        self.add_channel('bus', 'voltage')
        self.add_channel('device', 'power')
        self.add_channel('device', 'current')
        self.add_channel('timestamp', 'time_ms')

    def __del__(self):
        if self.process and self.process.pid:
            self.logger.warning('killing iio-capture process [{}]...'.format(self.process.pid))
            self.process.kill()

    def reset(self, sites=None, kinds=None, channels=None):
        super(AcmeCapeInstrument, self).reset(sites, kinds, channels)
        self.raw_data_file = tempfile.mkstemp('.csv')[1]
        params = dict(
            iio_capture=self.iio_capture,
            host=self.host,
            # This must be a string for quote()
            buffer_size=str(self.buffer_size),
            iio_device=self.iio_device,
            outfile=self.raw_data_file
        )
        params = {k: quote(v) for k, v in params.items()}
        self.command = IIOCAP_CMD_TEMPLATE.substitute(**params)
        self.logger.debug('ACME cape command: {}'.format(self.command))

    def start(self):
        self.process = Popen(shlex.split(self.command), stdout=PIPE, stderr=STDOUT)

    def stop(self):
        self.process.terminate()
        timeout_secs = 10
        output = ''
        for _ in range(timeout_secs):
            if self.process.poll() is not None:
                break
            time.sleep(1)
        else:
            output += _read_nonblock(self.process.stdout)
            self.process.kill()
            self.logger.error('iio-capture did not terminate gracefully')
            if self.process.poll() is None:
                msg = 'Could not terminate iio-capture:\n{}'
                raise HostError(msg.format(output))
        if self.process.returncode != 15:  # iio-capture exits with 15 when killed
            if sys.version_info[0] == 3:
                output += self.process.stdout.read().decode(sys.stdout.encoding or 'utf-8', 'replace')
            else:
                output += self.process.stdout.read()
            self.logger.info('ACME instrument encountered an error, '
                             'you may want to try rebooting the ACME device:\n'
                             '  ssh root@{} reboot'.format(self.host))
            raise HostError('iio-capture exited with an error ({}), output:\n{}'
                            .format(self.process.returncode, output))
        if not os.path.isfile(self.raw_data_file):
            raise HostError('Output CSV not generated.')
        self.process = None

    def get_data(self, outfile):
        if os.stat(self.raw_data_file).st_size == 0:
            self.logger.warning('"{}" appears to be empty'.format(self.raw_data_file))
            return

        all_channels = [c.label for c in self.list_channels()]
        active_channels = [c.label for c in self.active_channels]
        active_indexes = [all_channels.index(ac) for ac in active_channels]

        with csvreader(self.raw_data_file, skipinitialspace=True) as reader:
            with csvwriter(outfile) as writer:
                writer.writerow(active_channels)

                header = next(reader)
                ts_index = header.index('timestamp ms')

                for row in reader:
                    output_row = []
for i in active_indexes:
|
||||
if i == ts_index:
|
||||
# Leave time in ms
|
||||
output_row.append(float(row[i]))
|
||||
else:
|
||||
# Convert rest into standard units.
|
||||
output_row.append(float(row[i])/1000)
|
||||
writer.writerow(output_row)
|
||||
return MeasurementsCsv(outfile, self.active_channels, self.sample_rate_hz)
|
||||
|
||||
def get_raw(self):
|
||||
return [self.raw_data_file]
|
devlib/instrument/arm_energy_probe.py (new file, 144 lines)
@@ -0,0 +1,144 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# Copyright 2018 Linaro Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
# pylint: disable=W0613,E1101,access-member-before-definition,attribute-defined-outside-init
|
||||
from __future__ import division
|
||||
import os
|
||||
import subprocess
|
||||
import signal
|
||||
from pipes import quote
|
||||
|
||||
import tempfile
|
||||
import shutil
|
||||
|
||||
from devlib.instrument import Instrument, CONTINUOUS, MeasurementsCsv
|
||||
from devlib.exception import HostError
|
||||
from devlib.utils.csvutil import csvreader, csvwriter
|
||||
from devlib.utils.misc import which
|
||||
|
||||
from devlib.utils.parse_aep import AepParser
|
||||
|
||||
class ArmEnergyProbeInstrument(Instrument):
|
||||
"""
|
||||
Collects power traces using the ARM Energy Probe.

This instrument requires the ``arm-probe`` utility to be installed on the host and
available in the PATH. arm-probe is available here:
``https://git.linaro.org/tools/arm-probe.git``.

Details about how to build and use it are available here:
``https://git.linaro.org/tools/arm-probe.git/tree/README``

An ARM Energy Probe (AEP) device can simultaneously collect power from up to 3 power
rails, and the arm-probe utility can record data from several AEP devices simultaneously.

To connect the energy probe to a rail, connect the white wire to the pin that is closer
to the voltage source and the black wire to the pin that is closer to the load (the SoC
or the device you are probing). Between the pins there should be a shunt resistor of
known resistance in the range of 5 to 500 mOhm; the voltage across the shunt resistor
must stay below 165 mV. The resistance of the shunt resistors is a mandatory parameter
that must be set in the ``config`` file.
"""
|
||||
|
||||
mode = CONTINUOUS
|
||||
|
||||
MAX_CHANNELS = 12 # 4 Arm Energy Probes
|
||||
|
||||
def __init__(self, target, config_file='./config-aep', ):
|
||||
super(ArmEnergyProbeInstrument, self).__init__(target)
|
||||
self.arm_probe = which('arm-probe')
|
||||
if self.arm_probe is None:
|
||||
raise HostError('arm-probe must be installed on the host')
|
||||
# TODO: detect whether the config file exists
|
||||
self.attributes = ['power', 'voltage', 'current']
|
||||
self.sample_rate_hz = 10000
|
||||
self.config_file = config_file
|
||||
|
||||
self.parser = AepParser()
|
||||
#TODO make it generic
|
||||
topo = self.parser.topology_from_config(self.config_file)
|
||||
for item in topo:
|
||||
if item == 'time':
|
||||
self.add_channel('timestamp', 'time')
|
||||
else:
|
||||
self.add_channel(item, 'power')
|
||||
|
||||
def reset(self, sites=None, kinds=None, channels=None):
|
||||
super(ArmEnergyProbeInstrument, self).reset(sites, kinds, channels)
|
||||
self.output_directory = tempfile.mkdtemp(prefix='energy_probe')
|
||||
self.output_file_raw = os.path.join(self.output_directory, 'data_raw')
|
||||
self.output_file = os.path.join(self.output_directory, 'data')
|
||||
self.output_file_figure = os.path.join(self.output_directory, 'summary.txt')
|
||||
self.output_file_error = os.path.join(self.output_directory, 'error.log')
|
||||
self.output_fd_error = open(self.output_file_error, 'w')
|
||||
self.command = 'arm-probe --config {} > {}'.format(quote(self.config_file), quote(self.output_file_raw))
|
||||
|
||||
def start(self):
|
||||
self.logger.debug(self.command)
|
||||
self.armprobe = subprocess.Popen(self.command,
|
||||
stderr=self.output_fd_error,
|
||||
preexec_fn=os.setpgrp,
|
||||
shell=True)
|
||||
|
||||
def stop(self):
|
||||
self.logger.debug("kill running arm-probe")
|
||||
os.killpg(self.armprobe.pid, signal.SIGTERM)
|
||||
|
||||
def get_data(self, outfile): # pylint: disable=R0914
|
||||
self.logger.debug("Parse data and compute consumed energy")
|
||||
self.parser.prepare(self.output_file_raw, self.output_file, self.output_file_figure)
|
||||
self.parser.parse_aep()
|
||||
self.parser.unprepare()
|
||||
skip_header = 1
|
||||
|
||||
all_channels = [c.label for c in self.list_channels()]
|
||||
active_channels = [c.label for c in self.active_channels]
|
||||
active_indexes = [all_channels.index(ac) for ac in active_channels]
|
||||
|
||||
with csvreader(self.output_file, delimiter=' ') as reader:
|
||||
with csvwriter(outfile) as writer:
|
||||
for row in reader:
|
||||
if skip_header == 1:
|
||||
writer.writerow(active_channels)
|
||||
skip_header = 0
|
||||
continue
|
||||
if len(row) < len(active_channels):
|
||||
continue
|
||||
# all data are in micro (seconds/watt)
|
||||
new = [float(row[i])/1000000 for i in active_indexes]
|
||||
writer.writerow(new)
|
||||
|
||||
self.output_fd_error.close()
|
||||
shutil.rmtree(self.output_directory)
|
||||
|
||||
return MeasurementsCsv(outfile, self.active_channels, self.sample_rate_hz)
|
||||
|
||||
def get_raw(self):
|
||||
return [self.output_file_raw]
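A hedged usage sketch for the instrument above; `target` is assumed to be an already-connected devlib target, and the config file path and output file name are placeholders. The config file must declare the shunt resistances, as noted in the docstring:

from devlib.instrument.arm_energy_probe import ArmEnergyProbeInstrument

aep = ArmEnergyProbeInstrument(target, config_file='./config-aep')
aep.reset(kinds=['power'])           # collect only the power channels
aep.start()
# ... run the workload being measured ...
aep.stop()
csv = aep.get_data('aep_power.csv')  # returns a MeasurementsCsv
raw = aep.get_raw()                  # path to the raw arm-probe capture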
|
devlib/instrument/baylibre_acme.py (new file, 557 lines)
@@ -0,0 +1,557 @@
|
||||
#pylint: disable=attribute-defined-outside-init
|
||||
|
||||
import collections
|
||||
import functools
|
||||
import re
|
||||
import threading
|
||||
|
||||
from past.builtins import basestring
|
||||
|
||||
try:
|
||||
import iio
|
||||
except ImportError as e:
|
||||
iio_import_failed = True
|
||||
iio_import_error = e
|
||||
else:
|
||||
iio_import_failed = False
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
|
||||
from devlib import CONTINUOUS, Instrument, HostError, MeasurementsCsv, TargetError
|
||||
from devlib.utils.ssh import SshConnection
|
||||
|
||||
class IIOINA226Channel(object):
|
||||
|
||||
def __init__(self, iio_channel):
|
||||
|
||||
channel_id = iio_channel.id
|
||||
channel_type = iio_channel.attrs['type'].value
|
||||
|
||||
re_measure = r'(?P<measure>\w+)(?P<index>\d*)$'
|
||||
re_dtype = r'le:(?P<sign>\w)(?P<width>\d+)/(?P<size>\d+)>>(?P<align>\d+)'
|
||||
|
||||
match_measure = re.search(re_measure, channel_id)
|
||||
match_dtype = re.search(re_dtype, channel_type)
|
||||
|
||||
if not match_measure:
|
||||
msg = "IIO channel ID '{}' does not match expected RE '{}'"
|
||||
raise ValueError(msg.format(channel_id, re_measure))
|
||||
|
||||
if not match_dtype:
|
||||
msg = "'IIO channel type '{}' does not match expected RE '{}'"
|
||||
raise ValueError(msg.format(channel_type, re_dtype))
|
||||
|
||||
self.measure = match_measure.group('measure')
|
||||
self.iio_dtype = 'int{}'.format(match_dtype.group('width'))
|
||||
self.iio_channel = iio_channel
|
||||
# Data is reported in amps, volts, watts and microseconds:
|
||||
self.iio_scale = (1. if 'scale' not in iio_channel.attrs
|
||||
else float(iio_channel.attrs['scale'].value))
|
||||
self.iio_scale /= 1000
|
||||
# As calls to iio_store_buffer will be blocking and probably coming
|
||||
# from a loop retrieving samples from the ACME, we want to provide
|
||||
# consistency in processing timing between iterations i.e. we want
|
||||
# iio_store_buffer to be o(1) for every call (can't have that with []):
|
||||
self.sample_buffers = collections.deque()
|
||||
|
||||
def iio_store_buffer_samples(self, iio_buffer):
|
||||
# IIO buffers receive and store their data as an interlaced array of
|
||||
# samples from all the IIO channels of the IIO device. The IIO library
|
||||
# provides a reliable function to extract the samples (bytes, actually)
|
||||
# corresponding to a channel from the received buffer; in Python, it is
|
||||
# iio.Channel.read(iio.Buffer).
|
||||
#
|
||||
# NB: As this is called in a potentially tightly timed loop, we do as
|
||||
# little work as possible:
|
||||
self.sample_buffers.append(self.iio_channel.read(iio_buffer))
|
||||
|
||||
def iio_get_samples(self, absolute_timestamps=False):
|
||||
# Up to this point, the data is not interpreted yet i.e. these are
|
||||
# bytearrays. Hence the use of np.dtypes.
|
||||
buffers = [np.frombuffer(b, dtype=self.iio_dtype)
|
||||
for b in self.sample_buffers]
|
||||
|
||||
must_shift = (self.measure == 'timestamp' and not absolute_timestamps)
|
||||
samples = np.concatenate(buffers)
|
||||
return (samples - samples[0] if must_shift else samples) * self.iio_scale
|
||||
|
||||
def iio_forget_samples(self):
|
||||
self.sample_buffers.clear()
|
||||
|
||||
|
||||
# Decorators for the attributes of IIOINA226Instrument:
|
||||
|
||||
def only_set_to(valid_values, dynamic=False):
|
||||
def validating_wrapper(func):
|
||||
@functools.wraps(func)
|
||||
def wrapper(self, value):
|
||||
values = (valid_values if not dynamic
|
||||
else getattr(self, valid_values))
|
||||
if value not in values:
|
||||
msg = '{} is invalid; expected values are {}'
|
||||
raise ValueError(msg.format(value, valid_values))
|
||||
return func(self, value)
|
||||
return wrapper
|
||||
return validating_wrapper
|
||||
|
||||
|
||||
def with_input_as(wanted_type):
|
||||
def typecasting_wrapper(func):
|
||||
@functools.wraps(func)
|
||||
def wrapper(self, value):
|
||||
return func(self, wanted_type(value))
|
||||
return wrapper
|
||||
return typecasting_wrapper
|
||||
|
||||
|
||||
def _IIODeviceAttr(attr_name, attr_type, writable=False, dyn_vals=None, stat_vals=None):
|
||||
|
||||
def getter(self):
|
||||
return attr_type(self.iio_device.attrs[attr_name].value)
|
||||
|
||||
def setter(self, value):
|
||||
self.iio_device.attrs[attr_name].value = str(attr_type(value))
|
||||
|
||||
if writable and (dyn_vals or stat_vals):
|
||||
vals, dyn = dyn_vals or stat_vals, dyn_vals is not None
|
||||
setter = with_input_as(attr_type)(only_set_to(vals, dyn)(setter))
|
||||
|
||||
return property(getter, setter if writable else None)
|
||||
|
||||
|
||||
def _IIOChannelIntTime(chan_name):
|
||||
|
||||
attr_name, attr_type = 'integration_time', float
|
||||
|
||||
def getter(self):
|
||||
ch = self.iio_device.find_channel(chan_name)
|
||||
return attr_type(ch.attrs[attr_name].value)
|
||||
|
||||
@only_set_to('INTEGRATION_TIMES_AVAILABLE', dynamic=True)
|
||||
@with_input_as(attr_type)
|
||||
def setter(self, value):
|
||||
ch = self.iio_device.find_channel(chan_name)
|
||||
ch.attrs[attr_name].value = str(value)
|
||||
|
||||
return property(getter, setter)
|
||||
|
||||
|
||||
def _setify(x):
|
||||
return {x} if isinstance(x, basestring) else set(x) #Py3: basestring->str
|
||||
|
||||
|
||||
class IIOINA226Instrument(object):
|
||||
|
||||
IIO_DEVICE_NAME = 'ina226'
|
||||
|
||||
def __init__(self, iio_device):
|
||||
|
||||
if iio_device.name != self.IIO_DEVICE_NAME:
|
||||
msg = 'IIO device is {}; expected {}'
|
||||
raise TargetError(msg.format(iio_device.name, self.IIO_DEVICE_NAME))
|
||||
|
||||
self.iio_device = iio_device
|
||||
self.absolute_timestamps = False
|
||||
self.high_resolution = True
|
||||
self.buffer_samples_count = None
|
||||
self.buffer_is_circular = False
|
||||
|
||||
self.collector = None
|
||||
self.work_done = threading.Event()
|
||||
self.collector_exception = None
|
||||
|
||||
self.data = collections.OrderedDict()
|
||||
|
||||
channels = {
|
||||
'timestamp': 'timestamp',
|
||||
'shunt' : 'voltage0',
|
||||
'voltage' : 'voltage1', # bus
|
||||
'power' : 'power2',
|
||||
'current' : 'current3',
|
||||
}
|
||||
self.computable_channels = {'current' : {'shunt'},
|
||||
'power' : {'shunt', 'voltage'}}
|
||||
self.uncomputable_channels = set(channels) - set(self.computable_channels)
|
||||
self.channels = {k: IIOINA226Channel(self.iio_device.find_channel(v))
|
||||
for k, v in channels.items()}
|
||||
# We distinguish between "output" channels (as seen by the user of this
|
||||
# class) and "hardware" channels (as requested from the INA226).
|
||||
# This is necessary because of the 'high_resolution' feature which
|
||||
# requires outputting computed channels:
|
||||
self.active_channels = set() # "hardware" channels
|
||||
self.wanted_channels = set() # "output" channels
|
||||
|
||||
|
||||
# Properties
|
||||
|
||||
OVERSAMPLING_RATIOS_AVAILABLE = (1, 4, 16, 64, 128, 256, 512, 1024)
|
||||
INTEGRATION_TIMES_AVAILABLE = _IIODeviceAttr('integration_time_available',
|
||||
lambda x: tuple(map(float, x.split())))
|
||||
|
||||
sample_rate_hz = _IIODeviceAttr('in_sampling_frequency', int)
|
||||
shunt_resistor = _IIODeviceAttr('in_shunt_resistor' , int, True)
|
||||
oversampling_ratio = _IIODeviceAttr('in_oversampling_ratio', int, True,
|
||||
dyn_vals='OVERSAMPLING_RATIOS_AVAILABLE')
|
||||
|
||||
integration_time_shunt = _IIOChannelIntTime('voltage0')
|
||||
integration_time_bus = _IIOChannelIntTime('voltage1')
|
||||
|
||||
def list_channels(self):
|
||||
return self.channels.keys()
|
||||
|
||||
def activate(self, channels=None):
|
||||
all_channels = set(self.channels)
|
||||
requested_channels = (all_channels if channels is None
|
||||
else _setify(channels))
|
||||
|
||||
unknown = ', '.join(requested_channels - all_channels)
|
||||
if unknown:
|
||||
raise ValueError('Unknown channel(s): {}'.format(unknown))
|
||||
|
||||
self.wanted_channels |= requested_channels
|
||||
|
||||
def deactivate(self, channels=None):
|
||||
unwanted_channels = (self.wanted_channels if channels is None
|
||||
else _setify(channels))
|
||||
|
||||
unknown = ', '.join(unwanted_channels - set(self.channels))
|
||||
if unknown:
|
||||
raise ValueError('Unknown channel(s): {}'.format(unknown))
|
||||
|
||||
unactive = ', '.join(unwanted_channels - self.wanted_channels)
|
||||
if unactive:
|
||||
raise ValueError('Already unactive channel(s): {}'.format(unactive))
|
||||
|
||||
self.wanted_channels -= unwanted_channels
|
||||
|
||||
def sample_collector(self):
|
||||
class Collector(threading.Thread):
|
||||
def run(collector_self):
|
||||
for name, ch in self.channels.items():
|
||||
ch.iio_channel.enabled = (name in self.active_channels)
|
||||
|
||||
samples_count = self.buffer_samples_count or self.sample_rate_hz
|
||||
|
||||
iio_buffer = iio.Buffer(self.iio_device, samples_count,
|
||||
self.buffer_is_circular)
|
||||
# NB: This buffer creates a communication pipe to the
|
||||
# BeagleBone (or is it between the BBB and the ACME?)
|
||||
# that locks down any configuration. The IIO drivers
|
||||
# do not limit access when a buffer exists so that
|
||||
# configuring the INA226 (i.e. accessing iio.Device.attrs
|
||||
# or iio.Channel.attrs from iio.Device.channels i.e.
|
||||
# assigning to or reading from any property of this class
|
||||
# or calling its setup or reset methods) will screw up the
|
||||
# whole system and will require rebooting the BBB-ACME board!
|
||||
|
||||
self.collector_exception = None
|
||||
try:
|
||||
refilled_once = False
|
||||
while not (refilled_once and self.work_done.is_set()):
|
||||
refilled_once = True
|
||||
iio_buffer.refill()
|
||||
for name in self.active_channels:
|
||||
self.channels[name].iio_store_buffer_samples(iio_buffer)
|
||||
except Exception as e:
|
||||
self.collector_exception = e
|
||||
finally:
|
||||
del iio_buffer
|
||||
for ch in self.channels.values():
|
||||
ch.enabled = False
|
||||
|
||||
return Collector()
|
||||
|
||||
def start_capturing(self):
|
||||
if not self.wanted_channels:
|
||||
raise TargetError('No active channel: aborting.')
|
||||
|
||||
self.active_channels = self.wanted_channels.copy()
|
||||
if self.high_resolution:
|
||||
self.active_channels &= self.uncomputable_channels
|
||||
for channel, dependencies in self.computable_channels.items():
|
||||
if channel in self.wanted_channels:
|
||||
self.active_channels |= dependencies
|
||||
|
||||
self.work_done.clear()
|
||||
self.collector = self.sample_collector()
|
||||
self.collector.daemon = True
|
||||
self.collector.start()
|
||||
|
||||
def stop_capturing(self):
|
||||
self.work_done.set()
|
||||
self.collector.join()
|
||||
|
||||
if self.collector_exception:
|
||||
raise self.collector_exception
|
||||
|
||||
self.data.clear()
|
||||
for channel in self.active_channels:
|
||||
ch = self.channels[channel]
|
||||
self.data[channel] = ch.iio_get_samples(self.absolute_timestamps)
|
||||
ch.iio_forget_samples()
|
||||
|
||||
if self.high_resolution:
|
||||
res_ohm = 1e-6 * self.shunt_resistor
|
||||
current = self.data['shunt'] / res_ohm
|
||||
if 'current' in self.wanted_channels:
|
||||
self.data['current'] = current
|
||||
if 'power' in self.wanted_channels:
|
||||
self.data['power'] = current * self.data['voltage']
|
||||
for channel in set(self.data) - self.wanted_channels:
|
||||
del self.data[channel]
|
||||
|
||||
self.active_channels.clear()
|
||||
|
||||
def get_data(self):
|
||||
return self.data
|
||||
|
||||
|
||||
class BaylibreAcmeInstrument(Instrument):
|
||||
|
||||
mode = CONTINUOUS
|
||||
|
||||
MINIMAL_ACME_SD_IMAGE_VERSION = (2, 1, 3)
|
||||
MINIMAL_ACME_IIO_DRIVERS_VERSION = (0, 6)
|
||||
MINIMAL_HOST_IIO_DRIVERS_VERSION = (0, 15)
|
||||
|
||||
def __init__(self, target=None, iio_context=None,
|
||||
use_base_iio_context=False, probe_names=None):
|
||||
|
||||
if iio_import_failed:
|
||||
raise HostError('Could not import "iio": {}'.format(iio_import_error))
|
||||
|
||||
super(BaylibreAcmeInstrument, self).__init__(target)
|
||||
|
||||
if isinstance(probe_names, basestring):
|
||||
probe_names = [probe_names]
|
||||
|
||||
self.iio_context = (iio_context if not use_base_iio_context
|
||||
else iio.Context(iio_context))
|
||||
|
||||
self.check_version()
|
||||
|
||||
if probe_names is not None:
|
||||
if len(probe_names) != len(set(probe_names)):
|
||||
msg = 'Probe names should be unique: {}'
|
||||
raise ValueError(msg.format(probe_names))
|
||||
|
||||
if len(probe_names) != len(self.iio_context.devices):
|
||||
msg = ('There should be as many probe_names ({}) '
|
||||
'as detected probes ({}).')
|
||||
raise ValueError(msg.format(len(probe_names),
|
||||
len(self.iio_context.devices)))
|
||||
|
||||
probes = [IIOINA226Instrument(d) for d in self.iio_context.devices]
|
||||
|
||||
self.probes = (dict(zip(probe_names, probes)) if probe_names
|
||||
else {p.iio_device.id : p for p in probes})
|
||||
self.active_probes = set()
|
||||
|
||||
for probe in self.probes:
|
||||
for measure in ['voltage', 'power', 'current']:
|
||||
self.add_channel(site=probe, measure=measure)
|
||||
self.add_channel('timestamp', 'time_us')
|
||||
|
||||
self.data = pd.DataFrame()
|
||||
|
||||
def check_version(self):
|
||||
msg = ('The IIO drivers running on {} ({}) are out-of-date; '
|
||||
'devlib requires {} or later.')
|
||||
|
||||
if iio.version[:2] < self.MINIMAL_HOST_IIO_DRIVERS_VERSION:
|
||||
ver_str = '.'.join(map(str, iio.version[:2]))
|
||||
min_str = '.'.join(map(str, self.MINIMAL_HOST_IIO_DRIVERS_VERSION))
|
||||
raise HostError(msg.format('this host', ver_str, min_str))
|
||||
|
||||
if self.version[:2] < self.MINIMAL_ACME_IIO_DRIVERS_VERSION:
|
||||
ver_str = '.'.join(map(str, self.version[:2]))
|
||||
min_str = '.'.join(map(str, self.MINIMAL_ACME_IIO_DRIVERS_VERSION))
|
||||
raise TargetError(msg.format('the BBB', ver_str, min_str))
|
||||
|
||||
# properties
|
||||
|
||||
def probes_unique_property(self, property_name):
|
||||
probes = self.active_probes or self.probes
|
||||
try:
|
||||
# This will fail if there is not exactly one single value:
|
||||
(value,) = {getattr(self.probes[p], property_name) for p in probes}
|
||||
except ValueError:
|
||||
msg = 'Probes have different values for {}.'
|
||||
raise ValueError(msg.format(property_name) if probes else 'No probe')
|
||||
return value
|
||||
|
||||
@property
|
||||
def version(self):
|
||||
return self.iio_context.version
|
||||
|
||||
@property
|
||||
def OVERSAMPLING_RATIOS_AVAILABLE(self):
|
||||
return self.probes_unique_property('OVERSAMPLING_RATIOS_AVAILABLE')
|
||||
|
||||
@property
|
||||
def INTEGRATION_TIMES_AVAILABLE(self):
|
||||
return self.probes_unique_property('INTEGRATION_TIMES_AVAILABLE')
|
||||
|
||||
@property
|
||||
def sample_rate_hz(self):
|
||||
return self.probes_unique_property('sample_rate_hz')
|
||||
|
||||
@sample_rate_hz.setter
|
||||
# This setter is required for compliance with the inherited methods
|
||||
def sample_rate_hz(self, value):
|
||||
if value is not None:
|
||||
raise AttributeError("can't set attribute")
|
||||
|
||||
# initialization and teardown
|
||||
|
||||
def setup(self, shunt_resistor,
|
||||
integration_time_bus,
|
||||
integration_time_shunt,
|
||||
oversampling_ratio,
|
||||
buffer_samples_count=None,
|
||||
buffer_is_circular=False,
|
||||
absolute_timestamps=False,
|
||||
high_resolution=True):
|
||||
|
||||
def pseudo_list(v, i):
|
||||
try:
|
||||
return v[i]
|
||||
except TypeError:
|
||||
return v
|
||||
|
||||
for i, p in enumerate(self.probes.values()):
|
||||
for attr, val in locals().items():
|
||||
if attr != 'self':
|
||||
setattr(p, attr, pseudo_list(val, i))
|
||||
|
||||
self.absolute_timestamps = all(pseudo_list(absolute_timestamps, i)
|
||||
for i in range(len(self.probes)))
|
||||
|
||||
def reset(self, sites=None, kinds=None, channels=None):
|
||||
|
||||
# populate self.active_channels:
|
||||
super(BaylibreAcmeInstrument, self).reset(sites, kinds, channels)
|
||||
|
||||
for ch in self.active_channels:
|
||||
if ch.site != 'timestamp':
|
||||
self.probes[ch.site].activate(['timestamp', ch.kind])
|
||||
self.active_probes.add(ch.site)
|
||||
|
||||
def teardown(self):
|
||||
del self.active_channels[:]
|
||||
self.active_probes.clear()
|
||||
|
||||
def start(self):
|
||||
for p in self.active_probes:
|
||||
self.probes[p].start_capturing()
|
||||
|
||||
def stop(self):
|
||||
for p in self.active_probes:
|
||||
self.probes[p].stop_capturing()
|
||||
|
||||
max_rate_probe = max(self.active_probes,
|
||||
key=lambda p: self.probes[p].sample_rate_hz)
|
||||
|
||||
probes_dataframes = {
|
||||
probe: pd.DataFrame.from_dict(self.probes[probe].get_data())
|
||||
.set_index('timestamp')
|
||||
for probe in self.active_probes
|
||||
}
|
||||
|
||||
for df in probes_dataframes.values():
|
||||
df.set_index(pd.to_datetime(df.index, unit='us'), inplace=True)
|
||||
|
||||
final_index = probes_dataframes[max_rate_probe].index
|
||||
|
||||
df = pd.concat(probes_dataframes, axis=1).sort_index()
|
||||
df.columns = ['_'.join(c).strip() for c in df.columns.values]
|
||||
|
||||
self.data = df.interpolate('time').reindex(final_index)
|
||||
|
||||
if not self.absolute_timestamps:
|
||||
epoch_index = self.data.index.astype(np.int64) // 1000
|
||||
self.data.set_index(epoch_index, inplace=True)
|
||||
# self.data.index is in [us]
|
||||
# columns are in volts, amps and watts
|
||||
|
||||
def get_data(self, outfile=None, **to_csv_kwargs):
|
||||
if outfile is None:
|
||||
return self.data
|
||||
|
||||
self.data.to_csv(outfile, **to_csv_kwargs)
|
||||
return MeasurementsCsv(outfile, self.active_channels)
|
||||
|
||||
class BaylibreAcmeLocalInstrument(BaylibreAcmeInstrument):
|
||||
|
||||
def __init__(self, target=None, probe_names=None):
|
||||
|
||||
if iio_import_failed:
|
||||
raise HostError('Could not import "iio": {}'.format(iio_import_error))
|
||||
|
||||
super(BaylibreAcmeLocalInstrument, self).__init__(
|
||||
target=target,
|
||||
iio_context=iio.LocalContext(),
|
||||
probe_names=probe_names
|
||||
)
|
||||
|
||||
class BaylibreAcmeXMLInstrument(BaylibreAcmeInstrument):
|
||||
|
||||
def __init__(self, target=None, xmlfile=None, probe_names=None):
|
||||
|
||||
if iio_import_failed:
|
||||
raise HostError('Could not import "iio": {}'.format(iio_import_error))
|
||||
|
||||
super(BaylibreAcmeXMLInstrument, self).__init__(
|
||||
target=target,
|
||||
iio_context=iio.XMLContext(xmlfile),
|
||||
probe_names=probe_names
|
||||
)
|
||||
|
||||
class BaylibreAcmeNetworkInstrument(BaylibreAcmeInstrument):
|
||||
|
||||
def __init__(self, target=None, hostname=None, probe_names=None):
|
||||
|
||||
if iio_import_failed:
|
||||
raise HostError('Could not import "iio": {}'.format(iio_import_error))
|
||||
|
||||
super(BaylibreAcmeNetworkInstrument, self).__init__(
|
||||
target=target,
|
||||
iio_context=iio.NetworkContext(hostname),
|
||||
probe_names=probe_names
|
||||
)
|
||||
|
||||
try:
|
||||
self.ssh_connection = SshConnection(hostname, username='root', password=None)
|
||||
except TargetError as e:
|
||||
msg = 'No SSH connection could be established to {}: {}'
|
||||
self.logger.debug(msg.format(hostname, e))
|
||||
self.ssh_connection = None
|
||||
|
||||
def check_version(self):
|
||||
super(BaylibreAcmeNetworkInstrument, self).check_version()
|
||||
|
||||
cmd = r"""sed -nr 's/^VERSION_ID="(.+)"$/\1/p' < /etc/os-release"""
|
||||
try:
|
||||
ver_str = self._ssh(cmd).rstrip()
|
||||
ver = tuple(map(int, ver_str.split('.')))
|
||||
except Exception as e:
|
||||
self.logger.debug('Unable to verify ACME SD image version through SSH: {}'.format(e))
|
||||
else:
|
||||
if ver < self.MINIMAL_ACME_SD_IMAGE_VERSION:
|
||||
min_str = '.'.join(map(str, self.MINIMAL_ACME_SD_IMAGE_VERSION))
|
||||
msg = ('The ACME SD image for the BBB (ver. {}) is out-of-date; '
|
||||
'devlib requires {} or later.')
|
||||
raise TargetError(msg.format(ver_str, min_str))
|
||||
|
||||
def _ssh(self, cmd=''):
|
||||
"""Connections are assumed to be rare."""
|
||||
if self.ssh_connection is None:
|
||||
raise TargetError('No SSH connection; see log.')
|
||||
return self.ssh_connection.execute(cmd)
|
||||
|
||||
def _reboot(self):
|
||||
"""Always delete the object after calling its _reboot method"""
|
||||
try:
|
||||
self._ssh('reboot')
|
||||
except:
|
||||
pass
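A sketch of driving the network variant above. The hostname, probe names and setup values are illustrative only; shunt_resistor is expressed in micro-ohms (matching the `1e-6 * self.shunt_resistor` conversion above), and the integration times must be among INTEGRATION_TIMES_AVAILABLE:

from devlib.instrument.baylibre_acme import BaylibreAcmeNetworkInstrument

acme = BaylibreAcmeNetworkInstrument(hostname='baylibre-acme.local',
                                     probe_names=['battery', 'gpu'])
acme.setup(shunt_resistor=20000,         # 20 mOhm, in micro-ohms
           integration_time_bus=0.000588,
           integration_time_shunt=0.000588,
           oversampling_ratio=4)
acme.reset(kinds=['power'])
acme.start()
# ... workload ...
acme.stop()
df = acme.get_data()                     # pandas DataFrame indexed by timestamp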
|
@@ -1,19 +1,34 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import os
|
||||
import csv
|
||||
import tempfile
|
||||
from itertools import chain
|
||||
|
||||
from devlib.instrument import Instrument, MeasurementsCsv, CONTINUOUS
|
||||
from devlib.exception import HostError
|
||||
from devlib.utils.csvutil import csvwriter, create_reader
|
||||
from devlib.utils.misc import unique
|
||||
|
||||
try:
|
||||
from daqpower.client import execute_command, Status
|
||||
from daqpower.config import DeviceConfiguration, ServerConfiguration
|
||||
except ImportError, e:
|
||||
except ImportError as e:
|
||||
execute_command, Status = None, None
|
||||
DeviceConfiguration, ServerConfiguration, ConfigurationError = None, None, None
|
||||
import_error_mesg = e.message
|
||||
import_error_mesg = e.args[0] if e.args else str(e)
|
||||
|
||||
|
||||
class DaqInstrument(Instrument):
|
||||
@@ -33,10 +48,11 @@ class DaqInstrument(Instrument):
|
||||
# pylint: disable=no-member
|
||||
super(DaqInstrument, self).__init__(target)
|
||||
self._need_reset = True
|
||||
self._raw_files = []
|
||||
if execute_command is None:
|
||||
raise HostError('Could not import "daqpower": {}'.format(import_error_mesg))
|
||||
if labels is None:
|
||||
labels = ['PORT_{}'.format(i) for i in xrange(len(resistor_values))]
|
||||
labels = ['PORT_{}'.format(i) for i in range(len(resistor_values))]
|
||||
if len(labels) != len(resistor_values):
|
||||
raise ValueError('"labels" and "resistor_values" must be of the same length')
|
||||
self.server_config = ServerConfiguration(host=host,
|
||||
@@ -44,7 +60,8 @@ class DaqInstrument(Instrument):
|
||||
result = self.execute('list_devices')
|
||||
if result.status == Status.OK:
|
||||
if device_id not in result.data:
|
||||
raise ValueError('Device "{}" is not found on the DAQ server.'.format(device_id))
|
||||
msg = 'Device "{}" is not found on the DAQ server. Available devices are: "{}"'
|
||||
raise ValueError(msg.format(device_id, ', '.join(result.data)))
|
||||
elif result.status != Status.OKISH:
|
||||
raise HostError('Problem querying DAQ server: {}'.format(result.message))
|
||||
|
||||
@@ -68,6 +85,7 @@ class DaqInstrument(Instrument):
|
||||
if not result.status == Status.OK: # pylint: disable=no-member
|
||||
raise HostError(result.message)
|
||||
self._need_reset = False
|
||||
self._raw_files = []
|
||||
|
||||
def start(self):
|
||||
if self._need_reset:
|
||||
@@ -86,6 +104,7 @@ class DaqInstrument(Instrument):
|
||||
site = os.path.splitext(entry)[0]
|
||||
path = os.path.join(tempdir, entry)
|
||||
raw_file_map[site] = path
|
||||
self._raw_files.append(path)
|
||||
|
||||
active_sites = unique([c.site for c in self.active_channels])
|
||||
file_handles = []
|
||||
@@ -94,8 +113,8 @@ class DaqInstrument(Instrument):
|
||||
for site in active_sites:
|
||||
try:
|
||||
site_file = raw_file_map[site]
|
||||
fh = open(site_file, 'rb')
|
||||
site_readers[site] = csv.reader(fh)
|
||||
reader, fh = create_reader(site_file)
|
||||
site_readers[site] = reader
|
||||
file_handles.append(fh)
|
||||
except KeyError:
|
||||
message = 'Could not get DAQ trace for {}; Obtained traces are in {}'
|
||||
@@ -103,22 +122,21 @@ class DaqInstrument(Instrument):
|
||||
|
||||
# The first row is the headers
|
||||
channel_order = []
|
||||
for site, reader in site_readers.iteritems():
|
||||
for site, reader in site_readers.items():
|
||||
channel_order.extend(['{}_{}'.format(site, kind)
|
||||
for kind in reader.next()])
|
||||
for kind in next(reader)])
|
||||
|
||||
def _read_next_rows():
|
||||
parts = []
|
||||
for reader in site_readers.itervalues():
|
||||
for reader in site_readers.values():
|
||||
try:
|
||||
parts.extend(reader.next())
|
||||
parts.extend(next(reader))
|
||||
except StopIteration:
|
||||
parts.extend([None, None])
|
||||
return list(chain(parts))
|
||||
|
||||
with open(outfile, 'wb') as wfh:
|
||||
with csvwriter(outfile) as writer:
|
||||
field_names = [c.label for c in self.active_channels]
|
||||
writer = csv.writer(wfh)
|
||||
writer.writerow(field_names)
|
||||
raw_row = _read_next_rows()
|
||||
while any(raw_row):
|
||||
@@ -126,14 +144,16 @@ class DaqInstrument(Instrument):
|
||||
writer.writerow(row)
|
||||
raw_row = _read_next_rows()
|
||||
|
||||
return MeasurementsCsv(outfile, self.active_channels)
|
||||
return MeasurementsCsv(outfile, self.active_channels, self.sample_rate_hz)
|
||||
finally:
|
||||
for fh in file_handles:
|
||||
fh.close()
|
||||
|
||||
def get_raw(self):
|
||||
return self._raw_files
|
||||
|
||||
def teardown(self):
|
||||
self.execute('close')
|
||||
|
||||
def execute(self, command, **kwargs):
|
||||
return execute_command(self.server_config, command, **kwargs)
|
||||
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Copyright 2015 ARM Limited
|
||||
# Copyright 2015-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -14,14 +14,16 @@
|
||||
#
|
||||
from __future__ import division
|
||||
import os
|
||||
import csv
|
||||
import signal
|
||||
import tempfile
|
||||
import struct
|
||||
import subprocess
|
||||
import sys
|
||||
from pipes import quote
|
||||
|
||||
from devlib.instrument import Instrument, CONTINUOUS, MeasurementsCsv
|
||||
from devlib.exception import HostError
|
||||
from devlib.utils.csvutil import csvwriter
|
||||
from devlib.utils.misc import which
|
||||
|
||||
|
||||
@@ -39,7 +41,7 @@ class EnergyProbeInstrument(Instrument):
|
||||
self.labels = labels
|
||||
else:
|
||||
self.labels = ['PORT_{}'.format(i)
|
||||
for i in xrange(len(resistor_values))]
|
||||
for i in range(len(resistor_values))]
|
||||
self.device_entry = device_entry
|
||||
self.caiman = which('caiman')
|
||||
if self.caiman is None:
|
||||
@@ -52,6 +54,7 @@ class EnergyProbeInstrument(Instrument):
|
||||
self.raw_output_directory = None
|
||||
self.process = None
|
||||
self.sample_rate_hz = 10000 # Determined empirically
|
||||
self.raw_data_file = None
|
||||
|
||||
for label in self.labels:
|
||||
for kind in self.attributes:
|
||||
@@ -63,7 +66,11 @@ class EnergyProbeInstrument(Instrument):
|
||||
parts = ['-r {}:{} '.format(i, int(1000 * rval))
|
||||
for i, rval in enumerate(self.resistor_values)]
|
||||
rstring = ''.join(parts)
|
||||
self.command = '{} -d {} -l {} {}'.format(self.caiman, self.device_entry, rstring, self.raw_output_directory)
|
||||
self.command = '{} -d {} -l {} {}'.format(
|
||||
quote(self.caiman), quote(self.device_entry),
|
||||
rstring, quote(self.raw_output_directory)
|
||||
)
|
||||
self.raw_data_file = None
|
||||
|
||||
def start(self):
|
||||
self.logger.debug(self.command)
|
||||
@@ -78,11 +85,14 @@ class EnergyProbeInstrument(Instrument):
|
||||
self.process.poll()
|
||||
if self.process.returncode is not None:
|
||||
stdout, stderr = self.process.communicate()
|
||||
if sys.version_info[0] == 3:
|
||||
stdout = stdout.decode(sys.stdout.encoding or 'utf-8', 'replace')
|
||||
stderr = stderr.decode(sys.stdout.encoding or 'utf-8', 'replace')
|
||||
raise HostError(
|
||||
'Energy Probe: Caiman exited unexpectedly with exit code {}.\n'
|
||||
'stdout:\n{}\nstderr:\n{}'.format(self.process.returncode,
|
||||
stdout, stderr))
|
||||
os.killpg(self.process.pid, signal.SIGTERM)
|
||||
os.killpg(self.process.pid, signal.SIGINT)
|
||||
|
||||
def get_data(self, outfile): # pylint: disable=R0914
|
||||
all_channels = [c.label for c in self.list_channels()]
|
||||
@@ -92,12 +102,11 @@ class EnergyProbeInstrument(Instrument):
|
||||
num_of_ports = len(self.resistor_values)
|
||||
struct_format = '{}I'.format(num_of_ports * self.attributes_per_sample)
|
||||
not_a_full_row_seen = False
|
||||
raw_data_file = os.path.join(self.raw_output_directory, '0000000000')
|
||||
self.raw_data_file = os.path.join(self.raw_output_directory, '0000000000')
|
||||
|
||||
self.logger.debug('Parsing raw data file: {}'.format(raw_data_file))
|
||||
with open(raw_data_file, 'rb') as bfile:
|
||||
with open(outfile, 'wb') as wfh:
|
||||
writer = csv.writer(wfh)
|
||||
self.logger.debug('Parsing raw data file: {}'.format(self.raw_data_file))
|
||||
with open(self.raw_data_file, 'rb') as bfile:
|
||||
with csvwriter(outfile) as writer:
|
||||
writer.writerow(active_channels)
|
||||
while True:
|
||||
data = bfile.read(num_of_ports * self.bytes_per_sample)
|
||||
@@ -109,8 +118,11 @@ class EnergyProbeInstrument(Instrument):
|
||||
writer.writerow(row)
|
||||
except struct.error:
|
||||
if not_a_full_row_seen:
|
||||
self.logger.warn('possibly misaligned caiman raw data, row contained {} bytes'.format(len(data)))
self.logger.warning('possibly misaligned caiman raw data, row contained {} bytes'.format(len(data)))
|
||||
continue
|
||||
else:
|
||||
not_a_full_row_seen = True
|
||||
return MeasurementsCsv(outfile, self.active_channels)
|
||||
return MeasurementsCsv(outfile, self.active_channels, self.sample_rate_hz)
|
||||
|
||||
def get_raw(self):
|
||||
return [self.raw_data_file]
|
||||
|
devlib/instrument/frames.py (new file, 98 lines)
@@ -0,0 +1,98 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from __future__ import division
|
||||
from devlib.instrument import (Instrument, CONTINUOUS,
|
||||
MeasurementsCsv, MeasurementType)
|
||||
from devlib.utils.rendering import (GfxinfoFrameCollector,
|
||||
SurfaceFlingerFrameCollector,
|
||||
SurfaceFlingerFrame,
|
||||
read_gfxinfo_columns)
|
||||
|
||||
|
||||
class FramesInstrument(Instrument):
|
||||
|
||||
mode = CONTINUOUS
|
||||
collector_cls = None
|
||||
|
||||
def __init__(self, target, collector_target, period=2, keep_raw=True):
|
||||
super(FramesInstrument, self).__init__(target)
|
||||
self.collector_target = collector_target
|
||||
self.period = period
|
||||
self.keep_raw = keep_raw
|
||||
self.sample_rate_hz = 1 / self.period
|
||||
self.collector = None
|
||||
self.header = None
|
||||
self._need_reset = True
|
||||
self._raw_file = None
|
||||
self._init_channels()
|
||||
|
||||
def reset(self, sites=None, kinds=None, channels=None):
|
||||
super(FramesInstrument, self).reset(sites, kinds, channels)
|
||||
# pylint: disable=not-callable
|
||||
self.collector = self.collector_cls(self.target, self.period,
|
||||
self.collector_target, self.header)
|
||||
self._need_reset = False
|
||||
self._raw_file = None
|
||||
|
||||
def start(self):
|
||||
if self._need_reset:
|
||||
self.reset()
|
||||
self.collector.start()
|
||||
|
||||
def stop(self):
|
||||
self.collector.stop()
|
||||
self._need_reset = True
|
||||
|
||||
def get_data(self, outfile):
|
||||
if self.keep_raw:
|
||||
self._raw_file = outfile + '.raw'
|
||||
self.collector.process_frames(self._raw_file)
|
||||
active_sites = [chan.label for chan in self.active_channels]
|
||||
self.collector.write_frames(outfile, columns=active_sites)
|
||||
return MeasurementsCsv(outfile, self.active_channels, self.sample_rate_hz)
|
||||
|
||||
def get_raw(self):
|
||||
return [self._raw_file] if self._raw_file else []
|
||||
|
||||
def _init_channels(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class GfxInfoFramesInstrument(FramesInstrument):
|
||||
|
||||
mode = CONTINUOUS
|
||||
collector_cls = GfxinfoFrameCollector
|
||||
|
||||
def _init_channels(self):
|
||||
columns = read_gfxinfo_columns(self.target)
|
||||
for entry in columns:
|
||||
if entry == 'Flags':
|
||||
self.add_channel('Flags', MeasurementType('flags', 'flags'))
|
||||
else:
|
||||
self.add_channel(entry, 'time_ns')
|
||||
self.header = [chan.label for chan in self.channels.values()]
|
||||
|
||||
|
||||
class SurfaceFlingerFramesInstrument(FramesInstrument):
|
||||
|
||||
mode = CONTINUOUS
|
||||
collector_cls = SurfaceFlingerFrameCollector
|
||||
|
||||
def _init_channels(self):
|
||||
for field in SurfaceFlingerFrame._fields:
|
||||
# remove the "_time" from filed names to avoid duplication
|
||||
self.add_channel(field[:-5], 'time_us')
|
||||
self.header = [chan.label for chan in self.channels.values()]
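A usage sketch for the gfxinfo variant above, assuming `target` is a connected Android target and that collector_target names the package whose frames are being profiled (the package name here is a placeholder):

from devlib.instrument.frames import GfxInfoFramesInstrument

frames = GfxInfoFramesInstrument(target, collector_target='com.example.app', period=2)
frames.reset()
frames.start()
# ... exercise the app's UI ...
frames.stop()
csv = frames.get_data('frames.csv')   # per-frame timings; raw dump kept in frames.csv.raw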
|
devlib/instrument/gem5power.py (new file, 78 lines)
@@ -0,0 +1,78 @@
|
||||
# Copyright 2017-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from __future__ import division
|
||||
|
||||
from devlib.platform.gem5 import Gem5SimulationPlatform
|
||||
from devlib.instrument import Instrument, CONTINUOUS, MeasurementsCsv
|
||||
from devlib.exception import TargetStableError
|
||||
from devlib.utils.csvutil import csvwriter
|
||||
|
||||
|
||||
class Gem5PowerInstrument(Instrument):
|
||||
'''
|
||||
Instrument enabling power monitoring in gem5
|
||||
'''
|
||||
|
||||
mode = CONTINUOUS
|
||||
roi_label = 'power_instrument'
|
||||
site_mapping = {'timestamp': 'sim_seconds'}
|
||||
|
||||
def __init__(self, target, power_sites):
|
||||
'''
|
||||
Parameter power_sites is a list of gem5 identifiers for power values.
|
||||
One example of such a field:
|
||||
system.cluster0.cores0.power_model.static_power
|
||||
'''
|
||||
if not isinstance(target.platform, Gem5SimulationPlatform):
|
||||
raise TargetStableError('Gem5PowerInstrument requires a gem5 platform')
|
||||
if not target.has('gem5stats'):
|
||||
raise TargetStableError('Gem5StatsModule is not loaded')
|
||||
super(Gem5PowerInstrument, self).__init__(target)
|
||||
|
||||
# power_sites is assumed to be a list later
|
||||
if isinstance(power_sites, list):
|
||||
self.power_sites = power_sites
|
||||
else:
|
||||
self.power_sites = [power_sites]
|
||||
self.add_channel('timestamp', 'time')
|
||||
for field in self.power_sites:
|
||||
self.add_channel(field, 'power')
|
||||
self.target.gem5stats.book_roi(self.roi_label)
|
||||
self.sample_period_ns = 10000000
|
||||
# Sample rate must remain unset as gem5 does not provide samples
|
||||
# at regular intervals; the reported timestamp should be used instead.
|
||||
self.sample_rate_hz = None
|
||||
self.target.gem5stats.start_periodic_dump(0, self.sample_period_ns)
|
||||
self._base_stats_dump = 0
|
||||
|
||||
def start(self):
|
||||
self.target.gem5stats.roi_start(self.roi_label)
|
||||
|
||||
def stop(self):
|
||||
self.target.gem5stats.roi_end(self.roi_label)
|
||||
|
||||
def get_data(self, outfile):
|
||||
active_sites = [c.site for c in self.active_channels]
|
||||
with csvwriter(outfile) as writer:
|
||||
writer.writerow([c.label for c in self.active_channels]) # headers
|
||||
sites_to_match = [self.site_mapping.get(s, s) for s in active_sites]
|
||||
for rec, _ in self.target.gem5stats.match_iter(sites_to_match,
|
||||
[self.roi_label], self._base_stats_dump):
|
||||
writer.writerow([rec[s] for s in sites_to_match])
|
||||
return MeasurementsCsv(outfile, self.active_channels, self.sample_rate_hz)
|
||||
|
||||
def reset(self, sites=None, kinds=None, channels=None):
|
||||
super(Gem5PowerInstrument, self).reset(sites, kinds, channels)
|
||||
self._base_stats_dump = self.target.gem5stats.next_dump_no()
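A sketch of using the gem5 power instrument above; the power site string follows the example in the docstring, and `target` is assumed to be a gem5 simulation target with the gem5stats module loaded:

from devlib.instrument.gem5power import Gem5PowerInstrument

power = Gem5PowerInstrument(target,
                            power_sites=['system.cluster0.cores0.power_model.static_power'])
power.reset()
power.start()
# ... region of interest ...
power.stop()
csv = power.get_data('gem5_power.csv')   # MeasurementsCsv (no fixed sample rate)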
|
@@ -1,4 +1,4 @@
|
||||
# Copyright 2015 ARM Limited
|
||||
# Copyright 2015-2017 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -16,7 +16,7 @@ from __future__ import division
|
||||
import re
|
||||
|
||||
from devlib.instrument import Instrument, Measurement, INSTANTANEOUS
|
||||
from devlib.exception import TargetError
|
||||
from devlib.exception import TargetStableError
|
||||
|
||||
|
||||
class HwmonInstrument(Instrument):
|
||||
@@ -35,7 +35,7 @@ class HwmonInstrument(Instrument):
|
||||
|
||||
def __init__(self, target):
|
||||
if not hasattr(target, 'hwmon'):
|
||||
raise TargetError('Target does not support HWMON')
|
||||
raise TargetStableError('Target does not support HWMON')
|
||||
super(HwmonInstrument, self).__init__(target)
|
||||
|
||||
self.logger.debug('Discovering available HWMON sensors...')
|
||||
@@ -45,7 +45,7 @@ class HwmonInstrument(Instrument):
|
||||
measure = self.measure_map.get(ts.kind)[0]
|
||||
if measure:
|
||||
self.logger.debug('\tAdding sensor {}'.format(ts.name))
|
||||
self.add_channel(_guess_site(ts), measure, name=ts.name, sensor=ts)
|
||||
self.add_channel(_guess_site(ts), measure, sensor=ts)
|
||||
else:
|
||||
self.logger.debug('\tSkipping sensor {} (unknown kind "{}")'.format(ts.name, ts.kind))
|
||||
except ValueError:
|
||||
|
@@ -1,23 +1,41 @@
|
||||
import csv
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import os
|
||||
import signal
|
||||
import sys
|
||||
from subprocess import Popen, PIPE
|
||||
from tempfile import NamedTemporaryFile
|
||||
|
||||
from devlib.instrument import Instrument, CONTINUOUS, MeasurementsCsv
|
||||
from devlib.exception import HostError
|
||||
from devlib.host import PACKAGE_BIN_DIRECTORY
|
||||
from devlib.utils.csvutil import csvwriter
|
||||
from devlib.utils.misc import which
|
||||
|
||||
INSTALL_INSTRUCTIONS="""
|
||||
|
||||
INSTALL_INSTRUCTIONS = """
|
||||
MonsoonInstrument requires the monsoon.py tool, available from AOSP:
|
||||
|
||||
https://android.googlesource.com/platform/cts/+/master/tools/utils/monsoon.py
|
||||
|
||||
Download this script and put it in your $PATH (or pass it as the monsoon_bin
|
||||
parameter to MonsoonInstrument). `pip install gflags pyserial` to install the
|
||||
dependencies.
|
||||
parameter to MonsoonInstrument). `pip install python-gflags pyserial` to install
|
||||
the dependencies.
|
||||
"""
|
||||
|
||||
|
||||
class MonsoonInstrument(Instrument):
|
||||
"""Instrument for Monsoon Solutions power monitor
|
||||
|
||||
@@ -49,6 +67,7 @@ class MonsoonInstrument(Instrument):
|
||||
|
||||
self.process = None
|
||||
self.output = None
|
||||
self.buffer_file = None
|
||||
|
||||
self.sample_rate_hz = 500
|
||||
self.add_channel('output', 'power')
|
||||
@@ -81,6 +100,9 @@ class MonsoonInstrument(Instrument):
|
||||
process.poll()
|
||||
if process.returncode is not None:
|
||||
stdout, stderr = process.communicate()
|
||||
if sys.version_info[0] == 3:
|
||||
stdout = stdout.encode(sys.stdout.encoding or 'utf-8')
|
||||
stderr = stderr.encode(sys.stdout.encoding or 'utf-8')
|
||||
raise HostError(
|
||||
'Monsoon script exited unexpectedly with exit code {}.\n'
|
||||
'stdout:\n{}\nstderr:\n{}'.format(process.returncode,
|
||||
@@ -88,7 +110,7 @@ class MonsoonInstrument(Instrument):
|
||||
|
||||
process.send_signal(signal.SIGINT)
|
||||
|
||||
stderr = process.stderr.read()
|
||||
stderr = process.stderr.read()
|
||||
|
||||
self.buffer_file.close()
|
||||
with open(self.buffer_file.name) as f:
|
||||
@@ -102,10 +124,9 @@ class MonsoonInstrument(Instrument):
|
||||
if self.process:
|
||||
raise RuntimeError('`get_data` called before `stop`')
|
||||
|
||||
stdout, stderr = self.output
|
||||
stdout, _ = self.output
|
||||
|
||||
with open(outfile, 'wb') as f:
|
||||
writer = csv.writer(f)
|
||||
with csvwriter(outfile) as writer:
|
||||
active_sites = [c.site for c in self.active_channels]
|
||||
|
||||
# Write column headers
|
||||
@@ -129,4 +150,4 @@ class MonsoonInstrument(Instrument):
|
||||
row.append(usb)
|
||||
writer.writerow(row)
|
||||
|
||||
return MeasurementsCsv(outfile, self.active_channels)
|
||||
return MeasurementsCsv(outfile, self.active_channels, self.sample_rate_hz)
|
||||
|
@@ -1,14 +1,30 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import os
|
||||
import re
|
||||
import csv
|
||||
import tempfile
|
||||
from datetime import datetime
|
||||
from collections import defaultdict
|
||||
from itertools import izip_longest
|
||||
|
||||
from future.moves.itertools import zip_longest
|
||||
|
||||
from devlib.instrument import Instrument, MeasurementsCsv, CONTINUOUS
|
||||
from devlib.exception import TargetError, HostError
|
||||
from devlib.exception import TargetStableError, HostError
|
||||
from devlib.utils.android import ApkInfo
|
||||
from devlib.utils.csvutil import csvwriter
|
||||
|
||||
|
||||
THIS_DIR = os.path.dirname(__file__)
|
||||
@@ -46,10 +62,9 @@ def netstats_to_measurements(netstats):
|
||||
def write_measurements_csv(measurements, filepath):
|
||||
headers = sorted(measurements.keys())
|
||||
columns = [measurements[h] for h in headers]
|
||||
with open(filepath, 'wb') as wfh:
|
||||
writer = csv.writer(wfh)
|
||||
with csvwriter(filepath) as writer:
|
||||
writer.writerow(headers)
|
||||
writer.writerows(izip_longest(*columns))
|
||||
writer.writerows(zip_longest(*columns))
|
||||
|
||||
|
||||
class NetstatsInstrument(Instrument):
|
||||
@@ -69,7 +84,7 @@ class NetstatsInstrument(Instrument):
|
||||
|
||||
"""
|
||||
if target.os != 'android':
|
||||
raise TargetError('netstats insturment only supports Android targets')
|
||||
raise TargetStableError('netstats instrument only supports Android targets')
|
||||
if apk is None:
|
||||
apk = os.path.join(THIS_DIR, 'netstats.apk')
|
||||
if not os.path.isfile(apk):
|
||||
@@ -86,6 +101,7 @@ class NetstatsInstrument(Instrument):
|
||||
self.add_channel(package, 'tx')
|
||||
self.add_channel(package, 'rx')
|
||||
|
||||
# pylint: disable=keyword-arg-before-vararg,arguments-differ
|
||||
def setup(self, force=False, *args, **kwargs):
|
||||
if self.target.package_is_installed(self.package):
|
||||
if force:
|
||||
|
@@ -1,4 +1,4 @@
# Copyright 2014-2015 ARM Limited
# Copyright 2014-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,6 +15,8 @@
import logging
from inspect import isclass

from past.builtins import basestring

from devlib.utils.misc import walk_modules
from devlib.utils.types import identifier

@@ -35,6 +37,9 @@ class Module(object):
    #                  serial).
    # 'connected' -- installed when a connection to to the target has been
    #                established. This is the default.
    # 'setup' -- installed after initial setup of the device has been performed.
    #            This allows the module to utilize assets deployed during the
    #            setup stage for example 'Busybox'.
    stage = 'connected'

    @staticmethod
@@ -56,10 +61,10 @@ class Module(object):

    def __init__(self, target):
        self.target = target
        self.logger = logging.getLogger(self.__class__.__name__)
        self.logger = logging.getLogger(self.name)


class HardRestModule(Module):   # pylint: disable=R0921
class HardRestModule(Module):

    kind = 'hard_reset'

@@ -67,7 +72,7 @@ class HardRestModule(Module):   # pylint: disable=R0921
        raise NotImplementedError()


class BootModule(Module):   # pylint: disable=R0921
class BootModule(Module):

    kind = 'boot'

@@ -75,7 +80,7 @@ class BootModule(Module):  # pylint: disable=R0921
        raise NotImplementedError()

    def update(self, **kwargs):
        for name, value in kwargs.iteritems():
        for name, value in kwargs.items():
            if not hasattr(self, name):
                raise ValueError('Unknown parameter "{}" for {}'.format(name, self.name))
            self.logger.debug('Updating "{}" to "{}"'.format(name, value))
@@ -117,6 +122,6 @@ def register_module(mod):

def __load_cache():
    for module in walk_modules('devlib.module'):
        for obj in vars(module).itervalues():
        for obj in vars(module).values():
            if isclass(obj) and issubclass(obj, Module) and obj.name:
                register_module(obj)

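For context, a minimal sketch of how a module plugs into the registration walk above (the module name and probed sysfs path are purely illustrative, not part of devlib):

    from devlib.module import Module, register_module

    class ExampleModule(Module):        # hypothetical module

        name = 'example'                # also used as the logger name, per the change above
        stage = 'connected'             # install once a connection to the target exists

        @staticmethod
        def probe(target):
            # Only install if the target exposes the (hypothetical) interface.
            return target.file_exists('/sys/kernel/example_interface')

    register_module(ExampleModule)
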
@@ -1,4 +1,4 @@
# Copyright 2014-2015 ARM Limited
# Copyright 2014-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -63,7 +63,7 @@ class FastbootFlashModule(FlashModule):
        image_bundle = expand_path(image_bundle)
        to_flash = self._bundle_to_images(image_bundle)
        to_flash = merge_dicts(to_flash, images or {}, should_normalize=False)
        for partition, image_path in to_flash.iteritems():
        for partition, image_path in to_flash.items():
            self.logger.debug('flashing {}'.format(partition))
            self._flash_image(self.target, partition, expand_path(image_path))
        fastboot_command('reboot')
@@ -125,4 +125,3 @@ def get_mapping(base_dir, partition_file):
            HostError('file {} was not found in the bundle or was misplaced'.format(pair[1]))
        mapping[pair[0]] = image_path
    return mapping

@@ -1,3 +1,18 @@
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from devlib.module import Module


@@ -44,79 +59,151 @@ class BigLittleModule(Module):

    # cpufreq

    def list_bigs_frequencies(self):
        return self.target.cpufreq.list_frequencies(self.bigs_online[0])
        bigs_online = self.bigs_online
        if bigs_online:
            return self.target.cpufreq.list_frequencies(bigs_online[0])

    def list_bigs_governors(self):
        return self.target.cpufreq.list_governors(self.bigs_online[0])
        bigs_online = self.bigs_online
        if bigs_online:
            return self.target.cpufreq.list_governors(bigs_online[0])

    def list_bigs_governor_tunables(self):
        return self.target.cpufreq.list_governor_tunables(self.bigs_online[0])
        bigs_online = self.bigs_online
        if bigs_online:
            return self.target.cpufreq.list_governor_tunables(bigs_online[0])

    def list_littles_frequencies(self):
        return self.target.cpufreq.list_frequencies(self.littles_online[0])
        littles_online = self.littles_online
        if littles_online:
            return self.target.cpufreq.list_frequencies(littles_online[0])

    def list_littles_governors(self):
        return self.target.cpufreq.list_governors(self.littles_online[0])
        littles_online = self.littles_online
        if littles_online:
            return self.target.cpufreq.list_governors(littles_online[0])

    def list_littles_governor_tunables(self):
        return self.target.cpufreq.list_governor_tunables(self.littles_online[0])
        littles_online = self.littles_online
        if littles_online:
            return self.target.cpufreq.list_governor_tunables(littles_online[0])

    def get_bigs_governor(self):
        return self.target.cpufreq.get_governor(self.bigs_online[0])
        bigs_online = self.bigs_online
        if bigs_online:
            return self.target.cpufreq.get_governor(bigs_online[0])

    def get_bigs_governor_tunables(self):
        return self.target.cpufreq.get_governor_tunables(self.bigs_online[0])
        bigs_online = self.bigs_online
        if bigs_online:
            return self.target.cpufreq.get_governor_tunables(bigs_online[0])

    def get_bigs_frequency(self):
        return self.target.cpufreq.get_frequency(self.bigs_online[0])
        bigs_online = self.bigs_online
        if bigs_online:
            return self.target.cpufreq.get_frequency(bigs_online[0])

    def get_bigs_min_frequency(self):
        return self.target.cpufreq.get_min_frequency(self.bigs_online[0])
        bigs_online = self.bigs_online
        if bigs_online:
            return self.target.cpufreq.get_min_frequency(bigs_online[0])

    def get_bigs_max_frequency(self):
        return self.target.cpufreq.get_max_frequency(self.bigs_online[0])
        bigs_online = self.bigs_online
        if bigs_online:
            return self.target.cpufreq.get_max_frequency(bigs_online[0])

    def get_littles_governor(self):
        return self.target.cpufreq.get_governor(self.littles_online[0])
        littles_online = self.littles_online
        if littles_online:
            return self.target.cpufreq.get_governor(littles_online[0])

    def get_littles_governor_tunables(self):
        return self.target.cpufreq.get_governor_tunables(self.littles_online[0])
        littles_online = self.littles_online
        if littles_online:
            return self.target.cpufreq.get_governor_tunables(littles_online[0])

    def get_littles_frequency(self):
        return self.target.cpufreq.get_frequency(self.littles_online[0])
        littles_online = self.littles_online
        if littles_online:
            return self.target.cpufreq.get_frequency(littles_online[0])

    def get_littles_min_frequency(self):
        return self.target.cpufreq.get_min_frequency(self.littles_online[0])
        littles_online = self.littles_online
        if littles_online:
            return self.target.cpufreq.get_min_frequency(littles_online[0])

    def get_littles_max_frequency(self):
        return self.target.cpufreq.get_max_frequency(self.littles_online[0])
        littles_online = self.littles_online
        if littles_online:
            return self.target.cpufreq.get_max_frequency(littles_online[0])

    def set_bigs_governor(self, governor, **kwargs):
        self.target.cpufreq.set_governor(self.bigs_online[0], governor, **kwargs)
        bigs_online = self.bigs_online
        if bigs_online:
            self.target.cpufreq.set_governor(bigs_online[0], governor, **kwargs)
        else:
            raise ValueError("All bigs appear to be offline")

    def set_bigs_governor_tunables(self, governor, **kwargs):
        self.target.cpufreq.set_governor_tunables(self.bigs_online[0], governor, **kwargs)
        bigs_online = self.bigs_online
        if bigs_online:
            self.target.cpufreq.set_governor_tunables(bigs_online[0], governor, **kwargs)
        else:
            raise ValueError("All bigs appear to be offline")

    def set_bigs_frequency(self, frequency, exact=True):
        self.target.cpufreq.set_frequency(self.bigs_online[0], frequency, exact)
        bigs_online = self.bigs_online
        if bigs_online:
            self.target.cpufreq.set_frequency(bigs_online[0], frequency, exact)
        else:
            raise ValueError("All bigs appear to be offline")

    def set_bigs_min_frequency(self, frequency, exact=True):
        self.target.cpufreq.set_min_frequency(self.bigs_online[0], frequency, exact)
        bigs_online = self.bigs_online
        if bigs_online:
            self.target.cpufreq.set_min_frequency(bigs_online[0], frequency, exact)
        else:
            raise ValueError("All bigs appear to be offline")

    def set_bigs_max_frequency(self, frequency, exact=True):
        self.target.cpufreq.set_max_frequency(self.bigs_online[0], frequency, exact)
        bigs_online = self.bigs_online
        if bigs_online:
            self.target.cpufreq.set_max_frequency(bigs_online[0], frequency, exact)
        else:
            raise ValueError("All bigs appear to be offline")

    def set_littles_governor(self, governor, **kwargs):
        self.target.cpufreq.set_governor(self.littles_online[0], governor, **kwargs)
        littles_online = self.littles_online
        if littles_online:
            self.target.cpufreq.set_governor(littles_online[0], governor, **kwargs)
        else:
            raise ValueError("All littles appear to be offline")

    def set_littles_governor_tunables(self, governor, **kwargs):
        self.target.cpufreq.set_governor_tunables(self.littles_online[0], governor, **kwargs)
        littles_online = self.littles_online
        if littles_online:
            self.target.cpufreq.set_governor_tunables(littles_online[0], governor, **kwargs)
        else:
            raise ValueError("All littles appear to be offline")

    def set_littles_frequency(self, frequency, exact=True):
        self.target.cpufreq.set_frequency(self.littles_online[0], frequency, exact)
        littles_online = self.littles_online
        if littles_online:
            self.target.cpufreq.set_frequency(littles_online[0], frequency, exact)
        else:
            raise ValueError("All littles appear to be offline")

    def set_littles_min_frequency(self, frequency, exact=True):
        self.target.cpufreq.set_min_frequency(self.littles_online[0], frequency, exact)
        littles_online = self.littles_online
        if littles_online:
            self.target.cpufreq.set_min_frequency(littles_online[0], frequency, exact)
        else:
            raise ValueError("All littles appear to be offline")

    def set_littles_max_frequency(self, frequency, exact=True):
        self.target.cpufreq.set_max_frequency(self.littles_online[0], frequency, exact)
        littles_online = self.littles_online
        if littles_online:
            self.target.cpufreq.set_max_frequency(littles_online[0], frequency, exact)
        else:
            raise ValueError("All littles appear to be offline")

@@ -1,4 +1,4 @@
# Copyright 2014-2015 ARM Limited
# Copyright 2014-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,10 +14,11 @@
#
# pylint: disable=attribute-defined-outside-init
import logging
import re
from collections import namedtuple

from devlib.module import Module
from devlib.exception import TargetError
from devlib.exception import TargetStableError
from devlib.utils.misc import list_to_ranges, isiterable
from devlib.utils.types import boolean

@@ -102,7 +103,7 @@ class Controller(object):
                             .format(self.kind))
        if name not in self._cgroups:
            self._cgroups[name] = CGroup(self, name, create=False)
        return self._cgroups[name].existe()
        return self._cgroups[name].exists()

    def list_all(self):
        self.logger.debug('Listing groups for %s controller', self.kind)
@@ -120,18 +121,20 @@ class Controller(object):
            cgroups.append(cg)
        return cgroups

    def move_tasks(self, source, dest, exclude=[]):
    def move_tasks(self, source, dest, exclude=None):
        if exclude is None:
            exclude = []
        try:
            srcg = self._cgroups[source]
            dstg = self._cgroups[dest]
        except KeyError as e:
            raise ValueError('Unkown group: {}'.format(e))
        output = self.target._execute_util(
            raise ValueError('Unknown group: {}'.format(e))
        self.target._execute_util(  # pylint: disable=protected-access
            'cgroups_tasks_move {} {} \'{}\''.format(
                srcg.directory, dstg.directory, exclude),
            as_root=True)

    def move_all_tasks_to(self, dest, exclude=[]):
    def move_all_tasks_to(self, dest, exclude=None):
        """
        Move all the tasks to the specified CGroup

@@ -144,8 +147,10 @@ class Controller(object):
        tasks.

        :param exclude: list of commands to keep in the root CGroup
        :type exlude: list(str)
        :type exclude: list(str)
        """
        if exclude is None:
            exclude = []

        if isinstance(exclude, str):
            exclude = [exclude]
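The exclude=[] to exclude=None changes above avoid Python's shared-mutable-default pitfall; a standalone sketch of the problem being fixed (illustrative names):

    def broken(item, acc=[]):        # the default list is created once and shared by every call
        acc.append(item)
        return acc

    broken(1)    # [1]
    broken(2)    # [1, 2] -- state leaks between calls

    def fixed(item, acc=None):
        if acc is None:              # a fresh list per call, as move_tasks() now does
            acc = []
        acc.append(item)
        return acc
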
@@ -168,33 +173,73 @@ class Controller(object):
            if cgroup != dest:
                self.move_tasks(cgroup, dest, grep_filters)

    def tasks(self, cgroup):
    # pylint: disable=too-many-locals
    def tasks(self, cgroup,
              filter_tid='',
              filter_tname='',
              filter_tcmdline=''):
        """
        Report the tasks that are included in a cgroup. The tasks can be
        filtered by their tid, tname or tcmdline if filter_tid, filter_tname or
        filter_tcmdline are defined respectively. In this case, the reported
        tasks are the ones in the cgroup that match these patterns.

        Example of tasks format:
        TID,tname,tcmdline
        903,cameraserver,/system/bin/cameraserver

        :params filter_tid: regexp pattern to filter by TID
        :type filter_tid: str

        :params filter_tname: regexp pattern to filter by tname
        :type filter_tname: str

        :params filter_tcmdline: regexp pattern to filter by tcmdline
        :type filter_tcmdline: str

        :returns: a dictionary in the form: {tid:(tname, tcmdline)}
        """
        if not isinstance(filter_tid, str):
            raise TypeError('filter_tid should be a str')
        if not isinstance(filter_tname, str):
            raise TypeError('filter_tname should be a str')
        if not isinstance(filter_tcmdline, str):
            raise TypeError('filter_tcmdline should be a str')
        try:
            cg = self._cgroups[cgroup]
        except KeyError as e:
            raise ValueError('Unkown group: {}'.format(e))
        output = self.target._execute_util(
            raise ValueError('Unknown group: {}'.format(e))
        output = self.target._execute_util(  # pylint: disable=protected-access
            'cgroups_tasks_in {}'.format(cg.directory),
            as_root=True)
        entries = output.splitlines()
        tasks = {}
        for task in entries:
            tid = task.split(',')[0]
            try:
                tname = task.split(',')[1]
            except: continue
            try:
                tcmdline = task.split(',')[2]
            except:
            fields = task.split(',', 2)
            nr_fields = len(fields)
            if nr_fields < 2:
                continue
            elif nr_fields == 2:
                tid_str, tname = fields
                tcmdline = ''
                tasks[int(tid)] = (tname, tcmdline)
            else:
                tid_str, tname, tcmdline = fields

            if not re.search(filter_tid, tid_str):
                continue
            if not re.search(filter_tname, tname):
                continue
            if not re.search(filter_tcmdline, tcmdline):
                continue

            tasks[int(tid_str)] = (tname, tcmdline)
        return tasks

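A usage sketch of the filtered tasks() API above (assumes the module is installed as target.cgroups; the task-name pattern is just an example):

    cpuset = target.cgroups.controller('cpuset')
    camera_tasks = cpuset.tasks('/', filter_tname='camera')
    for tid, (tname, tcmdline) in camera_tasks.items():
        print(tid, tname, tcmdline)
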
    def tasks_count(self, cgroup):
        try:
            cg = self._cgroups[cgroup]
        except KeyError as e:
            raise ValueError('Unkown group: {}'.format(e))
            raise ValueError('Unknown group: {}'.format(e))
        output = self.target.execute(
            '{} wc -l {}/tasks'.format(
                self.target.busybox, cg.directory),
@@ -217,8 +262,9 @@ class CGroup(object):

        # Control cgroup path
        self.directory = controller.mount_point

        if name != '/':
            self.directory = self.target.path.join(controller.mount_point, name[1:])
            self.directory = self.target.path.join(controller.mount_point, name.strip('/'))

        # Setup path for tasks file
        self.tasks_file = self.target.path.join(self.directory, 'tasks')
@@ -236,7 +282,7 @@ class CGroup(object):
            self.target.execute('[ -d {0} ]'\
                                .format(self.directory), as_root=True)
            return True
        except TargetError:
        except TargetStableError:
            return False

    def get(self):
@@ -246,7 +292,7 @@ class CGroup(object):
                      self.controller.kind)
        logging.debug('      %s',
                      self.directory)
        output = self.target._execute_util(
        output = self.target._execute_util(  # pylint: disable=protected-access
            'cgroups_get_attributes {} {}'.format(
                self.directory, self.controller.kind),
            as_root=True)
@@ -262,7 +308,7 @@ class CGroup(object):
            if isiterable(attrs[idx]):
                attrs[idx] = list_to_ranges(attrs[idx])
            # Build attribute path
            if self.controller._noprefix:
            if self.controller._noprefix:  # pylint: disable=protected-access
                attr_name = '{}'.format(idx)
            else:
                attr_name = '{}.{}'.format(self.controller.kind, idx)
@@ -274,7 +320,7 @@ class CGroup(object):
            # Set the attribute value
            try:
                self.target.write_value(path, attrs[idx])
            except TargetError:
            except TargetStableError:
                # Check if the error is due to a non-existing attribute
                attrs = self.get()
                if idx not in attrs:
@@ -285,7 +331,7 @@ class CGroup(object):
    def get_tasks(self):
        task_ids = self.target.read_value(self.tasks_file).split()
        logging.debug('Tasks: %s', task_ids)
        return map(int, task_ids)
        return list(map(int, task_ids))

    def add_task(self, tid):
        self.target.write_value(self.tasks_file, tid, verify=False)
@@ -323,7 +369,7 @@ class CgroupsModule(Module):

        # Get the list of the available controllers
        subsys = self.list_subsystems()
        if len(subsys) == 0:
        if not subsys:
            self.logger.warning('No CGroups controller available')
            return

@@ -344,9 +390,9 @@ class CgroupsModule(Module):
            controller = Controller(ss.name, hid, hierarchy[hid])
            try:
                controller.mount(self.target, self.cgroup_root)
            except TargetError:
            except TargetStableError:
                message = 'Failed to mount "{}" controller'
                raise TargetError(message.format(controller.kind))
                raise TargetStableError(message.format(controller.kind))
            self.logger.info('  %-12s : %s', controller.kind,
                             controller.mount_point)
            self.controllers[ss.name] = controller
@@ -354,7 +400,7 @@ class CgroupsModule(Module):
    def list_subsystems(self):
        subsystems = []
        for line in self.target.execute('{} cat /proc/cgroups'\
                .format(self.target.busybox)).splitlines()[1:]:
                .format(self.target.busybox), as_root=self.target.is_rooted).splitlines()[1:]:
            line = line.strip()
            if not line or line.startswith('#'):
                continue
@@ -380,20 +426,27 @@ class CgroupsModule(Module):
        :param cgroup: Name of cgroup to run command into
        :returns: A command to run `cmdline` into `cgroup`
        """
        if not cgroup.startswith('/'):
            message = 'cgroup name "{}" must start with "/"'.format(cgroup)
            raise ValueError(message)
        return 'CGMOUNT={} {} cgroups_run_into {} {}'\
               .format(self.cgroup_root, self.target.shutils,
                       cgroup, cmdline)

    def run_into(self, cgroup, cmdline):
    def run_into(self, cgroup, cmdline, as_root=None):
        """
        Run the specified command into the specified CGroup

        :param cmdline: Command to be run into cgroup
        :param cgroup: Name of cgroup to run command into
        :param as_root: Specify whether to run the command as root, if not
                        specified will default to whether the target is rooted.
        :returns: Output of command.
        """
        if as_root is None:
            as_root = self.target.is_rooted
        cmd = self.run_into_cmd(cgroup, cmdline)
        raw_output = self.target.execute(cmd)
        raw_output = self.target.execute(cmd, as_root=as_root)

        # First line of output comes from shutils; strip it out.
        return raw_output.split('\n', 1)[1]
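A usage sketch for run_into() (the cgroup name and command are illustrative only):

    # Runs as root on rooted targets by default, per the new as_root handling above.
    output = target.cgroups.run_into('/LITTLES', 'dhrystone -r 10')
    print(output)
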
@@ -404,11 +457,11 @@ class CgroupsModule(Module):
        A regexps of tasks names can be used to defined tasks which should not
        be moved.
        """
        return self.target._execute_util(
        return self.target._execute_util(  # pylint: disable=protected-access
            'cgroups_tasks_move {} {} {}'.format(srcg, dstg, exclude),
            as_root=True)

    def isolate(self, cpus, exclude=[]):
    def isolate(self, cpus, exclude=None):
        """
        Remove all userspace tasks from specified CPUs.

@@ -425,6 +478,8 @@ class CgroupsModule(Module):
        sandbox is the CGroup of sandboxed CPUs
        isolated is the CGroup of isolated CPUs
        """
        if exclude is None:
            exclude = []
        all_cpus = set(range(self.target.number_of_cpus))
        sbox_cpus = list(all_cpus - set(cpus))
        isol_cpus = list(all_cpus - set(sbox_cpus))
@@ -443,7 +498,7 @@ class CgroupsModule(Module):

        return sbox_cg, isol_cg

    def freeze(self, exclude=[], thaw=False):
    def freeze(self, exclude=None, thaw=False):
        """
        Freeze all user-space tasks but the specified ones

@@ -461,16 +516,20 @@ class CgroupsModule(Module):
        :type thaw: bool
        """

        if exclude is None:
            exclude = []

        # Create Freezer CGroup
        freezer = self.controller('freezer')
        if freezer is None:
            raise RuntimeError('freezer cgroup controller not present')
        freezer_cg = freezer.cgroup('/DEVLIB_FREEZER')
        thawed_cg = freezer.cgroup('/')
        cmd = 'cgroups_freezer_set_state {{}} {}'.format(freezer_cg.directory)

        if thaw:
            # Restart froozen tasks
            freezer_cg.set(state='THAWED')
            # Restart frozen tasks
            # pylint: disable=protected-access
            freezer.target._execute_util(cmd.format('THAWED'), as_root=True)
            # Remove all tasks from freezer
            freezer.move_all_tasks_to('/')
            return
@@ -482,7 +541,7 @@ class CgroupsModule(Module):
        tasks = freezer.tasks('/')

        # Freeze all tasks
        freezer_cg.set(state='FROZEN')
        # pylint: disable=protected-access
        freezer.target._execute_util(cmd.format('FROZEN'), as_root=True)

        return tasks
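A usage sketch for freeze()/thaw (the task names to exclude are illustrative):

    # Freeze user space around a measurement, keeping the shell/adb daemons alive.
    frozen = target.cgroups.freeze(exclude=['adbd', 'sshd'])
    # ... take measurements on a quiesced system ...
    target.cgroups.freeze(thaw=True)   # restart the frozen tasks
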
@@ -37,12 +37,14 @@ class MbedFanActiveCoolingModule(Module):
        with open_serial_connection(timeout=self.timeout,
                                    port=self.port,
                                    baudrate=self.baud) as target:
            # pylint: disable=no-member
            target.sendline('motor_{}_1'.format(self.fan_pin))

    def stop(self):
        with open_serial_connection(timeout=self.timeout,
                                    port=self.port,
                                    baudrate=self.baud) as target:
            # pylint: disable=no-member
            target.sendline('motor_{}_0'.format(self.fan_pin))


@@ -1,4 +1,4 @@
# Copyright 2014-2015 ARM Limited
# Copyright 2014-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,8 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
from contextlib import contextmanager

from devlib.module import Module
from devlib.exception import TargetError
from devlib.exception import TargetStableError
from devlib.utils.misc import memoized


@@ -37,7 +39,7 @@ class CpufreqModule(Module):
            return True

        # Generic CPUFreq support (single policy)
        path = '/sys/devices/system/cpu/cpufreq'
        path = '/sys/devices/system/cpu/cpufreq/policy0'
        if target.file_exists(path):
            return True

@@ -82,7 +84,7 @@ class CpufreqModule(Module):
        Setting the governor on any core in a cluster will also set it on all
        other cores in that cluster.

        :raises: TargetError if governor is not supported by the CPU, or if,
        :raises: TargetStableError if governor is not supported by the CPU, or if,
                 for some reason, the governor could not be set.

        """
@@ -90,11 +92,52 @@ class CpufreqModule(Module):
            cpu = 'cpu{}'.format(cpu)
        supported = self.list_governors(cpu)
        if governor not in supported:
            raise TargetError('Governor {} not supported for cpu {}'.format(governor, cpu))
            raise TargetStableError('Governor {} not supported for cpu {}'.format(governor, cpu))
        sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_governor'.format(cpu)
        self.target.write_value(sysfile, governor)
        self.set_governor_tunables(cpu, governor, **kwargs)

    @contextmanager
    def use_governor(self, governor, cpus=None, **kwargs):
        """
        Use a given governor, then restore previous governor(s)

        :param governor: Governor to use on all targeted CPUs (see :meth:`set_governor`)
        :type governor: str

        :param cpus: CPUs affected by the governor change (all by default)
        :type cpus: list

        :Keyword Arguments: Governor tunables, See :meth:`set_governor_tunables`
        """
        if not cpus:
            cpus = range(self.target.number_of_cpus)

        # Setting a governor & tunables for a cpu will set them for all cpus
        # in the same clock domain, so only manipulating one cpu per domain
        # is enough
        domains = set(self.get_affected_cpus(cpu)[0] for cpu in cpus)
        prev_governors = {cpu : (self.get_governor(cpu), self.get_governor_tunables(cpu))
                          for cpu in domains}

        # Special case for userspace, frequency is not seen as a tunable
        userspace_freqs = {}
        for cpu, (prev_gov, _) in prev_governors.items():
            if prev_gov == "userspace":
                userspace_freqs[cpu] = self.get_frequency(cpu)

        for cpu in domains:
            self.set_governor(cpu, governor, **kwargs)

        try:
            yield

        finally:
            for cpu, (prev_gov, tunables) in prev_governors.items():
                self.set_governor(cpu, prev_gov, **tunables)
                if prev_gov == "userspace":
                    self.set_frequency(cpu, userspace_freqs[cpu])

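A usage sketch for the new use_governor() context manager (the workload call is a hypothetical placeholder):

    with target.cpufreq.use_governor('performance'):
        run_workload()               # hypothetical helper
    # previous governors, tunables and userspace frequencies are restored on exit
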
    def list_governor_tunables(self, cpu):
        """Returns a list of tunables available for the governor on the specified CPU."""
        if isinstance(cpu, int):
@@ -104,11 +147,11 @@ class CpufreqModule(Module):
        try:
            tunables_path = '/sys/devices/system/cpu/{}/cpufreq/{}'.format(cpu, governor)
            self._governor_tunables[governor] = self.target.list_directory(tunables_path)
        except TargetError:  # probably an older kernel
        except TargetStableError:  # probably an older kernel
            try:
                tunables_path = '/sys/devices/system/cpu/cpufreq/{}'.format(governor)
                self._governor_tunables[governor] = self.target.list_directory(tunables_path)
            except TargetError:  # governor does not support tunables
            except TargetStableError:  # governor does not support tunables
                self._governor_tunables[governor] = []
        return self._governor_tunables[governor]

@@ -122,7 +165,7 @@ class CpufreqModule(Module):
            try:
                path = '/sys/devices/system/cpu/{}/cpufreq/{}/{}'.format(cpu, governor, tunable)
                tunables[tunable] = self.target.read_value(path)
            except TargetError:  # May be an older kernel
            except TargetStableError:  # May be an older kernel
                path = '/sys/devices/system/cpu/cpufreq/{}/{}'.format(governor, tunable)
                tunables[tunable] = self.target.read_value(path)
        return tunables
@@ -140,7 +183,7 @@ class CpufreqModule(Module):
        The rest should be keyword parameters mapping tunable name onto the value to
        be set for it.

        :raises: TargetError if governor specified is not a valid governor name, or if
        :raises: TargetStableError if governor specified is not a valid governor name, or if
                 a tunable specified is not valid for the governor, or if could not set
                 tunable.

@@ -150,12 +193,12 @@ class CpufreqModule(Module):
        if governor is None:
            governor = self.get_governor(cpu)
        valid_tunables = self.list_governor_tunables(cpu)
        for tunable, value in kwargs.iteritems():
        for tunable, value in kwargs.items():
            if tunable in valid_tunables:
                path = '/sys/devices/system/cpu/{}/cpufreq/{}/{}'.format(cpu, governor, tunable)
                try:
                    self.target.write_value(path, value)
                except TargetError:
                except TargetStableError:
                    if self.target.file_exists(path):
                        # File exists but we did something wrong
                        raise
@@ -165,7 +208,7 @@ class CpufreqModule(Module):
            else:
                message = 'Unexpected tunable {} for governor {} on {}.\n'.format(tunable, governor, cpu)
                message += 'Available tunables are: {}'.format(valid_tunables)
                raise TargetError(message)
                raise TargetStableError(message)

    @memoized
    def list_frequencies(self, cpu):
@@ -176,16 +219,41 @@ class CpufreqModule(Module):
        try:
            cmd = 'cat /sys/devices/system/cpu/{}/cpufreq/scaling_available_frequencies'.format(cpu)
            output = self.target.execute(cmd)
            available_frequencies = map(int, output.strip().split())  # pylint: disable=E1103
        except TargetError:
            available_frequencies = list(map(int, output.strip().split()))  # pylint: disable=E1103
        except TargetStableError:
            # On some devices scaling_frequencies is not generated.
            # http://adrynalyne-teachtofish.blogspot.co.uk/2011/11/how-to-enable-scalingavailablefrequenci.html
            # Fall back to parsing stats/time_in_state
            cmd = 'cat /sys/devices/system/cpu/{}/cpufreq/stats/time_in_state'.format(cpu)
            out_iter = iter(self.target.execute(cmd).strip().split())
            available_frequencies = map(int, reversed([f for f, _ in zip(out_iter, out_iter)]))
            path = '/sys/devices/system/cpu/{}/cpufreq/stats/time_in_state'.format(cpu)
            try:
                out_iter = iter(self.target.read_value(path).split())
            except TargetStableError:
                if not self.target.file_exists(path):
                    # Probably intel_pstate. Can't get available freqs.
                    return []
                raise

            available_frequencies = list(map(int, reversed([f for f, _ in zip(out_iter, out_iter)])))
        return available_frequencies

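The zip(out_iter, out_iter) idiom in the fallback above pairs consecutive "&lt;frequency&gt; &lt;time&gt;" tokens from time_in_state and keeps only the frequencies; a standalone sketch:

    tokens = iter('1500000 4260 1200000 81203 600000 331932'.split())
    freqs = [int(f) for f, _ in zip(tokens, tokens)]   # [1500000, 1200000, 600000]
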
    @memoized
    def get_max_available_frequency(self, cpu):
        """
        Returns the maximum available frequency for a given core or None if
        could not be found.
        """
        freqs = self.list_frequencies(cpu)
        return max(freqs) if freqs else None

    @memoized
    def get_min_available_frequency(self, cpu):
        """
        Returns the minimum available frequency for a given core or None if
        could not be found.
        """
        freqs = self.list_frequencies(cpu)
        return min(freqs) if freqs else None

    def get_min_frequency(self, cpu):
        """
        Returns the min frequency currently set for the specified CPU.
@@ -194,7 +262,7 @@ class CpufreqModule(Module):
        try to read the minimum frequency and the following exception will be
        raised ::

        :raises: TargetError if for some reason the frequency could not be read.
        :raises: TargetStableError if for some reason the frequency could not be read.

        """
        if isinstance(cpu, int):
@@ -214,7 +282,7 @@ class CpufreqModule(Module):

        on the device.

        :raises: TargetError if the frequency is not supported by the CPU, or if, for
        :raises: TargetStableError if the frequency is not supported by the CPU, or if, for
                 some reason, frequency could not be set.
        :raises: ValueError if ``frequency`` is not an integer.

@@ -225,7 +293,7 @@ class CpufreqModule(Module):
        try:
            value = int(frequency)
            if exact and available_frequencies and value not in available_frequencies:
                raise TargetError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
                raise TargetStableError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
                                                                                              value,
                                                                                              available_frequencies))
            sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_min_freq'.format(cpu)
@@ -241,7 +309,7 @@ class CpufreqModule(Module):
        try to read the current frequency and the following exception will be
        raised ::

        :raises: TargetError if for some reason the frequency could not be read.
        :raises: TargetStableError if for some reason the frequency could not be read.

        """
        if isinstance(cpu, int):
@@ -263,7 +331,7 @@ class CpufreqModule(Module):

        on the device (if it exists).

        :raises: TargetError if the frequency is not supported by the CPU, or if, for
        :raises: TargetStableError if the frequency is not supported by the CPU, or if, for
                 some reason, frequency could not be set.
        :raises: ValueError if ``frequency`` is not an integer.

@@ -275,11 +343,11 @@ class CpufreqModule(Module):
            if exact:
                available_frequencies = self.list_frequencies(cpu)
                if available_frequencies and value not in available_frequencies:
                    raise TargetError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
                    raise TargetStableError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
                                                                                                  value,
                                                                                                  available_frequencies))
            if self.get_governor(cpu) != 'userspace':
                raise TargetError('Can\'t set {} frequency; governor must be "userspace"'.format(cpu))
                raise TargetStableError('Can\'t set {} frequency; governor must be "userspace"'.format(cpu))
            sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_setspeed'.format(cpu)
            self.target.write_value(sysfile, value, verify=False)
        except ValueError:
@@ -293,7 +361,7 @@ class CpufreqModule(Module):
        try to read the maximum frequency and the following exception will be
        raised ::

        :raises: TargetError if for some reason the frequency could not be read.
        :raises: TargetStableError if for some reason the frequency could not be read.
        """
        if isinstance(cpu, int):
            cpu = 'cpu{}'.format(cpu)
@@ -312,7 +380,7 @@ class CpufreqModule(Module):

        on the device.

        :raises: TargetError if the frequency is not supported by the CPU, or if, for
        :raises: TargetStableError if the frequency is not supported by the CPU, or if, for
                 some reason, frequency could not be set.
        :raises: ValueError if ``frequency`` is not an integer.

@@ -323,7 +391,7 @@ class CpufreqModule(Module):
        try:
            value = int(frequency)
            if exact and available_frequencies and value not in available_frequencies:
                raise TargetError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
                raise TargetStableError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
                                                                                              value,
                                                                                              available_frequencies))
            sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_max_freq'.format(cpu)
@@ -355,6 +423,7 @@ class CpufreqModule(Module):
        """
        Set the specified (minimum) frequency for all the (online) CPUs
        """
        # pylint: disable=protected-access
        return self.target._execute_util(
            'cpufreq_set_all_frequencies {}'.format(freq),
            as_root=True)
@@ -363,6 +432,7 @@ class CpufreqModule(Module):
        """
        Get the current frequency for all the (online) CPUs
        """
        # pylint: disable=protected-access
        output = self.target._execute_util(
            'cpufreq_get_all_frequencies', as_root=True)
        frequencies = {}
@@ -378,14 +448,17 @@ class CpufreqModule(Module):
        Set the specified governor for all the (online) CPUs
        """
        try:
            # pylint: disable=protected-access
            return self.target._execute_util(
                'cpufreq_set_all_governors {}'.format(governor),
                as_root=True)
        except TargetError as e:
            if "echo: I/O error" in str(e):
        except TargetStableError as e:
            if ("echo: I/O error" in str(e) or
                "write error: Invalid argument" in str(e)):

                cpus_unsupported = [c for c in self.target.list_online_cpus()
                                    if governor not in self.list_governors(c)]
                raise TargetError("Governor {} unsupported for CPUs {}".format(
                raise TargetStableError("Governor {} unsupported for CPUs {}".format(
                    governor, cpus_unsupported))
            else:
                raise
@@ -394,6 +467,7 @@ class CpufreqModule(Module):
        """
        Get the current governor for all the (online) CPUs
        """
        # pylint: disable=protected-access
        output = self.target._execute_util(
            'cpufreq_get_all_governors', as_root=True)
        governors = {}
@@ -408,12 +482,12 @@ class CpufreqModule(Module):
        """
        Report current frequencies on trace file
        """
        # pylint: disable=protected-access
        return self.target._execute_util('cpufreq_trace_all_frequencies', as_root=True)

    @memoized
    def get_domain_cpus(self, cpu):
    def get_affected_cpus(self, cpu):
        """
        Get the CPUs that share a frequency domain with the given CPU
        Get the online CPUs that share a frequency domain with the given CPU
        """
        if isinstance(cpu, int):
            cpu = 'cpu{}'.format(cpu)
@@ -421,3 +495,38 @@ class CpufreqModule(Module):
        sysfile = '/sys/devices/system/cpu/{}/cpufreq/affected_cpus'.format(cpu)

        return [int(c) for c in self.target.read_value(sysfile).split()]

    @memoized
    def get_related_cpus(self, cpu):
        """
        Get the CPUs that share a frequency domain with the given CPU
        """
        if isinstance(cpu, int):
            cpu = 'cpu{}'.format(cpu)

        sysfile = '/sys/devices/system/cpu/{}/cpufreq/related_cpus'.format(cpu)

        return [int(c) for c in self.target.read_value(sysfile).split()]

    @memoized
    def get_driver(self, cpu):
        """
        Get the name of the driver used by this cpufreq policy.
        """
        if isinstance(cpu, int):
            cpu = 'cpu{}'.format(cpu)

        sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_driver'.format(cpu)

        return self.target.read_value(sysfile).strip()

    def iter_domains(self):
        """
        Iterate over the frequency domains in the system
        """
        cpus = set(range(self.target.number_of_cpus))
        while cpus:
            cpu = next(iter(cpus))  # pylint: disable=stop-iteration-return
            domain = self.target.cpufreq.get_related_cpus(cpu)
            yield domain
            cpus = cpus.difference(domain)

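A usage sketch for the new frequency-domain helpers (assumes the cpufreq module is installed):

    for domain in target.cpufreq.iter_domains():
        print(domain, target.cpufreq.get_driver(domain[0]))
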
@@ -1,4 +1,4 @@
# Copyright 2014-2015 ARM Limited
# Copyright 2014-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,8 +13,9 @@
# limitations under the License.
#
# pylint: disable=attribute-defined-outside-init
from past.builtins import basestring

from devlib.module import Module
from devlib.utils.misc import memoized
from devlib.utils.types import integer, boolean


@@ -41,51 +42,18 @@ class CpuidleState(object):
            raise ValueError('invalid idle state name: "{}"'.format(self.id))
        return int(self.id[i:])

    def __init__(self, target, index, path):
    def __init__(self, target, index, path, name, desc, power, latency, residency):
        self.target = target
        self.index = index
        self.path = path
        self.name = name
        self.desc = desc
        self.power = power
        self.latency = latency
        self.residency = residency
        self.id = self.target.path.basename(self.path)
        self.cpu = self.target.path.basename(self.target.path.dirname(path))

    @property
    @memoized
    def desc(self):
        return self.get('desc')

    @property
    @memoized
    def name(self):
        return self.get('name')

    @property
    @memoized
    def latency(self):
        """Exit latency in uS"""
        return self.get('latency')

    @property
    @memoized
    def power(self):
        """Power usage in mW

        ..note::

            This value is not always populated by the kernel and may be garbage.
        """
        return self.get('power')

    @property
    @memoized
    def target_residency(self):
        """Target residency in uS

        This is the amount of time in the state required to 'break even' on
        power - the system should avoid entering the state for less time than
        this.
        """
        return self.get('residency')

    def enable(self):
        self.set('disable', 0)

@@ -126,23 +94,47 @@ class Cpuidle(Module):
    def probe(target):
        return target.file_exists(Cpuidle.root_path)

    def get_driver(self):
        return self.target.read_value(self.target.path.join(self.root_path, 'current_driver'))
    def __init__(self, target):
        super(Cpuidle, self).__init__(target)
        self._states = {}

    def get_governor(self):
        return self.target.read_value(self.target.path.join(self.root_path, 'current_governor_ro'))
        basepath = '/sys/devices/system/cpu/'
        values_tree = self.target.read_tree_values(basepath, depth=4, check_exit_code=False)
        i = 0
        cpu_id = 'cpu{}'.format(i)
        while cpu_id in values_tree:
            cpu_node = values_tree[cpu_id]

            if 'cpuidle' in cpu_node:
                idle_node = cpu_node['cpuidle']
                self._states[cpu_id] = []
                j = 0
                state_id = 'state{}'.format(j)
                while state_id in idle_node:
                    state_node = idle_node[state_id]
                    state = CpuidleState(
                        self.target,
                        index=j,
                        path=self.target.path.join(basepath, cpu_id, 'cpuidle', state_id),
                        name=state_node['name'],
                        desc=state_node['desc'],
                        power=int(state_node['power']),
                        latency=int(state_node['latency']),
                        residency=int(state_node['residency']) if 'residency' in state_node else None,
                    )
                    msg = 'Adding {} state {}: {} {}'
                    self.logger.debug(msg.format(cpu_id, j, state.name, state.desc))
                    self._states[cpu_id].append(state)
                    j += 1
                    state_id = 'state{}'.format(j)

            i += 1
            cpu_id = 'cpu{}'.format(i)

    @memoized
    def get_states(self, cpu=0):
        if isinstance(cpu, int):
            cpu = 'cpu{}'.format(cpu)
        states_dir = self.target.path.join(self.target.path.dirname(self.root_path), cpu, 'cpuidle')
        idle_states = []
        for state in self.target.list_directory(states_dir):
            if state.startswith('state'):
                index = int(state[5:])
                idle_states.append(CpuidleState(self.target, index, self.target.path.join(states_dir, state)))
        return idle_states
        return self._states.get(cpu, [])

    def get_state(self, state, cpu=0):
        if isinstance(state, int):
@@ -174,5 +166,11 @@ class Cpuidle(Module):
        """
        Momentarily wake each CPU. Ensures cpu_idle events in trace file.
        """
        output = self.target._execute_util('cpuidle_wake_all_cpus')
        print(output)
        # pylint: disable=protected-access
        self.target._execute_util('cpuidle_wake_all_cpus')

    def get_driver(self):
        return self.target.read_value(self.target.path.join(self.root_path, 'current_driver'))

    def get_governor(self):
        return self.target.read_value(self.target.path.join(self.root_path, 'current_governor_ro'))

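A usage sketch for the eagerly-populated idle-state list above (assumes the cpuidle module is installed):

    for state in target.cpuidle.get_states(cpu=0):
        print(state.index, state.name, state.desc, state.latency)
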
devlib/module/devfreq.py (new file, 260 lines)
@@ -0,0 +1,260 @@
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from devlib.module import Module
from devlib.exception import TargetStableError
from devlib.utils.misc import memoized

class DevfreqModule(Module):

    name = 'devfreq'

    @staticmethod
    def probe(target):
        path = '/sys/class/devfreq/'
        if not target.file_exists(path):
            return False

        # Check that at least one policy is implemented
        if not target.list_directory(path):
            return False

        return True

    @memoized
    def list_devices(self):
        """Returns a list of devfreq devices supported by the target platform."""
        sysfile = '/sys/class/devfreq/'
        return self.target.list_directory(sysfile)

    @memoized
    def list_governors(self, device):
        """Returns a list of governors supported by the device."""
        sysfile = '/sys/class/devfreq/{}/available_governors'.format(device)
        output = self.target.read_value(sysfile)
        return output.strip().split()

    def get_governor(self, device):
        """Returns the governor currently set for the specified device."""
        if isinstance(device, int):
            device = 'device{}'.format(device)
        sysfile = '/sys/class/devfreq/{}/governor'.format(device)
        return self.target.read_value(sysfile)

    def set_governor(self, device, governor):
        """
        Set the governor for the specified device.

        :param device: The device for which the governor is to be set. This must be
                       the full name as it appears in sysfs, e.g. "e82c0000.mali".
        :param governor: The name of the governor to be used. This must be
                         supported by the specific device.

        Additional keyword arguments can be used to specify governor tunables for
        governors that support them.

        :raises: TargetStableError if governor is not supported by the device, or if,
                 for some reason, the governor could not be set.

        """
        supported = self.list_governors(device)
        if governor not in supported:
            raise TargetStableError('Governor {} not supported for device {}'.format(governor, device))
        sysfile = '/sys/class/devfreq/{}/governor'.format(device)
        self.target.write_value(sysfile, governor)

    @memoized
    def list_frequencies(self, device):
        """
        Returns a list of frequencies supported by the device or an empty list
        if could not be found.
        """
        cmd = 'cat /sys/class/devfreq/{}/available_frequencies'.format(device)
        output = self.target.execute(cmd)
        available_frequencies = [int(freq) for freq in output.strip().split()]

        return available_frequencies

    def get_min_frequency(self, device):
        """
        Returns the min frequency currently set for the specified device.

        Warning, this method does not check if the device is present or not. It
        will try to read the minimum frequency and the following exception will
        be raised ::

        :raises: TargetStableError if for some reason the frequency could not be read.

        """
        sysfile = '/sys/class/devfreq/{}/min_freq'.format(device)
        return self.target.read_int(sysfile)

    def set_min_frequency(self, device, frequency, exact=True):
        """
        Sets the minimum value for device frequency. Actual frequency will
        depend on the thermal governor used and may vary during execution. The
        value should be either an int or a string representing an integer. The
        Value must also be supported by the device. The available frequencies
        can be obtained by calling list_frequencies() or examining

        /sys/class/devfreq/<device_name>/available_frequencies

        on the device.

        :raises: TargetStableError if the frequency is not supported by the device, or if, for
                 some reason, frequency could not be set.
        :raises: ValueError if ``frequency`` is not an integer.

        """
        available_frequencies = self.list_frequencies(device)
        try:
            value = int(frequency)
            if exact and available_frequencies and value not in available_frequencies:
                raise TargetStableError('Can\'t set {} frequency to {}\nmust be in {}'.format(device,
                                                                                              value,
                                                                                              available_frequencies))
            sysfile = '/sys/class/devfreq/{}/min_freq'.format(device)
            self.target.write_value(sysfile, value)
        except ValueError:
            raise ValueError('Frequency must be an integer; got: "{}"'.format(frequency))

    def get_frequency(self, device):
        """
        Returns the current frequency currently set for the specified device.

        Warning, this method does not check if the device is present or not. It
        will try to read the current frequency and the following exception will
        be raised ::

        :raises: TargetStableError if for some reason the frequency could not be read.

        """
        sysfile = '/sys/class/devfreq/{}/cur_freq'.format(device)
        return self.target.read_int(sysfile)

    def get_max_frequency(self, device):
        """
        Returns the max frequency currently set for the specified device.

        Warning, this method does not check if the device is online or not. It will
        try to read the maximum frequency and the following exception will be
        raised ::

        :raises: TargetStableError if for some reason the frequency could not be read.
        """
        sysfile = '/sys/class/devfreq/{}/max_freq'.format(device)
        return self.target.read_int(sysfile)

    def set_max_frequency(self, device, frequency, exact=True):
        """
        Sets the maximum value for device frequency. Actual frequency will
        depend on the Governor used and may vary during execution. The value
        should be either an int or a string representing an integer. The Value
        must also be supported by the device. The available frequencies can be
        obtained by calling get_frequencies() or examining

        /sys/class/devfreq/<device_name>/available_frequencies

        on the device.

        :raises: TargetStableError if the frequency is not supported by the device, or
                 if, for some reason, frequency could not be set.
        :raises: ValueError if ``frequency`` is not an integer.

        """
        available_frequencies = self.list_frequencies(device)
        try:
            value = int(frequency)
        except ValueError:
            raise ValueError('Frequency must be an integer; got: "{}"'.format(frequency))

        if exact and value not in available_frequencies:
            raise TargetStableError('Can\'t set {} frequency to {}\nmust be in {}'.format(device,
                                                                                          value,
                                                                                          available_frequencies))
        sysfile = '/sys/class/devfreq/{}/max_freq'.format(device)
        self.target.write_value(sysfile, value)

    def set_governor_for_devices(self, devices, governor):
        """
        Set the governor for the specified list of devices.

        :param devices: The list of device for which the governor is to be set.
        """
        for device in devices:
            self.set_governor(device, governor)

    def set_all_governors(self, governor):
        """
        Set the specified governor for all the (available) devices
        """
        try:
            return self.target._execute_util(  # pylint: disable=protected-access
                'devfreq_set_all_governors {}'.format(governor), as_root=True)
        except TargetStableError as e:
            if ("echo: I/O error" in str(e) or
                "write error: Invalid argument" in str(e)):

                devs_unsupported = [d for d in self.target.list_devices()
                                    if governor not in self.list_governors(d)]
                raise TargetStableError("Governor {} unsupported for devices {}".format(
                    governor, devs_unsupported))
            else:
                raise

    def get_all_governors(self):
        """
        Get the current governor for all the (online) CPUs
        """
        output = self.target._execute_util(  # pylint: disable=protected-access
            'devfreq_get_all_governors', as_root=True)
        governors = {}
        for x in output.splitlines():
            kv = x.split(' ')
            if kv[0] == '':
                break
            governors[kv[0]] = kv[1]
        return governors

    def set_frequency_for_devices(self, devices, freq, exact=False):
        """
        Set the frequency for the specified list of devices.

        :param devices: The list of device for which the frequency has to be set.
        """
        for device in devices:
            self.set_max_frequency(device, freq, exact)
            self.set_min_frequency(device, freq, exact)

    def set_all_frequencies(self, freq):
        """
        Set the specified (minimum) frequency for all the (available) devices
        """
        return self.target._execute_util(  # pylint: disable=protected-access
            'devfreq_set_all_frequencies {}'.format(freq),
            as_root=True)

    def get_all_frequencies(self):
        """
        Get the current frequency for all the (available) devices
        """
        output = self.target._execute_util(  # pylint: disable=protected-access
            'devfreq_get_all_frequencies', as_root=True)
        frequencies = {}
        for x in output.splitlines():
            kv = x.split(' ')
            if kv[0] == '':
                break
            frequencies[kv[0]] = kv[1]
        return frequencies
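A usage sketch for the new devfreq module (the device name is illustrative; use one of the entries returned by list_devices(), and a governor the device actually supports):

    gpu = 'e82c0000.mali'                       # example name from the docstring above
    print(target.devfreq.list_governors(gpu))
    target.devfreq.set_governor(gpu, 'performance')
    target.devfreq.set_max_frequency(gpu, max(target.devfreq.list_frequencies(gpu)))
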
devlib/module/gem5stats.py (new file, 250 lines)
@@ -0,0 +1,250 @@
# Copyright 2017-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import re
|
||||
import sys
|
||||
import os.path
|
||||
from collections import defaultdict
|
||||
|
||||
from devlib.exception import TargetStableError, HostError
|
||||
from devlib.module import Module
|
||||
from devlib.platform.gem5 import Gem5SimulationPlatform
|
||||
from devlib.utils.gem5 import iter_statistics_dump, GEM5STATS_ROI_NUMBER
|
||||
|
||||
|
||||
class Gem5ROI:
|
||||
def __init__(self, number, target):
|
||||
self.target = target
|
||||
self.number = number
|
||||
self.running = False
|
||||
self.field = 'ROI::{}'.format(number)
|
||||
|
||||
def start(self):
|
||||
if self.running:
|
||||
return False
|
||||
self.target.execute('m5 roistart {}'.format(self.number))
|
||||
self.running = True
|
||||
return True
|
||||
|
||||
def stop(self):
|
||||
if not self.running:
|
||||
return False
|
||||
self.target.execute('m5 roiend {}'.format(self.number))
|
||||
self.running = False
|
||||
return True
|
||||
|
||||
class Gem5StatsModule(Module):
|
||||
'''
|
||||
Module controlling Region of Interest (ROIs) markers, satistics dump
|
||||
frequency and parsing statistics log file when using gem5 platforms.
|
||||
|
||||
ROIs are identified by user-defined labels and need to be booked prior to
|
||||
use. The translation of labels into gem5 ROI numbers will be performed
|
||||
internally in order to avoid conflicts between multiple clients.
|
||||
'''
|
||||
name = 'gem5stats'
|
||||
|
||||
@staticmethod
|
||||
def probe(target):
|
||||
return isinstance(target.platform, Gem5SimulationPlatform)
|
||||
|
||||
def __init__(self, target):
|
||||
super(Gem5StatsModule, self).__init__(target)
|
||||
self._current_origin = 0
|
||||
self._stats_file_path = os.path.join(target.platform.gem5_out_dir,
|
||||
'stats.txt')
|
||||
self.rois = {}
|
||||
self._dump_pos_cache = {0: 0}
|
||||
|
||||
def book_roi(self, label):
|
||||
if label in self.rois:
|
||||
raise KeyError('ROI label {} already used'.format(label))
|
||||
if len(self.rois) >= GEM5STATS_ROI_NUMBER:
|
||||
raise RuntimeError('Too many ROIs reserved')
|
||||
all_rois = set(range(GEM5STATS_ROI_NUMBER))
|
||||
used_rois = set([roi.number for roi in self.rois.values()])
|
||||
avail_rois = all_rois - used_rois
|
||||
self.rois[label] = Gem5ROI(list(avail_rois)[0], self.target)
|
||||
|
||||
def free_roi(self, label):
|
||||
if label not in self.rois:
|
||||
raise KeyError('ROI label {} not reserved yet'.format(label))
|
||||
self.rois[label].stop()
|
||||
del self.rois[label]
|
||||
|
||||
def roi_start(self, label):
|
||||
if label not in self.rois:
|
||||
raise KeyError('Incorrect ROI label: {}'.format(label))
|
||||
if not self.rois[label].start():
|
||||
raise TargetStableError('ROI {} was already running'.format(label))
|
||||
|
||||
def roi_end(self, label):
|
||||
if label not in self.rois:
|
||||
raise KeyError('Incorrect ROI label: {}'.format(label))
|
||||
if not self.rois[label].stop():
|
||||
raise TargetStableError('ROI {} was not running'.format(label))
|
||||
|
||||
def start_periodic_dump(self, delay_ns=0, period_ns=10000000):
|
||||
# Default period is 10ms because it's roughly what's needed to have
# accurate power estimations
if delay_ns < 0 or period_ns < 0:
msg = 'Delay ({}) and period ({}) for periodic dumps must be non-negative'
raise ValueError(msg.format(delay_ns, period_ns))
|
||||
self.target.execute('m5 dumpresetstats {} {}'.format(delay_ns, period_ns))
|
||||
|
||||
def match(self, keys, rois_labels, base_dump=0):
|
||||
'''
|
||||
Extract specific values from the statistics log file of gem5
|
||||
|
||||
:param keys: a list of key name or regular expression patterns that
|
||||
will be matched in the fields of the statistics file. ``match()``
|
||||
returns only the values of fields matching at least one these
|
||||
keys.
|
||||
:type keys: list
|
||||
|
||||
:param rois_labels: list of ROIs labels. ``match()`` returns the
|
||||
values of the specified fields only during dumps spanned by at
|
||||
least one of these ROIs.
|
||||
:type rois_label: list
|
||||
|
||||
:param base_dump: dump number from which ``match()`` should operate. By
|
||||
specifying a non-zero dump number, one can virtually truncate
|
||||
the head of the stats file and ignore all dumps before a specific
|
||||
instant. The value of ``base_dump`` will typically (but not
|
||||
necessarily) be the result of a previous call to ``next_dump_no``.
|
||||
Default value is 0.
|
||||
:type base_dump: int
|
||||
|
||||
:returns: a dict indexed by key parameters containing a dict indexed by
|
||||
ROI labels containing an in-order list of records for the key under
|
||||
consideration during the active intervals of the ROI.
|
||||
|
||||
Example of return value:
|
||||
* Result of match(['sim_'],['roi_1']):
|
||||
{
|
||||
'sim_inst':
|
||||
{
|
||||
'roi_1': [265300176, 267975881]
|
||||
}
|
||||
'sim_ops':
|
||||
{
|
||||
'roi_1': [324395787, 327699419]
|
||||
}
|
||||
'sim_seconds':
|
||||
{
|
||||
'roi_1': [0.199960, 0.199897]
|
||||
}
|
||||
'sim_freq':
|
||||
{
|
||||
'roi_1': [1000000000000, 1000000000000]
|
||||
}
|
||||
'sim_ticks':
|
||||
{
|
||||
'roi_1': [199960234227, 199896897330]
|
||||
}
|
||||
}
|
||||
'''
|
||||
records = defaultdict(lambda: defaultdict(list))
|
||||
for record, active_rois in self.match_iter(keys, rois_labels, base_dump):
|
||||
for key in record:
|
||||
for roi_label in active_rois:
|
||||
records[key][roi_label].append(record[key])
|
||||
return records
|
||||
|
||||
def match_iter(self, keys, rois_labels, base_dump=0):
|
||||
'''
|
||||
Yield specific values dump-by-dump from the statistics log file of gem5
|
||||
|
||||
:param keys: same as ``match()``
|
||||
:param rois_labels: same as ``match()``
|
||||
:param base_dump: same as ``match()``
|
||||
:returns: a pair containing:
|
||||
1. a dict storing the values corresponding to each of the found keys
|
||||
2. the list of currently active ROIs among those passed as parameters
|
||||
|
||||
Example of return value:
|
||||
* Result of match_iter(['sim_'],['roi_1', 'roi_2']).next()
|
||||
(
|
||||
{
|
||||
'sim_inst': 265300176,
|
||||
'sim_ops': 324395787,
|
||||
'sim_seconds': 0.199960,
|
||||
'sim_freq': 1000000000000,
|
||||
'sim_ticks': 199960234227,
|
||||
},
|
||||
[ 'roi_1 ' ]
|
||||
)
|
||||
'''
|
||||
for label in rois_labels:
|
||||
if label not in self.rois:
|
||||
raise KeyError('Impossible to match ROI label {}'.format(label))
|
||||
if self.rois[label].running:
|
||||
self.logger.warning('Trying to match records in statistics file'
|
||||
' while ROI {} is running'.format(label))
|
||||
|
||||
# Construct one large regex that concatenates all keys because
|
||||
# matching one large expression is more efficient than several smaller
|
||||
all_keys_re = re.compile('|'.join(keys))
|
||||
|
||||
def roi_active(roi_label, dump):
|
||||
roi = self.rois[roi_label]
|
||||
return (roi.field in dump) and (int(dump[roi.field]) == 1)
|
||||
|
||||
with open(self._stats_file_path, 'r') as stats_file:
|
||||
self._goto_dump(stats_file, base_dump)
|
||||
for dump in iter_statistics_dump(stats_file):
|
||||
active_rois = [l for l in rois_labels if roi_active(l, dump)]
|
||||
if active_rois:
|
||||
rec = {k: dump[k] for k in dump if all_keys_re.search(k)}
|
||||
yield (rec, active_rois)
|
||||
|
||||
def next_dump_no(self):
|
||||
'''
|
||||
Returns the number of the next dump to be written to the stats file.
|
||||
|
||||
For example, if next_dump_no is called while there are 5 (0 to 4) full
dumps in the stats file, it will return 5. This is useful for knowing
from which dump one should match() in the future to get only data from
now on.
|
||||
'''
|
||||
with open(self._stats_file_path, 'r') as stats_file:
|
||||
# _goto_dump reaches EOF and returns the number of the next dump (i.e. the total number of dumps)
|
||||
return self._goto_dump(stats_file, sys.maxsize)
|
||||
|
||||
def _goto_dump(self, stats_file, target_dump):
|
||||
if target_dump < 0:
|
||||
raise HostError('Cannot go to dump {}'.format(target_dump))
|
||||
|
||||
# Go to required dump quickly if it was visited before
|
||||
if target_dump in self._dump_pos_cache:
|
||||
stats_file.seek(self._dump_pos_cache[target_dump])
|
||||
return target_dump
|
||||
# Or start from the closest dump already visited before the required one
|
||||
prev_dumps = filter(lambda x: x < target_dump, self._dump_pos_cache.keys())
|
||||
curr_dump = max(prev_dumps)
|
||||
curr_pos = self._dump_pos_cache[curr_dump]
|
||||
stats_file.seek(curr_pos)
|
||||
|
||||
# And iterate until target_dump
|
||||
dump_iterator = iter_statistics_dump(stats_file)
|
||||
while curr_dump < target_dump:
|
||||
try:
|
||||
next(dump_iterator)
|
||||
except StopIteration:
|
||||
break
|
||||
# End of passed dump is beginning of the next one
|
||||
curr_pos = stats_file.tell()
|
||||
curr_dump += 1
|
||||
self._dump_pos_cache[curr_dump] = curr_pos
|
||||
return curr_dump
|
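A hedged end-to-end sketch of the module above: book an ROI, bracket a workload with it, then pull matching statistics. The workload command and key names are illustrative, and the target is assumed to have been created on a Gem5SimulationPlatform so that probe() succeeds and the module is available as target.gem5stats:

    gem5stats = target.gem5stats

    gem5stats.book_roi('workload')
    base = gem5stats.next_dump_no()             # ignore anything dumped before now
    gem5stats.start_periodic_dump(period_ns=10000000)

    gem5stats.roi_start('workload')
    target.execute('./run_benchmark.sh')        # hypothetical workload
    gem5stats.roi_end('workload')

    records = gem5stats.match(['sim_seconds', 'sim_ticks'],
                              ['workload'], base_dump=base)
    print(records['sim_seconds']['workload'])   # per-dump values inside the ROI

    gem5stats.free_roi('workload')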
89
devlib/module/gpufreq.py
Normal file
@@ -0,0 +1,89 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# Copyright 2017 Google, ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import re
|
||||
from devlib.module import Module
|
||||
from devlib.exception import TargetStableError
|
||||
from devlib.utils.misc import memoized
|
||||
|
||||
class GpufreqModule(Module):
|
||||
|
||||
name = 'gpufreq'
|
||||
path = ''
|
||||
|
||||
def __init__(self, target):
|
||||
super(GpufreqModule, self).__init__(target)
|
||||
frequencies_str = self.target.read_value("/sys/kernel/gpu/gpu_freq_table")
|
||||
self.frequencies = list(map(int, frequencies_str.split(" ")))
|
||||
self.frequencies.sort()
|
||||
self.governors = self.target.read_value("/sys/kernel/gpu/gpu_available_governor").split(" ")
|
||||
|
||||
@staticmethod
|
||||
def probe(target):
|
||||
# kgsl/Adreno
|
||||
probe_path = '/sys/kernel/gpu/'
|
||||
if target.file_exists(probe_path):
|
||||
model = target.read_value(probe_path + "gpu_model")
|
||||
if re.search('adreno', model, re.IGNORECASE):
|
||||
return True
|
||||
return False
|
||||
|
||||
def set_governor(self, governor):
|
||||
if governor not in self.governors:
|
||||
raise TargetStableError('Governor {} not supported for gpu'.format(governor))
|
||||
self.target.write_value("/sys/kernel/gpu/gpu_governor", governor)
|
||||
|
||||
def get_frequencies(self):
|
||||
"""
|
||||
Returns the list of frequencies that the GPU can have
|
||||
"""
|
||||
return self.frequencies
|
||||
|
||||
def get_current_frequency(self):
|
||||
"""
|
||||
Returns the current frequency currently set for the GPU.
|
||||
|
||||
Warning, this method does not check if the gpu is online or not. It will
|
||||
try to read the current frequency and the following exception will be
|
||||
raised ::
|
||||
|
||||
:raises: TargetStableError if for some reason the frequency could not be read.
|
||||
|
||||
"""
|
||||
return int(self.target.read_value("/sys/kernel/gpu/gpu_clock"))
|
||||
|
||||
@memoized
|
||||
def get_model_name(self):
|
||||
"""
|
||||
Returns the model name reported by the GPU.
|
||||
"""
|
||||
try:
|
||||
return self.target.read_value("/sys/kernel/gpu/gpu_model")
|
||||
except: # pylint: disable=bare-except
|
||||
return "unknown"
|
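Illustrative use of the gpufreq module on a kgsl/Adreno device, using only the methods defined above and assuming it is exposed as target.gpufreq; the 'performance' governor is an assumption and must be one of the entries the target reports:

    gpu = target.gpufreq

    print(gpu.get_model_name())                 # e.g. an Adreno model string
    print(gpu.get_frequencies())                # ascending list of supported rates
    gpu.set_governor('performance')             # must appear in gpu.governors
    print(gpu.get_current_frequency())          # may raise TargetStableError if the GPU is off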
@@ -1,3 +1,18 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from devlib.module import Module
|
||||
|
||||
|
||||
@@ -20,8 +35,13 @@ class HotplugModule(Module):
|
||||
cpu = 'cpu{}'.format(cpu)
|
||||
return target.path.join(cls.base_path, cpu, 'online')
|
||||
|
||||
def list_hotpluggable_cpus(self):
|
||||
return [cpu for cpu in range(self.target.number_of_cpus)
|
||||
if self.target.file_exists(self._cpu_path(self.target, cpu))]
|
||||
|
||||
def online_all(self):
|
||||
self.online(*range(self.target.number_of_cpus))
|
||||
self.target._execute_util('hotplug_online_all', # pylint: disable=protected-access
|
||||
as_root=self.target.is_rooted)
|
||||
|
||||
def online(self, *args):
|
||||
for cpu in args:
|
||||
@@ -37,4 +57,3 @@ class HotplugModule(Module):
|
||||
return
|
||||
value = 1 if online else 0
|
||||
self.target.write_value(path, value)
|
||||
|
||||
|
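A short sketch of the hotplug module the hunks above modify: take every hotpluggable CPU except CPU0 offline, run a single-CPU experiment, then restore them. It assumes the module probed successfully and that offline() mirrors the online() call shown in the diff; the workload command is hypothetical.

    hotplug = target.hotplug

    pluggable = hotplug.list_hotpluggable_cpus()
    hotplug.offline(*[cpu for cpu in pluggable if cpu != 0])
    target.execute('taskset 01 ./single_cpu_benchmark')   # hypothetical workload
    hotplug.online_all()                                   # now also runs hotplug_online_all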
@@ -1,4 +1,4 @@
|
||||
# Copyright 2015 ARM Limited
|
||||
# Copyright 2015-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -15,6 +15,7 @@
|
||||
import re
|
||||
from collections import defaultdict
|
||||
|
||||
from devlib import TargetStableError
|
||||
from devlib.module import Module
|
||||
from devlib.utils.types import integer
|
||||
|
||||
@@ -73,20 +74,19 @@ class HwmonDevice(object):
|
||||
@property
|
||||
def sensors(self):
|
||||
all_sensors = []
|
||||
for sensors_of_kind in self._sensors.itervalues():
|
||||
all_sensors.extend(sensors_of_kind.values())
|
||||
for sensors_of_kind in self._sensors.values():
|
||||
all_sensors.extend(list(sensors_of_kind.values()))
|
||||
return all_sensors
|
||||
|
||||
def __init__(self, target, path):
|
||||
def __init__(self, target, path, name, fields):
|
||||
self.target = target
|
||||
self.path = path
|
||||
self.name = self.target.read_value(self.target.path.join(self.path, 'name'))
|
||||
self.name = name
|
||||
self._sensors = defaultdict(dict)
|
||||
path = self.path
|
||||
if not path.endswith(self.target.path.sep):
|
||||
path += self.target.path.sep
|
||||
for entry in self.target.list_directory(path,
|
||||
as_root=self.target.is_rooted):
|
||||
for entry in fields:
|
||||
match = HWMON_FILE_REGEX.search(entry)
|
||||
if match:
|
||||
kind = match.group('kind')
|
||||
@@ -99,7 +99,7 @@ class HwmonDevice(object):
|
||||
|
||||
def get(self, kind, number=None):
|
||||
if number is None:
|
||||
return [s for _, s in sorted(self._sensors[kind].iteritems(),
|
||||
return [s for _, s in sorted(self._sensors[kind].items(),
|
||||
key=lambda x: x[0])]
|
||||
else:
|
||||
return self._sensors[kind].get(number)
|
||||
@@ -116,7 +116,12 @@ class HwmonModule(Module):
|
||||
|
||||
@staticmethod
|
||||
def probe(target):
|
||||
return target.file_exists(HWMON_ROOT)
|
||||
try:
|
||||
target.list_directory(HWMON_ROOT, as_root=target.is_rooted)
|
||||
except TargetStableError:
|
||||
# Doesn't exist or no permissions
|
||||
return False
|
||||
return True
|
||||
|
||||
@property
|
||||
def sensors(self):
|
||||
@@ -132,11 +137,12 @@ class HwmonModule(Module):
|
||||
self.scan()
|
||||
|
||||
def scan(self):
|
||||
for entry in self.target.list_directory(self.root,
|
||||
as_root=self.target.is_rooted):
|
||||
if entry.startswith('hwmon'):
|
||||
entry_path = self.target.path.join(self.root, entry)
|
||||
if self.target.file_exists(self.target.path.join(entry_path, 'name')):
|
||||
device = HwmonDevice(self.target, entry_path)
|
||||
self.devices.append(device)
|
||||
|
||||
values_tree = self.target.read_tree_values(self.root, depth=3, tar=True)
|
||||
for entry_id, fields in values_tree.items():
|
||||
path = self.target.path.join(self.root, entry_id)
|
||||
name = fields.pop('name', None)
|
||||
if name is None:
|
||||
continue
|
||||
self.logger.debug('Adding device {}'.format(name))
|
||||
device = HwmonDevice(self.target, path, name, fields)
|
||||
self.devices.append(device)
|
||||
|
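With the change above, HwmonDevice instances are now built from a single read_tree_values() call instead of one read per file. A hedged sketch of reading them back, using only the attributes visible in this diff (devices, name, sensors, get()) and assuming the module is available as target.hwmon:

    hwmon = target.hwmon

    for device in hwmon.devices:
        print(device.name, len(device.sensors))
        for sensor in device.get('temp'):       # all sensors of kind 'temp', if any
            print(sensor)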
460
devlib/module/sched.py
Normal file
@@ -0,0 +1,460 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import logging
|
||||
import re
|
||||
from enum import Enum
|
||||
|
||||
from past.builtins import basestring
|
||||
|
||||
from devlib.module import Module
|
||||
from devlib.utils.misc import memoized
|
||||
from devlib.utils.types import boolean
|
||||
|
||||
|
||||
class SchedProcFSNode(object):
|
||||
"""
|
||||
Represents a sched_domain procfs node
|
||||
|
||||
:param nodes: Dictionary view of the underlying procfs nodes
|
||||
(as returned by devlib.read_tree_values())
|
||||
:type nodes: dict
|
||||
|
||||
|
||||
Say you want to represent this path/data:
|
||||
$ cat /proc/sys/kernel/sched_domain/cpu0/domain*/name
|
||||
MC
|
||||
DIE
|
||||
|
||||
Taking cpu0 as a root, this can be defined as:
|
||||
>>> data = {"domain0" : {"name" : "MC"}, "domain1" : {"name" : "DIE"}}
|
||||
|
||||
>>> repr = SchedProcFSNode(data)
|
||||
>>> print(repr.domains[0].name)
|
||||
MC
|
||||
|
||||
The "raw" dict remains available under the `procfs` field:
|
||||
>>> print(repr.procfs["domain0"]["name"])
|
||||
MC
|
||||
"""
|
||||
|
||||
_re_procfs_node = re.compile(r"(?P<name>.*\D)(?P<digits>\d+)$")
|
||||
|
||||
@staticmethod
|
||||
def _ends_with_digits(node):
|
||||
if not isinstance(node, basestring):
|
||||
return False
|
||||
|
||||
return re.search(SchedProcFSNode._re_procfs_node, node) != None
|
||||
|
||||
@staticmethod
|
||||
def _node_digits(node):
|
||||
"""
|
||||
:returns: The ending digits of the procfs node
|
||||
"""
|
||||
return int(re.search(SchedProcFSNode._re_procfs_node, node).group("digits"))
|
||||
|
||||
@staticmethod
|
||||
def _node_name(node):
|
||||
"""
|
||||
:returns: The name of the procfs node
|
||||
"""
|
||||
return re.search(SchedProcFSNode._re_procfs_node, node).group("name")
|
||||
|
||||
@staticmethod
|
||||
def _packable(node, entries):
|
||||
"""
|
||||
:returns: Whether it makes sense to pack a node into a common entry
|
||||
"""
|
||||
return (SchedProcFSNode._ends_with_digits(node) and
|
||||
any([SchedProcFSNode._ends_with_digits(x) and
|
||||
SchedProcFSNode._node_digits(x) != SchedProcFSNode._node_digits(node) and
|
||||
SchedProcFSNode._node_name(x) == SchedProcFSNode._node_name(node)
|
||||
for x in entries]))
|
||||
|
||||
@staticmethod
|
||||
def _build_directory(node_name, node_data):
|
||||
if node_name.startswith("domain"):
|
||||
return SchedDomain(node_data)
|
||||
else:
|
||||
return SchedProcFSNode(node_data)
|
||||
|
||||
@staticmethod
|
||||
def _build_entry(node_data):
|
||||
value = node_data
|
||||
|
||||
# Most nodes just contain numerical data, try to convert
|
||||
try:
|
||||
value = int(value)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
return value
|
||||
|
||||
@staticmethod
|
||||
def _build_node(node_name, node_data):
|
||||
if isinstance(node_data, dict):
|
||||
return SchedProcFSNode._build_directory(node_name, node_data)
|
||||
else:
|
||||
return SchedProcFSNode._build_entry(node_data)
|
||||
|
||||
def __getattr__(self, name):
|
||||
return self._dyn_attrs[name]
|
||||
|
||||
def __init__(self, nodes):
|
||||
self.procfs = nodes
|
||||
# First, reduce the procs fields by packing them if possible
|
||||
# Find which entries can be packed into a common entry
|
||||
packables = {
|
||||
node : SchedProcFSNode._node_name(node) + "s"
|
||||
for node in list(nodes.keys()) if SchedProcFSNode._packable(node, list(nodes.keys()))
|
||||
}
|
||||
|
||||
self._dyn_attrs = {}
|
||||
|
||||
for dest in set(packables.values()):
|
||||
self._dyn_attrs[dest] = {}
|
||||
|
||||
# Pack common entries
|
||||
for key, dest in packables.items():
|
||||
i = SchedProcFSNode._node_digits(key)
|
||||
self._dyn_attrs[dest][i] = self._build_node(key, nodes[key])
|
||||
|
||||
# Build the other nodes
|
||||
for key in nodes.keys():
|
||||
if key in packables:
|
||||
continue
|
||||
|
||||
self._dyn_attrs[key] = self._build_node(key, nodes[key])
|
||||
|
||||
|
||||
class DocInt(int):
|
||||
|
||||
# See https://stackoverflow.com/a/50473952/5096023
|
||||
def __new__(cls, value, doc):
|
||||
new = super(DocInt, cls).__new__(cls, value)
|
||||
new.__doc__ = doc
|
||||
return new
|
||||
|
||||
|
||||
class SchedDomainFlag(DocInt, Enum):
|
||||
"""
|
||||
Represents a sched domain flag
|
||||
"""
|
||||
# pylint: disable=bad-whitespace
|
||||
# Domain flags obtained from include/linux/sched/topology.h on v4.17
|
||||
# https://kernel.googlesource.com/pub/scm/linux/kernel/git/torvalds/linux/+/v4.17/include/linux/sched/topology.h#20
|
||||
SD_LOAD_BALANCE = 0x0001, "Do load balancing on this domain"
|
||||
SD_BALANCE_NEWIDLE = 0x0002, "Balance when about to become idle"
|
||||
SD_BALANCE_EXEC = 0x0004, "Balance on exec"
|
||||
SD_BALANCE_FORK = 0x0008, "Balance on fork, clone"
|
||||
SD_BALANCE_WAKE = 0x0010, "Balance on wakeup"
|
||||
SD_WAKE_AFFINE = 0x0020, "Wake task to waking CPU"
|
||||
SD_ASYM_CPUCAPACITY = 0x0040, "Groups have different max cpu capacities"
|
||||
SD_SHARE_CPUCAPACITY = 0x0080, "Domain members share cpu capacity"
|
||||
SD_SHARE_POWERDOMAIN = 0x0100, "Domain members share power domain"
|
||||
SD_SHARE_PKG_RESOURCES = 0x0200, "Domain members share cpu pkg resources"
|
||||
SD_SERIALIZE = 0x0400, "Only a single load balancing instance"
|
||||
SD_ASYM_PACKING = 0x0800, "Place busy groups earlier in the domain"
|
||||
SD_PREFER_SIBLING = 0x1000, "Prefer to place tasks in a sibling domain"
|
||||
SD_OVERLAP = 0x2000, "Sched_domains of this level overlap"
|
||||
SD_NUMA = 0x4000, "Cross-node balancing"
|
||||
# Only defined in Android
|
||||
# https://android.googlesource.com/kernel/common/+/android-4.14/include/linux/sched/topology.h#29
|
||||
SD_SHARE_CAP_STATES = 0x8000, "(Android only) Domain members share capacity state"
|
||||
|
||||
@classmethod
|
||||
def check_version(cls, target, logger):
|
||||
"""
|
||||
Check the target and see if its kernel version matches our view of the world
|
||||
"""
|
||||
parts = target.kernel_version.parts
|
||||
# Checked to be valid from v4.4
|
||||
# Not saved as a class attribute else it'll be converted to an enum
|
||||
ref_parts = (4, 4, 0)
|
||||
if parts < ref_parts:
|
||||
logger.warn(
|
||||
"Sched domain flags are defined for kernels v{} and up, "
|
||||
"but target is running v{}".format(ref_parts, parts)
|
||||
)
|
||||
|
||||
|
||||
def __str__(self):
|
||||
return self.name
|
||||
|
||||
|
||||
class SchedDomain(SchedProcFSNode):
|
||||
"""
|
||||
Represents a sched domain as seen through procfs
|
||||
"""
|
||||
def __init__(self, nodes):
|
||||
super(SchedDomain, self).__init__(nodes)
|
||||
|
||||
obj_flags = set()
|
||||
for flag in list(SchedDomainFlag):
|
||||
if self.flags & flag.value == flag.value:
|
||||
obj_flags.add(flag)
|
||||
|
||||
self.flags = obj_flags
|
||||
|
||||
|
||||
class SchedProcFSData(SchedProcFSNode):
|
||||
"""
|
||||
Root class for creating & storing SchedProcFSNode instances
|
||||
"""
|
||||
_read_depth = 6
|
||||
sched_domain_root = '/proc/sys/kernel/sched_domain'
|
||||
|
||||
@staticmethod
|
||||
def available(target):
|
||||
path = SchedProcFSData.sched_domain_root
|
||||
cpus = target.list_directory(path) if target.file_exists(path) else []
|
||||
|
||||
if not cpus:
|
||||
return False
|
||||
|
||||
# Even if we have a CPU entry, it can be empty (e.g. hotplugged out)
|
||||
# Make sure some data is there
|
||||
for cpu in cpus:
|
||||
if target.file_exists(target.path.join(path, cpu, "domain0", "name")):
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def __init__(self, target, path=None):
|
||||
if not path:
|
||||
path = self.sched_domain_root
|
||||
|
||||
procfs = target.read_tree_values(path, depth=self._read_depth)
|
||||
super(SchedProcFSData, self).__init__(procfs)
|
||||
|
||||
|
||||
class SchedModule(Module):
|
||||
|
||||
name = 'sched'
|
||||
|
||||
cpu_sysfs_root = '/sys/devices/system/cpu'
|
||||
|
||||
@staticmethod
|
||||
def probe(target):
|
||||
logger = logging.getLogger(SchedModule.name)
|
||||
SchedDomainFlag.check_version(target, logger)
|
||||
|
||||
return SchedProcFSData.available(target)
|
||||
|
||||
def get_kernel_attributes(self, matching=None, check_exit_code=True):
|
||||
"""
|
||||
Get the value of scheduler attributes.
|
||||
|
||||
:param matching: an (optional) substring to filter the scheduler
|
||||
attributes to be returned.
|
||||
|
||||
The scheduler exposes a list of tunable attributes under:
|
||||
/proc/sys/kernel
|
||||
all starting with the "sched_" prefix.
|
||||
|
||||
This method returns a dictionary of all the "sched_" attributes exposed
|
||||
by the target kernel, with the prefix removed.
|
||||
It's possible to restrict the list of attributes by specifying a
|
||||
substring to be matched.
|
||||
|
||||
:returns: a dictionary of scheduler tunables
|
||||
"""
|
||||
command = 'sched_get_kernel_attributes {}'.format(
|
||||
matching if matching else ''
|
||||
)
|
||||
output = self.target._execute_util(command, as_root=self.target.is_rooted,
|
||||
check_exit_code=check_exit_code)
|
||||
result = {}
|
||||
for entry in output.strip().split('\n'):
|
||||
if ':' not in entry:
|
||||
continue
|
||||
path, value = entry.strip().split(':', 1)
|
||||
if value in ['0', '1']:
|
||||
value = bool(int(value))
|
||||
elif value.isdigit():
|
||||
value = int(value)
|
||||
result[path] = value
|
||||
return result
|
||||
|
||||
def set_kernel_attribute(self, attr, value, verify=True):
|
||||
"""
|
||||
Set the value of a scheduler attribute.
|
||||
|
||||
:param attr: the attribute to set, without the "sched_" prefix
|
||||
:param value: the value to set
|
||||
:param verify: true to check that the requested value has been set
|
||||
|
||||
:raise TargetError: if the attribute cannot be set
|
||||
"""
|
||||
if isinstance(value, bool):
|
||||
value = '1' if value else '0'
|
||||
elif isinstance(value, int):
|
||||
value = str(value)
|
||||
path = '/proc/sys/kernel/sched_' + attr
|
||||
self.target.write_value(path, value, verify)
|
||||
|
||||
@property
|
||||
@memoized
|
||||
def has_debug(self):
|
||||
if self.target.config.get('SCHED_DEBUG') != 'y':
|
||||
return False
|
||||
return self.target.file_exists('/sys/kernel/debug/sched_features')
|
||||
|
||||
def get_features(self):
|
||||
"""
|
||||
Get the status of each sched feature
|
||||
|
||||
:returns: a dictionary of features and their "is enabled" status
|
||||
"""
|
||||
if not self.has_debug:
|
||||
raise RuntimeError("sched_features not available")
|
||||
feats = self.target.read_value('/sys/kernel/debug/sched_features')
|
||||
features = {}
|
||||
for feat in feats.split():
|
||||
value = True
|
||||
if feat.startswith('NO'):
|
||||
feat = feat.replace('NO_', '', 1)
|
||||
value = False
|
||||
features[feat] = value
|
||||
return features
|
||||
|
||||
def set_feature(self, feature, enable, verify=True):
|
||||
"""
|
||||
Set the status of a specified scheduler feature
|
||||
|
||||
:param feature: the feature name to set
|
||||
:param enable: true to enable the feature, false otherwise
|
||||
|
||||
:raise ValueError: if the specified enable value is not bool
|
||||
:raise RuntimeError: if the specified feature cannot be set
|
||||
"""
|
||||
if not self.has_debug:
|
||||
raise RuntimeError("sched_features not available")
|
||||
feature = feature.upper()
|
||||
feat_value = feature
|
||||
if not boolean(enable):
|
||||
feat_value = 'NO_' + feat_value
|
||||
self.target.write_value('/sys/kernel/debug/sched_features',
|
||||
feat_value, verify=False)
|
||||
if not verify:
|
||||
return
|
||||
msg = 'Failed to set {}, feature not supported?'.format(feat_value)
|
||||
features = self.get_features()
|
||||
feat_value = features.get(feature, not enable)
|
||||
if feat_value != enable:
|
||||
raise RuntimeError(msg)
|
||||
|
||||
def get_cpu_sd_info(self, cpu):
|
||||
"""
|
||||
:returns: An object view of /proc/sys/kernel/sched_domain/cpu<cpu>/*
|
||||
"""
|
||||
path = self.target.path.join(
|
||||
SchedProcFSData.sched_domain_root,
|
||||
"cpu{}".format(cpu)
|
||||
)
|
||||
|
||||
return SchedProcFSData(self.target, path)
|
||||
|
||||
def get_sd_info(self):
|
||||
"""
|
||||
:returns: An object view of /proc/sys/kernel/sched_domain/*
|
||||
"""
|
||||
return SchedProcFSData(self.target)
|
||||
|
||||
def get_capacity(self, cpu):
|
||||
"""
|
||||
:returns: The capacity of 'cpu'
|
||||
"""
|
||||
return self.get_capacities()[cpu]
|
||||
|
||||
@memoized
|
||||
def has_em(self, cpu, sd=None):
|
||||
"""
|
||||
:returns: Whether energy model data is available for 'cpu'
|
||||
"""
|
||||
if not sd:
|
||||
sd = SchedProcFSData(self.target, cpu)
|
||||
|
||||
return sd.procfs["domain0"].get("group0", {}).get("energy", {}).get("cap_states") != None
|
||||
|
||||
@memoized
|
||||
def has_dmips_capacity(self, cpu):
|
||||
"""
|
||||
:returns: Whether dmips capacity data is available for 'cpu'
|
||||
"""
|
||||
return self.target.file_exists(
|
||||
self.target.path.join(self.cpu_sysfs_root, 'cpu{}/cpu_capacity'.format(cpu))
|
||||
)
|
||||
|
||||
@memoized
|
||||
def get_em_capacity(self, cpu, sd=None):
|
||||
"""
|
||||
:returns: The maximum capacity value exposed by the EAS energy model
|
||||
"""
|
||||
if not sd:
|
||||
sd = SchedProcFSData(self.target, cpu)
|
||||
|
||||
cap_states = sd.domains[0].groups[0].energy.cap_states
|
||||
return int(cap_states.split('\t')[-2])
|
||||
|
||||
@memoized
|
||||
def get_dmips_capacity(self, cpu):
|
||||
"""
|
||||
:returns: The capacity value generated from the capacity-dmips-mhz DT entry
|
||||
"""
|
||||
return self.target.read_value(
|
||||
self.target.path.join(
|
||||
self.cpu_sysfs_root,
|
||||
'cpu{}/cpu_capacity'.format(cpu)
|
||||
),
|
||||
int
|
||||
)
|
||||
|
||||
@memoized
|
||||
def get_capacities(self, default=None):
|
||||
"""
|
||||
:param default: Default capacity value to use if no data is
found in procfs
|
||||
|
||||
:returns: a dictionary of the shape {cpu : capacity}
|
||||
|
||||
:raises RuntimeError: Raised when no capacity information is
|
||||
found and 'default' is None
|
||||
"""
|
||||
cpus = list(range(self.target.number_of_cpus))
|
||||
|
||||
capacities = {}
|
||||
sd_info = self.get_sd_info()
|
||||
|
||||
for cpu in cpus:
|
||||
if self.has_em(cpu, sd_info.cpus[cpu]):
|
||||
capacities[cpu] = self.get_em_capacity(cpu, sd_info.cpus[cpu])
|
||||
elif self.has_dmips_capacity(cpu):
|
||||
capacities[cpu] = self.get_dmips_capacity(cpu)
|
||||
else:
|
||||
if default != None:
|
||||
capacities[cpu] = default
|
||||
else:
|
||||
raise RuntimeError('No capacity data for cpu{}'.format(cpu))
|
||||
|
||||
return capacities
|
||||
|
||||
@memoized
|
||||
def get_hz(self):
|
||||
"""
|
||||
:returns: The scheduler tick frequency on the target
|
||||
"""
|
||||
return int(self.target.config.get('CONFIG_HZ', strict=True))
|
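A minimal sketch of querying the new sched module, assuming it probed on the target and is exposed as target.sched; the feature name passed to set_feature() is an assumption and depends on the kernel:

    sched = target.sched

    print(sched.get_capacities(default=1024))   # {cpu: capacity}
    print(sched.get_hz())                       # CONFIG_HZ of the target kernel

    sd = sched.get_cpu_sd_info(0)               # /proc/sys/kernel/sched_domain/cpu0
    for level, domain in sd.domains.items():
        print(level, domain.name, domain.flags) # flags is a set of SchedDomainFlag

    if sched.has_debug:
        sched.set_feature('ENERGY_AWARE', True) # hypothetical feature name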
@@ -1,4 +1,4 @@
|
||||
# Copyright 2015 ARM Limited
|
||||
# Copyright 2015-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -61,8 +61,8 @@ class ThermalZone(object):
|
||||
value = self.target.read_value(self.target.path.join(self.path, 'mode'))
|
||||
return value == 'enabled'
|
||||
|
||||
def set_mode(self, enable):
|
||||
value = 'enabled' if enable else 'disabled'
|
||||
def set_enabled(self, enabled=True):
|
||||
value = 'enabled' if enabled else 'disabled'
|
||||
self.target.write_value(self.target.path.join(self.path, 'mode'), value)
|
||||
|
||||
def get_temperature(self):
|
||||
@@ -100,5 +100,5 @@ class ThermalModule(Module):
|
||||
|
||||
def disable_all_zones(self):
|
||||
"""Disables all the thermal zones in the target"""
|
||||
for zone in self.zones:
|
||||
zone.set_mode('disabled')
|
||||
for zone in self.zones.values():
|
||||
zone.set_enabled(False)
|
||||
|
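Usage after the rename above (set_mode() becomes set_enabled()) and the switch of zones to a dictionary; zone IDs depend on the target, and the module is assumed to be available as target.thermal:

    thermal = target.thermal

    for zone_id, zone in thermal.zones.items():
        print(zone_id, zone.get_temperature())

    thermal.disable_all_zones()                 # calls set_enabled(False) on each zone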
@@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright 2015 ARM Limited
|
||||
# Copyright 2015-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -17,15 +17,17 @@ import os
|
||||
import time
|
||||
import tarfile
|
||||
import shutil
|
||||
from subprocess import CalledProcessError
|
||||
|
||||
from devlib.module import HardRestModule, BootModule, FlashModule
|
||||
from devlib.exception import TargetError, HostError
|
||||
from devlib.exception import TargetError, TargetStableError, HostError
|
||||
from devlib.utils.serial_port import open_serial_connection, pulse_dtr, write_characters
|
||||
from devlib.utils.uefi import UefiMenu, UefiConfig
|
||||
from devlib.utils.uboot import UbootMenu
|
||||
|
||||
|
||||
AUTOSTART_MESSAGE = 'Press Enter to stop auto boot...'
|
||||
OLD_AUTOSTART_MESSAGE = 'Press Enter to stop auto boot...'
|
||||
AUTOSTART_MESSAGE = 'Hit any key to stop autoboot:'
|
||||
POWERUP_MESSAGE = 'Powering up system...'
|
||||
DEFAULT_MCC_PROMPT = 'Cmd>'
|
||||
|
||||
@@ -51,7 +53,7 @@ class VexpressDtrHardReset(HardRestModule):
|
||||
try:
|
||||
if self.target.is_connected:
|
||||
self.target.execute('sync')
|
||||
except TargetError:
|
||||
except (TargetError, CalledProcessError):
|
||||
pass
|
||||
with open_serial_connection(port=self.port,
|
||||
baudrate=self.baudrate,
|
||||
@@ -87,7 +89,7 @@ class VexpressReboottxtHardReset(HardRestModule):
|
||||
try:
|
||||
if self.target.is_connected:
|
||||
self.target.execute('sync')
|
||||
except TargetError:
|
||||
except (TargetError, CalledProcessError):
|
||||
pass
|
||||
|
||||
if not os.path.exists(self.path):
|
||||
@@ -136,18 +138,20 @@ class VexpressBootModule(BootModule):
|
||||
def get_through_early_boot(self, tty):
|
||||
self.logger.debug('Establishing initial state...')
|
||||
tty.sendline('')
|
||||
i = tty.expect([AUTOSTART_MESSAGE, POWERUP_MESSAGE, self.mcc_prompt])
|
||||
if i == 2:
|
||||
i = tty.expect([AUTOSTART_MESSAGE, OLD_AUTOSTART_MESSAGE, POWERUP_MESSAGE, self.mcc_prompt])
|
||||
if i == 3:
|
||||
self.logger.debug('Saw MCC prompt.')
|
||||
time.sleep(self.short_delay)
|
||||
tty.sendline('reboot')
|
||||
elif i == 1:
|
||||
elif i == 2:
|
||||
self.logger.debug('Saw powering up message (assuming soft reboot).')
|
||||
else:
|
||||
self.logger.debug('Saw auto boot message.')
|
||||
tty.sendline('')
|
||||
time.sleep(self.short_delay)
|
||||
# could be either depending on where in the boot we are
|
||||
tty.sendline('reboot')
|
||||
tty.sendline('reset')
|
||||
|
||||
def get_uefi_menu(self, tty):
|
||||
menu = UefiMenu(tty)
|
||||
@@ -205,6 +209,7 @@ class VexpressUefiShellBoot(VexpressBootModule):
|
||||
|
||||
name = 'vexpress-uefi-shell'
|
||||
|
||||
# pylint: disable=keyword-arg-before-vararg
|
||||
def __init__(self, target, uefi_entry='^Shell$',
|
||||
efi_shell_prompt='Shell>',
|
||||
image='kernel', bootargs=None,
|
||||
@@ -220,7 +225,7 @@ class VexpressUefiShellBoot(VexpressBootModule):
|
||||
try:
|
||||
menu.select(self.uefi_entry)
|
||||
except LookupError:
|
||||
raise TargetError('Did not see "{}" UEFI entry.'.format(self.uefi_entry))
|
||||
raise TargetStableError('Did not see "{}" UEFI entry.'.format(self.uefi_entry))
|
||||
tty.expect(self.efi_shell_prompt, timeout=self.timeout)
|
||||
if self.bootargs:
|
||||
tty.sendline('') # stop default boot
|
||||
@@ -235,6 +240,7 @@ class VexpressUBoot(VexpressBootModule):
|
||||
|
||||
name = 'vexpress-u-boot'
|
||||
|
||||
# pylint: disable=keyword-arg-before-vararg
|
||||
def __init__(self, target, env=None,
|
||||
*args, **kwargs):
|
||||
super(VexpressUBoot, self).__init__(target, *args, **kwargs)
|
||||
@@ -247,7 +253,7 @@ class VexpressUBoot(VexpressBootModule):
|
||||
menu = UbootMenu(tty)
|
||||
self.logger.debug('Waiting for U-Boot prompt...')
|
||||
menu.open(timeout=120)
|
||||
for var, value in self.env.iteritems():
|
||||
for var, value in self.env.items():
|
||||
menu.setenv(var, value)
|
||||
menu.boot()
|
||||
|
||||
@@ -256,6 +262,7 @@ class VexpressBootmon(VexpressBootModule):
|
||||
|
||||
name = 'vexpress-bootmon'
|
||||
|
||||
# pylint: disable=keyword-arg-before-vararg
|
||||
def __init__(self, target,
|
||||
image, fdt, initrd, bootargs,
|
||||
uses_bootscript=False,
|
||||
@@ -278,11 +285,11 @@ class VexpressBootmon(VexpressBootModule):
|
||||
with open_serial_connection(port=self.port,
|
||||
baudrate=self.baudrate,
|
||||
timeout=self.timeout,
|
||||
init_dtr=0) as tty:
|
||||
write_characters(tty, 'fl linux fdt {}'.format(self.fdt))
|
||||
write_characters(tty, 'fl linux initrd {}'.format(self.initrd))
|
||||
write_characters(tty, 'fl linux boot {} {}'.format(self.image,
|
||||
self.bootargs))
|
||||
init_dtr=0) as tty_conn:
|
||||
write_characters(tty_conn, 'fl linux fdt {}'.format(self.fdt))
|
||||
write_characters(tty_conn, 'fl linux initrd {}'.format(self.initrd))
|
||||
write_characters(tty_conn, 'fl linux boot {} {}'.format(self.image,
|
||||
self.bootargs))
|
||||
|
||||
|
||||
class VersatileExpressFlashModule(FlashModule):
|
||||
@@ -324,9 +331,10 @@ class VersatileExpressFlashModule(FlashModule):
|
||||
baudrate=self.target.platform.baudrate,
|
||||
timeout=self.timeout,
|
||||
init_dtr=0) as tty:
|
||||
i = tty.expect([self.mcc_prompt, AUTOSTART_MESSAGE])
|
||||
# pylint: disable=no-member
|
||||
i = tty.expect([self.mcc_prompt, AUTOSTART_MESSAGE, OLD_AUTOSTART_MESSAGE])
|
||||
if i:
|
||||
tty.sendline('')
|
||||
tty.sendline('') # pylint: disable=no-member
|
||||
wait_for_vemsd(self.vemsd_mount, tty, self.mcc_prompt, self.short_delay)
|
||||
try:
|
||||
if image_bundle:
|
||||
@@ -334,9 +342,9 @@ class VersatileExpressFlashModule(FlashModule):
|
||||
if images:
|
||||
self._overlay_images(images)
|
||||
os.system('sync')
|
||||
except (IOError, OSError), e:
|
||||
except (IOError, OSError) as e:
|
||||
msg = 'Could not deploy images to {}; got: {}'
|
||||
raise TargetError(msg.format(self.vemsd_mount, e))
|
||||
raise TargetStableError(msg.format(self.vemsd_mount, e))
|
||||
self.target.boot()
|
||||
self.target.connect(timeout=30)
|
||||
|
||||
@@ -348,7 +356,7 @@ class VersatileExpressFlashModule(FlashModule):
|
||||
tar.extractall(self.vemsd_mount)
|
||||
|
||||
def _overlay_images(self, images):
|
||||
for dest, src in images.iteritems():
|
||||
for dest, src in images.items():
|
||||
dest = os.path.join(self.vemsd_mount, dest)
|
||||
self.logger.debug('Copying {} to {}'.format(src, dest))
|
||||
shutil.copy(src, dest)
|
||||
@@ -375,12 +383,11 @@ def wait_for_vemsd(vemsd_mount, tty, mcc_prompt=DEFAULT_MCC_PROMPT, short_delay=
|
||||
path = os.path.join(vemsd_mount, 'config.txt')
|
||||
if os.path.exists(path):
|
||||
return
|
||||
for _ in xrange(attempts):
|
||||
for _ in range(attempts):
|
||||
tty.sendline('') # clear any garbage
|
||||
tty.expect(mcc_prompt, timeout=short_delay)
|
||||
tty.sendline('usb_on')
|
||||
time.sleep(short_delay * 3)
|
||||
if os.path.exists(path):
|
||||
return
|
||||
raise TargetError('Could not mount {}'.format(vemsd_mount))
|
||||
|
||||
raise TargetStableError('Could not mount {}'.format(vemsd_mount))
|
||||
|
@@ -1,7 +1,22 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import logging
|
||||
|
||||
|
||||
BIG_CPUS = ['A15', 'A57', 'A72']
|
||||
BIG_CPUS = ['A15', 'A57', 'A72', 'A73']
|
||||
|
||||
|
||||
class Platform(object):
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Copyright 2015 ARM Limited
|
||||
# Copyright 2015-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -14,15 +14,17 @@
|
||||
#
|
||||
from __future__ import division
|
||||
import os
|
||||
import sys
|
||||
import tempfile
|
||||
import csv
|
||||
import time
|
||||
import pexpect
|
||||
|
||||
from devlib.platform import Platform
|
||||
from devlib.instrument import Instrument, InstrumentChannel, MeasurementsCsv, Measurement, CONTINUOUS, INSTANTANEOUS
|
||||
from devlib.exception import TargetError, HostError
|
||||
from devlib.exception import HostError, TargetTransientError
|
||||
from devlib.host import PACKAGE_BIN_DIRECTORY
|
||||
from devlib.instrument import (Instrument, InstrumentChannel, MeasurementsCsv,
|
||||
Measurement, CONTINUOUS, INSTANTANEOUS)
|
||||
from devlib.platform import Platform
|
||||
from devlib.utils.csvutil import csvreader, csvwriter
|
||||
from devlib.utils.serial_port import open_serial_connection
|
||||
|
||||
|
||||
@@ -33,6 +35,7 @@ class VersatileExpressPlatform(Platform):
|
||||
core_names=None,
|
||||
core_clusters=None,
|
||||
big_core=None,
|
||||
model=None,
|
||||
modules=None,
|
||||
|
||||
# serial settings
|
||||
@@ -61,6 +64,7 @@ class VersatileExpressPlatform(Platform):
|
||||
core_names,
|
||||
core_clusters,
|
||||
big_core,
|
||||
model,
|
||||
modules)
|
||||
self.serial_port = serial_port
|
||||
self.baudrate = baudrate
|
||||
@@ -86,6 +90,9 @@ class VersatileExpressPlatform(Platform):
|
||||
def _init_android_target(self, target):
|
||||
if target.connection_settings.get('device') is None:
|
||||
addr = self._get_target_ip_address(target)
|
||||
if sys.version_info[0] == 3:
|
||||
# Convert bytes to string for Python3 compatibility
|
||||
addr = addr.decode("utf-8")
|
||||
target.connection_settings['device'] = addr + ':5555'
|
||||
|
||||
def _init_linux_target(self, target):
|
||||
@@ -93,27 +100,32 @@ class VersatileExpressPlatform(Platform):
|
||||
addr = self._get_target_ip_address(target)
|
||||
target.connection_settings['host'] = addr
|
||||
|
||||
# pylint: disable=no-member
|
||||
def _get_target_ip_address(self, target):
|
||||
with open_serial_connection(port=self.serial_port,
|
||||
baudrate=self.baudrate,
|
||||
timeout=30,
|
||||
init_dtr=0) as tty:
|
||||
tty.sendline('')
|
||||
tty.sendline('su') # this is, apparently, required to query network device
|
||||
# info by name on recent Juno builds...
|
||||
self.logger.debug('Waiting for the Android shell prompt.')
|
||||
tty.expect(target.shell_prompt)
|
||||
|
||||
self.logger.debug('Waiting for IP address...')
|
||||
wait_start_time = time.time()
|
||||
while True:
|
||||
tty.sendline('ip addr list eth0')
|
||||
time.sleep(1)
|
||||
try:
|
||||
tty.expect(r'inet ([1-9]\d*.\d+.\d+.\d+)', timeout=10)
|
||||
return tty.match.group(1)
|
||||
except pexpect.TIMEOUT:
|
||||
pass # We have our own timeout -- see below.
|
||||
if (time.time() - wait_start_time) > self.ready_timeout:
|
||||
raise TargetError('Could not acquire IP address.')
|
||||
try:
|
||||
while True:
|
||||
tty.sendline('ip addr list eth0')
|
||||
time.sleep(1)
|
||||
try:
|
||||
tty.expect(r'inet ([1-9]\d*.\d+.\d+.\d+)', timeout=10)
|
||||
return tty.match.group(1)
|
||||
except pexpect.TIMEOUT:
|
||||
pass # We have our own timeout -- see below.
|
||||
if (time.time() - wait_start_time) > self.ready_timeout:
|
||||
raise TargetTransientError('Could not acquire IP address.')
|
||||
finally:
|
||||
tty.sendline('exit') # exit shell created by "su" call at the start
|
||||
|
||||
def _set_hard_reset_method(self, hard_reset_method):
|
||||
if hard_reset_method == 'dtr':
|
||||
@@ -210,22 +222,22 @@ class JunoEnergyInstrument(Instrument):
|
||||
mode = CONTINUOUS | INSTANTANEOUS
|
||||
|
||||
_channels = [
|
||||
InstrumentChannel('sys_curr', 'sys', 'current'),
|
||||
InstrumentChannel('a57_curr', 'a57', 'current'),
|
||||
InstrumentChannel('a53_curr', 'a53', 'current'),
|
||||
InstrumentChannel('gpu_curr', 'gpu', 'current'),
|
||||
InstrumentChannel('sys_volt', 'sys', 'voltage'),
|
||||
InstrumentChannel('a57_volt', 'a57', 'voltage'),
|
||||
InstrumentChannel('a53_volt', 'a53', 'voltage'),
|
||||
InstrumentChannel('gpu_volt', 'gpu', 'voltage'),
|
||||
InstrumentChannel('sys_pow', 'sys', 'power'),
|
||||
InstrumentChannel('a57_pow', 'a57', 'power'),
|
||||
InstrumentChannel('a53_pow', 'a53', 'power'),
|
||||
InstrumentChannel('gpu_pow', 'gpu', 'power'),
|
||||
InstrumentChannel('sys_cenr', 'sys', 'energy'),
|
||||
InstrumentChannel('a57_cenr', 'a57', 'energy'),
|
||||
InstrumentChannel('a53_cenr', 'a53', 'energy'),
|
||||
InstrumentChannel('gpu_cenr', 'gpu', 'energy'),
|
||||
InstrumentChannel('sys', 'current'),
|
||||
InstrumentChannel('a57', 'current'),
|
||||
InstrumentChannel('a53', 'current'),
|
||||
InstrumentChannel('gpu', 'current'),
|
||||
InstrumentChannel('sys', 'voltage'),
|
||||
InstrumentChannel('a57', 'voltage'),
|
||||
InstrumentChannel('a53', 'voltage'),
|
||||
InstrumentChannel('gpu', 'voltage'),
|
||||
InstrumentChannel('sys', 'power'),
|
||||
InstrumentChannel('a57', 'power'),
|
||||
InstrumentChannel('a53', 'power'),
|
||||
InstrumentChannel('gpu', 'power'),
|
||||
InstrumentChannel('sys', 'energy'),
|
||||
InstrumentChannel('a57', 'energy'),
|
||||
InstrumentChannel('a53', 'energy'),
|
||||
InstrumentChannel('gpu', 'energy'),
|
||||
]
|
||||
|
||||
def __init__(self, target):
|
||||
@@ -240,12 +252,14 @@ class JunoEnergyInstrument(Instrument):
|
||||
self.command = '{} -o {}'.format(self.binary, self.on_target_file)
|
||||
self.command2 = '{}'.format(self.binary)
|
||||
|
||||
def setup(self):
|
||||
def setup(self): # pylint: disable=arguments-differ
|
||||
self.binary = self.target.install(os.path.join(PACKAGE_BIN_DIRECTORY,
|
||||
self.target.abi, self.binname))
|
||||
self.command = '{} -o {}'.format(self.binary, self.on_target_file)
|
||||
self.command2 = '{}'.format(self.binary)
|
||||
|
||||
def reset(self, sites=None, kinds=None):
|
||||
super(JunoEnergyInstrument, self).reset(sites, kinds)
|
||||
def reset(self, sites=None, kinds=None, channels=None):
|
||||
super(JunoEnergyInstrument, self).reset(sites, kinds, channels)
|
||||
self.target.killall(self.binname, as_root=True)
|
||||
|
||||
def start(self):
|
||||
@@ -254,14 +268,14 @@ class JunoEnergyInstrument(Instrument):
|
||||
def stop(self):
|
||||
self.target.killall(self.binname, signal='TERM', as_root=True)
|
||||
|
||||
# pylint: disable=arguments-differ
|
||||
def get_data(self, output_file):
|
||||
temp_file = tempfile.mktemp()
|
||||
self.target.pull(self.on_target_file, temp_file)
|
||||
self.target.remove(self.on_target_file)
|
||||
|
||||
with open(temp_file, 'rb') as fh:
|
||||
reader = csv.reader(fh)
|
||||
headings = reader.next()
|
||||
with csvreader(temp_file) as reader:
|
||||
headings = next(reader)
|
||||
|
||||
# Figure out which columns from the collected csv we actually want
|
||||
select_columns = []
|
||||
@@ -271,25 +285,23 @@ class JunoEnergyInstrument(Instrument):
|
||||
except ValueError:
|
||||
raise HostError('Channel "{}" is not in {}'.format(chan.name, temp_file))
|
||||
|
||||
with open(output_file, 'wb') as wfh:
|
||||
with csvwriter(output_file) as writer:
|
||||
write_headings = ['{}_{}'.format(c.site, c.kind)
|
||||
for c in self.active_channels]
|
||||
writer = csv.writer(wfh)
|
||||
writer.writerow(write_headings)
|
||||
for row in reader:
|
||||
write_row = [row[c] for c in select_columns]
|
||||
writer.writerow(write_row)
|
||||
|
||||
return MeasurementsCsv(output_file, self.active_channels)
|
||||
return MeasurementsCsv(output_file, self.active_channels, sample_rate_hz=10)
|
||||
|
||||
def take_measurement(self):
|
||||
result = []
|
||||
output = self.target.execute(self.command2).split()
|
||||
reader=csv.reader(output)
|
||||
headings=reader.next()
|
||||
values = reader.next()
|
||||
for chan in self.active_channels:
|
||||
value = values[headings.index(chan.name)]
|
||||
result.append(Measurement(value, chan))
|
||||
with csvreader(output) as reader:
|
||||
headings = next(reader)
|
||||
values = next(reader)
|
||||
for chan in self.active_channels:
|
||||
value = values[headings.index(chan.name)]
|
||||
result.append(Measurement(value, chan))
|
||||
return result
|
||||
|
||||
|
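A hedged sketch of driving JunoEnergyInstrument after the changes above (single-argument channels, csvreader-based parsing). The import path and the workload are assumptions; setup() installs the readenergy helper from the package binaries.

    from devlib.platform.arm import JunoEnergyInstrument

    instrument = JunoEnergyInstrument(target)
    instrument.setup()

    # Instantaneous snapshot of every active channel.
    for measurement in instrument.take_measurement():
        print(measurement.channel.label, measurement.value)

    # Continuous collection around a workload.
    instrument.reset()
    instrument.start()
    target.execute('sleep 10')                  # stand-in for a real workload
    instrument.stop()
    instrument.get_data('juno_energy.csv')      # MeasurementsCsv, sample_rate_hz=10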
@@ -1,4 +1,4 @@
|
||||
# Copyright 2016 ARM Limited
|
||||
# Copyright 2016-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -15,12 +15,13 @@
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
import shutil
|
||||
import time
|
||||
import types
|
||||
import shlex
|
||||
from pipes import quote
|
||||
|
||||
from devlib.exception import TargetError
|
||||
from devlib.exception import TargetStableError
|
||||
from devlib.host import PACKAGE_BIN_DIRECTORY
|
||||
from devlib.platform import Platform
|
||||
from devlib.utils.ssh import AndroidGem5Connection, LinuxGem5Connection
|
||||
@@ -55,7 +56,7 @@ class Gem5SimulationPlatform(Platform):
|
||||
self.stdout_file = None
|
||||
self.stderr_file = None
|
||||
self.stderr_filename = None
|
||||
if self.gem5_port is None:
|
||||
if self.gem5_port is None: # pylint: disable=simplifiable-if-statement
|
||||
# Allows devlib to pick up already running simulations
|
||||
self.start_gem5_simulation = True
|
||||
else:
|
||||
@@ -63,13 +64,12 @@ class Gem5SimulationPlatform(Platform):
|
||||
|
||||
# Find the first one that does not exist. Ensures that we do not re-use
|
||||
# the directory used by someone else.
|
||||
for i in xrange(sys.maxint):
|
||||
i = 0
|
||||
directory = os.path.join(self.gem5_interact_dir, "wa_{}".format(i))
|
||||
while os.path.exists(directory):
|
||||
i += 1
|
||||
directory = os.path.join(self.gem5_interact_dir, "wa_{}".format(i))
|
||||
try:
|
||||
os.stat(directory)
|
||||
continue
|
||||
except OSError:
|
||||
break
|
||||
|
||||
self.gem5_interact_dir = directory
|
||||
self.logger.debug("Using {} as the temporary directory."
|
||||
.format(self.gem5_interact_dir))
|
||||
@@ -88,12 +88,12 @@ class Gem5SimulationPlatform(Platform):
|
||||
Check if the command to start gem5 makes sense
|
||||
"""
|
||||
if self.gem5args_binary is None:
|
||||
raise TargetError('Please specify a gem5 binary.')
|
||||
raise TargetStableError('Please specify a gem5 binary.')
|
||||
if self.gem5args_args is None:
|
||||
raise TargetError('Please specify the arguments passed on to gem5.')
|
||||
raise TargetStableError('Please specify the arguments passed on to gem5.')
|
||||
self.gem5args_virtio = str(self.gem5args_virtio).format(self.gem5_interact_dir)
|
||||
if self.gem5args_virtio is None:
|
||||
raise TargetError('Please specify arguments needed for virtIO.')
|
||||
raise TargetStableError('Please specify arguments needed for virtIO.')
|
||||
|
||||
def _start_interaction_gem5(self):
|
||||
"""
|
||||
@@ -112,7 +112,7 @@ class Gem5SimulationPlatform(Platform):
|
||||
if not os.path.exists(self.stats_directory):
|
||||
os.mkdir(self.stats_directory)
|
||||
if os.path.exists(self.gem5_out_dir):
|
||||
raise TargetError("The gem5 stats directory {} already "
|
||||
raise TargetStableError("The gem5 stats directory {} already "
|
||||
"exists.".format(self.gem5_out_dir))
|
||||
else:
|
||||
os.mkdir(self.gem5_out_dir)
|
||||
@@ -131,11 +131,11 @@ class Gem5SimulationPlatform(Platform):
|
||||
self.logger.info("Starting the gem5 simulator")
|
||||
|
||||
command_line = "{} --outdir={} {} {}".format(self.gem5args_binary,
|
||||
self.gem5_out_dir,
|
||||
quote(self.gem5_out_dir),
|
||||
self.gem5args_args,
|
||||
self.gem5args_virtio)
|
||||
self.logger.debug("gem5 command line: {}".format(command_line))
|
||||
self.gem5 = subprocess.Popen(command_line.split(),
|
||||
self.gem5 = subprocess.Popen(shlex.split(command_line),
|
||||
stdout=self.stdout_file,
|
||||
stderr=self.stderr_file)
|
||||
|
||||
@@ -155,7 +155,7 @@ class Gem5SimulationPlatform(Platform):
|
||||
e.g. pid, input directory etc
|
||||
"""
|
||||
self.logger("This functionality is not yet implemented")
|
||||
raise TargetError()
|
||||
raise TargetStableError()
|
||||
|
||||
def _intercept_telnet_port(self):
|
||||
"""
|
||||
@@ -163,17 +163,22 @@ class Gem5SimulationPlatform(Platform):
|
||||
"""
|
||||
|
||||
if self.gem5 is None:
|
||||
raise TargetError('The platform has no gem5 simulation! '
|
||||
raise TargetStableError('The platform has no gem5 simulation! '
|
||||
'Something went wrong')
|
||||
while self.gem5_port is None:
|
||||
# Check that gem5 is running!
|
||||
if self.gem5.poll():
|
||||
raise TargetError("The gem5 process has crashed with error code {}!".format(self.gem5.poll()))
|
||||
message = "The gem5 process has crashed with error code {}!\n\tPlease see {} for details."
|
||||
raise TargetStableError(message.format(self.gem5.poll(), self.stderr_file.name))
|
||||
|
||||
# Open the stderr file
|
||||
with open(self.stderr_filename, 'r') as f:
|
||||
for line in f:
|
||||
# Look for two different strings, exact wording depends on
# version of gem5
m = re.search(r"Listening for system connection on port (?P<port>\d+)", line)
if not m:
m = re.search(r"Listening for connections on port (?P<port>\d+)", line)
if m:
port = int(m.group('port'))
if port >= 3456 and port < 5900:
@@ -182,7 +187,7 @@ class Gem5SimulationPlatform(Platform):
# Check if the sockets are not disabled
m = re.search(r"Sockets disabled, not accepting terminal connections", line)
if m:
raise TargetError("The sockets have been disabled!"
raise TargetStableError("The sockets have been disabled!"
"Pass --listener-mode=on to gem5")
else:
time.sleep(1)
@@ -200,9 +205,7 @@ class Gem5SimulationPlatform(Platform):
"""
Deploy m5 if not yet installed
"""
m5_path = target.get_installed('m5')
if m5_path is None:
m5_path = self._deploy_m5(target)
m5_path = self._deploy_m5(target)
target.conn.m5_path = m5_path

# Set the terminal settings for the connection to gem5
@@ -232,6 +235,7 @@ class Gem5SimulationPlatform(Platform):
# Call the general update_from_target implementation
super(Gem5SimulationPlatform, self).update_from_target(target)


def gem5_capture_screen(self, filepath):
file_list = os.listdir(self.gem5_out_dir)
screen_caps = []
@@ -239,6 +243,12 @@ class Gem5SimulationPlatform(Platform):
if '.bmp' in f:
screen_caps.append(f)

if '{ts}' in filepath:
cmd = '{} date -u -Iseconds'
# pylint: disable=no-member
ts = self.target.execute(cmd.format(self.target.busybox)).strip()
filepath = filepath.format(ts=ts)

successful_capture = False
if len(screen_caps) == 1:
# Bail out if we do not have image, and resort to the slower, built
@@ -251,6 +261,7 @@ class Gem5SimulationPlatform(Platform):
im.save(temp_image, "PNG")
shutil.copy(temp_image, filepath)
os.remove(temp_image)
# pylint: disable=undefined-variable
gem5_logger.info("capture_screen: using gem5 screencap")
successful_capture = True

@@ -259,12 +270,14 @@ class Gem5SimulationPlatform(Platform):

return successful_capture

# pylint: disable=no-self-use
def _deploy_m5(self, target):
# m5 is not yet installed so install it
host_executable = os.path.join(PACKAGE_BIN_DIRECTORY,
target.abi, 'm5')
return target.install(host_executable)

# pylint: disable=no-self-use
def _resize_shell(self, target):
"""
Resize the shell to avoid line wrapping issues.
@@ -275,18 +288,16 @@ class Gem5SimulationPlatform(Platform):
target.execute('reset', check_exit_code=False)

# Methods that will be monkey-patched onto the target
def _overwritten_reset(self):
raise TargetError('Resetting is not allowed on gem5 platforms!')
def _overwritten_reset(self): # pylint: disable=unused-argument
raise TargetStableError('Resetting is not allowed on gem5 platforms!')

def _overwritten_reboot(self):
raise TargetError('Rebooting is not allowed on gem5 platforms!')
def _overwritten_reboot(self): # pylint: disable=unused-argument
raise TargetStableError('Rebooting is not allowed on gem5 platforms!')

def _overwritten_capture_screen(self, filepath):
connection_screencapped = self.platform.gem5_capture_screen(filepath)
if connection_screencapped == False:
if not connection_screencapped:
# The connection was not able to capture the screen so use the target
# implementation
self.logger.debug('{} was not able to screen cap, using the original target implementation'.format(self.platform.__class__.__name__))
self.target_impl_capture_screen(filepath)
1310  devlib/target.py  File diff suppressed because it is too large
@@ -1,3 +1,18 @@
|
||||
# Copyright 2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import logging
|
||||
|
||||
|
||||
@@ -16,5 +31,13 @@ class TraceCollector(object):
|
||||
def stop(self):
|
||||
pass
|
||||
|
||||
def __enter__(self):
|
||||
self.reset()
|
||||
self.start()
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_value, traceback):
|
||||
self.stop()
|
||||
|
||||
def get_trace(self, outfile):
|
||||
pass
|
||||
|
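# Illustrative sketch of the context-manager support added to TraceCollector
# above (the SSH settings are placeholders; any rooted devlib Target works).
from devlib import LinuxTarget
from devlib.trace.ftrace import FtraceCollector

target = LinuxTarget(connection_settings={'host': '192.168.0.100',
                                          'username': 'root',
                                          'password': 'root'})
collector = FtraceCollector(target, events=['sched_switch'], autoreport=False)

with collector:                  # __enter__ calls reset() then start()
    target.execute('sleep 5')    # the workload being traced
# __exit__ has called stop(); pull the binary trace to the host
collector.get_trace('trace.dat')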
@@ -1,4 +1,4 @@
|
||||
# Copyright 2015 ARM Limited
|
||||
# Copyright 2015-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -19,10 +19,11 @@ import json
|
||||
import time
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
from devlib.trace import TraceCollector
|
||||
from devlib.host import PACKAGE_BIN_DIRECTORY
|
||||
from devlib.exception import TargetError, HostError
|
||||
from devlib.exception import TargetStableError, HostError
|
||||
from devlib.utils.misc import check_output, which
|
||||
|
||||
|
||||
@@ -49,6 +50,7 @@ STATS_RE = re.compile(r'([^ ]*) +([0-9]+) +([0-9.]+) us +([0-9.]+) us +([0-9.]+)
|
||||
|
||||
class FtraceCollector(TraceCollector):
|
||||
|
||||
# pylint: disable=too-many-locals,too-many-branches,too-many-statements
|
||||
def __init__(self, target,
|
||||
events=None,
|
||||
functions=None,
|
||||
@@ -60,6 +62,7 @@ class FtraceCollector(TraceCollector):
|
||||
autoview=False,
|
||||
no_install=False,
|
||||
strict=False,
|
||||
report_on_target=False,
|
||||
):
|
||||
super(FtraceCollector, self).__init__(target)
|
||||
self.events = events if events is not None else DEFAULT_EVENTS
|
||||
@@ -70,7 +73,10 @@ class FtraceCollector(TraceCollector):
|
||||
self.automark = automark
|
||||
self.autoreport = autoreport
|
||||
self.autoview = autoview
|
||||
self.target_output_file = os.path.join(self.target.working_directory, OUTPUT_TRACE_FILE)
|
||||
self.report_on_target = report_on_target
|
||||
self.target_output_file = target.path.join(self.target.working_directory, OUTPUT_TRACE_FILE)
|
||||
text_file_name = target.path.splitext(OUTPUT_TRACE_FILE)[0] + '.txt'
|
||||
self.target_text_file = target.path.join(self.target.working_directory, text_file_name)
|
||||
self.target_binary = None
|
||||
self.host_binary = None
|
||||
self.start_time = None
|
||||
@@ -79,6 +85,7 @@ class FtraceCollector(TraceCollector):
|
||||
self.function_string = None
|
||||
self._reset_needed = True
|
||||
|
||||
# pylint: disable=bad-whitespace
|
||||
# Setup tracing paths
|
||||
self.available_events_file = self.target.path.join(self.tracing_path, 'available_events')
|
||||
self.available_functions_file = self.target.path.join(self.tracing_path, 'available_filter_functions')
|
||||
@@ -92,8 +99,8 @@ class FtraceCollector(TraceCollector):
|
||||
self.kernelshark = which('kernelshark')
|
||||
|
||||
if not self.target.is_rooted:
|
||||
raise TargetError('trace-cmd instrument cannot be used on an unrooted device.')
|
||||
if self.autoreport and self.host_binary is None:
|
||||
raise TargetStableError('trace-cmd instrument cannot be used on an unrooted device.')
|
||||
if self.autoreport and not self.report_on_target and self.host_binary is None:
|
||||
raise HostError('trace-cmd binary must be installed on the host if autoreport=True.')
|
||||
if self.autoview and self.kernelshark is None:
|
||||
raise HostError('kernelshark binary must be installed on the host if autoview=True.')
|
||||
@@ -102,7 +109,7 @@ class FtraceCollector(TraceCollector):
|
||||
self.target_binary = self.target.install(host_file)
|
||||
else:
|
||||
if not self.target.is_installed('trace-cmd'):
|
||||
raise TargetError('No trace-cmd found on device and no_install=True is specified.')
|
||||
raise TargetStableError('No trace-cmd found on device and no_install=True is specified.')
|
||||
self.target_binary = 'trace-cmd'
|
||||
|
||||
# Validate required events to be traced
|
||||
@@ -117,10 +124,10 @@ class FtraceCollector(TraceCollector):
|
||||
_event = '*' + event
|
||||
event_re = re.compile(_event.replace('*', '.*'))
|
||||
# Select events matching the required ones
|
||||
if len(filter(event_re.match, available_events)) == 0:
|
||||
if not list(filter(event_re.match, available_events)):
|
||||
message = 'Event [{}] not available for tracing'.format(event)
|
||||
if strict:
|
||||
raise TargetError(message)
|
||||
raise TargetStableError(message)
|
||||
self.target.logger.warning(message)
|
||||
else:
|
||||
selected_events.append(event)
|
||||
@@ -128,14 +135,14 @@ class FtraceCollector(TraceCollector):
|
||||
# Thus, if no other events have been specified, try to add at least
# a tracepoint which is always available and possibly triggered a few
|
||||
# times.
|
||||
if self.functions and len(selected_events) == 0:
|
||||
if self.functions and not selected_events:
|
||||
selected_events = ['sched_wakeup_new']
|
||||
self.event_string = _build_trace_events(selected_events)
|
||||
|
||||
# Check for function tracing support
|
||||
if self.functions:
|
||||
if not self.target.file_exists(self.function_profile_file):
|
||||
raise TargetError('Function profiling not supported. '\
|
||||
raise TargetStableError('Function profiling not supported. '\
|
||||
'A kernel build with CONFIG_FUNCTION_PROFILER enabled is required')
|
||||
# Validate required functions to be traced
|
||||
available_functions = self.target.execute(
|
||||
@@ -146,7 +153,7 @@ class FtraceCollector(TraceCollector):
|
||||
if function not in available_functions:
|
||||
message = 'Function [{}] not available for profiling'.format(function)
|
||||
if strict:
|
||||
raise TargetError(message)
|
||||
raise TargetStableError(message)
|
||||
self.target.logger.warning(message)
|
||||
else:
|
||||
selected_functions.append(function)
|
||||
@@ -202,21 +209,27 @@ class FtraceCollector(TraceCollector):
|
||||
|
||||
def get_trace(self, outfile):
|
||||
if os.path.isdir(outfile):
|
||||
outfile = os.path.join(outfile, os.path.dirname(self.target_output_file))
|
||||
self.target.execute('{} extract -o {}'.format(self.target_binary, self.target_output_file),
|
||||
outfile = os.path.join(outfile, os.path.basename(self.target_output_file))
|
||||
self.target.execute('{0} extract -o {1}; chmod 666 {1}'.format(self.target_binary,
|
||||
self.target_output_file),
|
||||
timeout=TIMEOUT, as_root=True)
|
||||
|
||||
# The size of trace.dat will depend on how long trace-cmd was running.
|
||||
# Therefore the timeout for the pull command must also be adjusted
|
||||
# accordingly.
|
||||
pull_timeout = 5 * (self.stop_time - self.start_time)
|
||||
pull_timeout = 10 * (self.stop_time - self.start_time)
|
||||
self.target.pull(self.target_output_file, outfile, timeout=pull_timeout)
|
||||
if not os.path.isfile(outfile):
|
||||
self.logger.warning('Binary trace not pulled from device.')
|
||||
else:
|
||||
if self.autoreport:
|
||||
textfile = os.path.splitext(outfile)[0] + '.txt'
|
||||
self.report(outfile, textfile)
|
||||
if self.report_on_target:
|
||||
self.generate_report_on_target()
|
||||
self.target.pull(self.target_text_file,
|
||||
textfile, timeout=pull_timeout)
|
||||
else:
|
||||
self.report(outfile, textfile)
|
||||
if self.autoview:
|
||||
self.view(outfile)
|
||||
|
||||
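# Sketch of the new report_on_target path shown in the hunk above (assumes the
# `target` and imports from the earlier sketch): with autoreport=True and
# report_on_target=True, the text report is produced by trace-cmd on the
# target itself and pulled alongside the binary trace.
ftrace = FtraceCollector(target, events=['sched_wakeup', 'sched_switch'],
                         autoreport=True, report_on_target=True)
ftrace.reset()
ftrace.start()
target.execute('sleep 2')
ftrace.stop()
ftrace.get_trace('trace.dat')    # also leaves trace.txt next to trace.dat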
@@ -226,6 +239,7 @@ class FtraceCollector(TraceCollector):
|
||||
|
||||
if os.path.isdir(outfile):
|
||||
outfile = os.path.join(outfile, OUTPUT_PROFILE_FILE)
|
||||
# pylint: disable=protected-access
|
||||
output = self.target._execute_util('ftrace_get_function_stats',
|
||||
as_root=True)
|
||||
|
||||
@@ -253,7 +267,7 @@ class FtraceCollector(TraceCollector):
|
||||
|
||||
self.logger.debug("FTrace stats output [%s]...", outfile)
|
||||
with open(outfile, 'w') as fh:
|
||||
json.dump(function_stats, fh, indent=4)
|
||||
json.dump(function_stats, fh, indent=4)
|
||||
self.logger.debug("FTrace function stats save in [%s]", outfile)
|
||||
|
||||
return function_stats
|
||||
@@ -266,8 +280,10 @@ class FtraceCollector(TraceCollector):
|
||||
self.logger.debug(command)
|
||||
process = subprocess.Popen(command, stderr=subprocess.PIPE, shell=True)
|
||||
_, error = process.communicate()
|
||||
if sys.version_info[0] == 3:
|
||||
error = error.decode(sys.stdout.encoding or 'utf-8', 'replace')
|
||||
if process.returncode:
|
||||
raise TargetError('trace-cmd returned non-zero exit code {}'.format(process.returncode))
|
||||
raise TargetStableError('trace-cmd returned non-zero exit code {}'.format(process.returncode))
|
||||
if error:
|
||||
# logged at debug level, as trace-cmd always outputs some
|
||||
# errors that seem benign.
|
||||
@@ -286,6 +302,12 @@ class FtraceCollector(TraceCollector):
|
||||
except OSError:
|
||||
raise HostError('Could not find trace-cmd. Please make sure it is installed and is in PATH.')
|
||||
|
||||
def generate_report_on_target(self):
|
||||
command = '{} report {} > {}'.format(self.target_binary,
|
||||
self.target_output_file,
|
||||
self.target_text_file)
|
||||
self.target.execute(command, timeout=TIMEOUT)
|
||||
|
||||
def view(self, binfile):
|
||||
check_output('{} {}'.format(self.kernelshark, binfile), shell=True)
|
||||
|
||||
|
73  devlib/trace/logcat.py  Normal file
@@ -0,0 +1,73 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import os
|
||||
import shutil
|
||||
|
||||
from devlib.trace import TraceCollector
|
||||
from devlib.utils.android import LogcatMonitor
|
||||
|
||||
class LogcatCollector(TraceCollector):
|
||||
|
||||
def __init__(self, target, regexps=None):
|
||||
super(LogcatCollector, self).__init__(target)
|
||||
self.regexps = regexps
|
||||
self._collecting = False
|
||||
self._prev_log = None
|
||||
self._monitor = None
|
||||
|
||||
def reset(self):
|
||||
"""
|
||||
Clear Collector data but do not interrupt collection
|
||||
"""
|
||||
if not self._monitor:
|
||||
return
|
||||
|
||||
if self._collecting:
|
||||
self._monitor.clear_log()
|
||||
elif self._prev_log:
|
||||
os.remove(self._prev_log)
|
||||
self._prev_log = None
|
||||
|
||||
def start(self):
|
||||
"""
|
||||
Start collecting logcat lines
|
||||
"""
|
||||
self._monitor = LogcatMonitor(self.target, self.regexps)
|
||||
if self._prev_log:
|
||||
# Append new data collection to previous collection
|
||||
self._monitor.start(self._prev_log)
|
||||
else:
|
||||
self._monitor.start()
|
||||
|
||||
self._collecting = True
|
||||
|
||||
def stop(self):
|
||||
"""
|
||||
Stop collecting logcat lines
|
||||
"""
|
||||
if not self._collecting:
|
||||
raise RuntimeError('Logcat monitor not running, nothing to stop')
|
||||
|
||||
self._monitor.stop()
|
||||
self._collecting = False
|
||||
self._prev_log = self._monitor.logfile
|
||||
|
||||
def get_trace(self, outfile):
|
||||
"""
|
||||
Output collected logcat lines to designated file
|
||||
"""
|
||||
# copy self._monitor.logfile to outfile
|
||||
shutil.copy(self._monitor.logfile, outfile)
|
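# Illustrative sketch of the LogcatCollector added above (the device serial is
# a placeholder; requires an adb-visible Android device).
import time
from devlib import AndroidTarget
from devlib.trace.logcat import LogcatCollector

target = AndroidTarget(connection_settings={'device': 'emulator-5554'})
logcat = LogcatCollector(target, regexps=['ActivityManager'])

logcat.start()                   # spawns a filtered `adb logcat` on the host
time.sleep(10)                   # window during which entries are captured
logcat.stop()
logcat.get_trace('logcat.txt')   # copy of the captured log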
137  devlib/trace/perf.py  Normal file
@@ -0,0 +1,137 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
import os
|
||||
import re
|
||||
from past.builtins import basestring, zip
|
||||
|
||||
from devlib.host import PACKAGE_BIN_DIRECTORY
|
||||
from devlib.trace import TraceCollector
|
||||
from devlib.utils.misc import ensure_file_directory_exists as _f
|
||||
|
||||
|
||||
PERF_COMMAND_TEMPLATE = '{} stat {} {} sleep 1000 > {} 2>&1 '
|
||||
|
||||
PERF_COUNT_REGEX = re.compile(r'^(CPU\d+)?\s*(\d+)\s*(.*?)\s*(\[\s*\d+\.\d+%\s*\])?\s*$')
|
||||
|
||||
DEFAULT_EVENTS = [
|
||||
'migrations',
|
||||
'cs',
|
||||
]
|
||||
|
||||
|
||||
class PerfCollector(TraceCollector):
|
||||
"""
|
||||
Perf is a Linux profiling tool based on performance counters.
|
||||
|
||||
Performance counters are CPU hardware registers that count hardware events
|
||||
such as instructions executed, cache-misses suffered, or branches
|
||||
mispredicted. They form a basis for profiling applications to trace dynamic
|
||||
control flow and identify hotspots.
|
||||
|
||||
perf accepts options and events. If no option is given, the default '-a' is
used. For events, the default events are migrations and cs. They both can
be specified in the config file.
|
||||
|
||||
Events must be provided as a list, for example ::
|
||||
|
||||
perf_events = ['migrations', 'cs']
|
||||
|
||||
Events can be obtained by typing the following in the command line on the
|
||||
device ::
|
||||
|
||||
perf list
|
||||
|
||||
Options, on the other hand, can be provided as a single string, as follows ::
|
||||
|
||||
perf_options = '-a -i'
|
||||
|
||||
Options can be obtained by running the following in the command line ::
|
||||
|
||||
man perf-stat
|
||||
"""
|
||||
|
||||
def __init__(self, target,
|
||||
events=None,
|
||||
optionstring=None,
|
||||
labels=None,
|
||||
force_install=False):
|
||||
super(PerfCollector, self).__init__(target)
|
||||
self.events = events if events else DEFAULT_EVENTS
|
||||
self.force_install = force_install
|
||||
self.labels = labels
|
||||
|
||||
# Validate parameters
|
||||
if isinstance(optionstring, list):
|
||||
self.optionstrings = optionstring
|
||||
else:
|
||||
self.optionstrings = [optionstring]
|
||||
if self.events and isinstance(self.events, basestring):
|
||||
self.events = [self.events]
|
||||
if not self.labels:
|
||||
self.labels = ['perf_{}'.format(i) for i in range(len(self.optionstrings))]
|
||||
if len(self.labels) != len(self.optionstrings):
|
||||
raise ValueError('The number of labels must match the number of optstrings provided for perf.')
|
||||
|
||||
self.binary = self.target.get_installed('perf')
|
||||
if self.force_install or not self.binary:
|
||||
self.binary = self._deploy_perf()
|
||||
|
||||
self.commands = self._build_commands()
|
||||
|
||||
def reset(self):
|
||||
self.target.killall('perf', as_root=self.target.is_rooted)
|
||||
for label in self.labels:
|
||||
filepath = self._get_target_outfile(label)
|
||||
self.target.remove(filepath)
|
||||
|
||||
def start(self):
|
||||
for command in self.commands:
|
||||
self.target.kick_off(command)
|
||||
|
||||
def stop(self):
|
||||
self.target.killall('sleep', as_root=self.target.is_rooted)
|
||||
|
||||
# pylint: disable=arguments-differ
|
||||
def get_trace(self, outdir):
|
||||
for label in self.labels:
|
||||
target_file = self._get_target_outfile(label)
|
||||
host_relpath = os.path.basename(target_file)
|
||||
host_file = _f(os.path.join(outdir, host_relpath))
|
||||
self.target.pull(target_file, host_file)
|
||||
|
||||
def _deploy_perf(self):
|
||||
host_executable = os.path.join(PACKAGE_BIN_DIRECTORY,
|
||||
self.target.abi, 'perf')
|
||||
return self.target.install(host_executable)
|
||||
|
||||
def _build_commands(self):
|
||||
commands = []
|
||||
for opts, label in zip(self.optionstrings, self.labels):
|
||||
commands.append(self._build_perf_command(opts, self.events, label))
|
||||
return commands
|
||||
|
||||
def _get_target_outfile(self, label):
|
||||
return self.target.get_workpath('{}.out'.format(label))
|
||||
|
||||
def _build_perf_command(self, options, events, label):
|
||||
event_string = ' '.join(['-e {}'.format(e) for e in events])
|
||||
command = PERF_COMMAND_TEMPLATE.format(self.binary,
|
||||
options or '',
|
||||
event_string,
|
||||
self._get_target_outfile(label))
|
||||
return command
|
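# Illustrative sketch of the PerfCollector added above (assumes an
# already-connected devlib Target bound to `target`; the events follow the
# documented defaults and 'results' is a placeholder output directory).
import time
from devlib.trace.perf import PerfCollector

perf = PerfCollector(target, events=['migrations', 'cs'],
                     optionstring='-a', labels=['perf_global'])
perf.reset()
perf.start()                 # kicks off `perf stat ... sleep 1000` in the background
time.sleep(5)                # measurement window
perf.stop()                  # killing the sleep terminates perf stat
perf.get_trace('results')    # pulls perf_global.out into results/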
98  devlib/trace/screencapture.py  Normal file
@@ -0,0 +1,98 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
|
||||
from devlib.trace import TraceCollector
|
||||
from devlib.exception import WorkerThreadError
|
||||
|
||||
|
||||
class ScreenCapturePoller(threading.Thread):
|
||||
|
||||
def __init__(self, target, period, output_path=None, timeout=30):
|
||||
super(ScreenCapturePoller, self).__init__()
|
||||
self.target = target
|
||||
self.logger = logging.getLogger('screencapture')
|
||||
self.period = period
|
||||
self.timeout = timeout
|
||||
self.stop_signal = threading.Event()
|
||||
self.lock = threading.Lock()
|
||||
self.last_poll = 0
|
||||
self.daemon = True
|
||||
self.exc = None
|
||||
self.output_path = output_path
|
||||
|
||||
def run(self):
|
||||
self.logger.debug('Starting screen capture polling')
|
||||
try:
|
||||
while True:
|
||||
if self.stop_signal.is_set():
|
||||
break
|
||||
with self.lock:
|
||||
current_time = time.time()
|
||||
if (current_time - self.last_poll) >= self.period:
|
||||
self.poll()
|
||||
time.sleep(0.5)
|
||||
except Exception: # pylint: disable=W0703
|
||||
self.exc = WorkerThreadError(self.name, sys.exc_info())
|
||||
|
||||
def stop(self):
|
||||
self.logger.debug('Stopping screen capture polling')
|
||||
self.stop_signal.set()
|
||||
self.join(self.timeout)
|
||||
if self.is_alive():
|
||||
self.logger.error('Could not join screen capture poller thread.')
|
||||
if self.exc:
|
||||
raise self.exc # pylint: disable=E0702
|
||||
|
||||
def poll(self):
|
||||
self.last_poll = time.time()
|
||||
self.target.capture_screen(os.path.join(self.output_path, "screencap_{ts}.png"))
|
||||
|
||||
|
||||
class ScreenCaptureCollector(TraceCollector):
|
||||
|
||||
def __init__(self, target, output_path=None, period=None):
|
||||
super(ScreenCaptureCollector, self).__init__(target)
|
||||
self._collecting = False
|
||||
self.output_path = output_path
|
||||
self.period = period
|
||||
self.target = target
|
||||
self._poller = ScreenCapturePoller(self.target, self.period,
|
||||
self.output_path)
|
||||
|
||||
def reset(self):
|
||||
pass
|
||||
|
||||
def start(self):
|
||||
"""
|
||||
Start collecting the screenshots
|
||||
"""
|
||||
self._poller.start()
|
||||
self._collecting = True
|
||||
|
||||
def stop(self):
|
||||
"""
|
||||
Stop collecting the screenshots
|
||||
"""
|
||||
if not self._collecting:
|
||||
raise RuntimeError('Screen capture collector is not running, nothing to stop')
|
||||
|
||||
self._poller.stop()
|
||||
self._collecting = False
|
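# Illustrative sketch of the ScreenCaptureCollector added above (assumes a
# `target` with capture_screen(), e.g. an AndroidTarget, and an existing host
# directory named 'caps' as a placeholder).
import time
from devlib.trace.screencapture import ScreenCaptureCollector

screencaps = ScreenCaptureCollector(target, output_path='caps', period=2)
screencaps.start()      # polls capture_screen() every `period` seconds
time.sleep(10)
screencaps.stop()       # screenshots are left in caps/ as screencap_<ts>.png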
94  devlib/trace/serial_trace.py  Normal file
@@ -0,0 +1,94 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import shutil
|
||||
from tempfile import NamedTemporaryFile
|
||||
from pexpect.exceptions import TIMEOUT
|
||||
|
||||
from devlib.trace import TraceCollector
|
||||
from devlib.utils.serial_port import get_connection
|
||||
|
||||
|
||||
class SerialTraceCollector(TraceCollector):
|
||||
|
||||
@property
|
||||
def collecting(self):
|
||||
return self._collecting
|
||||
|
||||
def __init__(self, target, serial_port, baudrate, timeout=20):
|
||||
super(SerialTraceCollector, self).__init__(target)
|
||||
self.serial_port = serial_port
|
||||
self.baudrate = baudrate
|
||||
self.timeout = timeout
|
||||
|
||||
self._serial_target = None
|
||||
self._conn = None
|
||||
self._tmpfile = None
|
||||
self._collecting = False
|
||||
|
||||
def reset(self):
|
||||
if self._collecting:
|
||||
raise RuntimeError("reset was called whilst collecting")
|
||||
|
||||
if self._tmpfile:
|
||||
self._tmpfile.close()
|
||||
self._tmpfile = None
|
||||
|
||||
def start(self):
|
||||
if self._collecting:
|
||||
raise RuntimeError("start was called whilst collecting")
|
||||
|
||||
|
||||
self._tmpfile = NamedTemporaryFile()
|
||||
start_marker = "-------- Starting serial logging --------\n"
|
||||
self._tmpfile.write(start_marker.encode('utf-8'))
|
||||
|
||||
self._serial_target, self._conn = get_connection(port=self.serial_port,
|
||||
baudrate=self.baudrate,
|
||||
timeout=self.timeout,
|
||||
logfile=self._tmpfile,
|
||||
init_dtr=0)
|
||||
self._collecting = True
|
||||
|
||||
def stop(self):
|
||||
if not self._collecting:
|
||||
raise RuntimeError("stop was called whilst not collecting")
|
||||
|
||||
# We expect the below to fail, but we need to get pexpect to
|
||||
# do something so that it interacts with the serial device,
|
||||
# and hence updates the logfile.
|
||||
try:
|
||||
self._serial_target.expect(".", timeout=1)
|
||||
except TIMEOUT:
|
||||
pass
|
||||
|
||||
self._serial_target.close()
|
||||
del self._conn
|
||||
|
||||
stop_marker = "-------- Stopping serial logging --------\n"
|
||||
self._tmpfile.write(stop_marker.encode('utf-8'))
|
||||
|
||||
self._collecting = False
|
||||
|
||||
def get_trace(self, outfile):
|
||||
if self._collecting:
|
||||
raise RuntimeError("get_trace was called whilst collecting")
|
||||
|
||||
self._tmpfile.flush()
|
||||
|
||||
shutil.copy(self._tmpfile.name, outfile)
|
||||
|
||||
self._tmpfile.close()
|
||||
self._tmpfile = None
|
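# Illustrative sketch of the SerialTraceCollector added above (the serial port
# and baud rate are placeholders for whatever UART the board exposes).
import time
from devlib.trace.serial_trace import SerialTraceCollector

uart_log = SerialTraceCollector(target, serial_port='/dev/ttyUSB0',
                                baudrate=115200)
uart_log.reset()
uart_log.start()        # everything printed on the UART is logged from here
time.sleep(10)
uart_log.stop()
uart_log.get_trace('serial.log')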
159  devlib/trace/systrace.py  Normal file
@@ -0,0 +1,159 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
|
||||
from shutil import copyfile
|
||||
from tempfile import NamedTemporaryFile
|
||||
|
||||
from devlib.exception import TargetStableError, HostError
|
||||
from devlib.trace import TraceCollector
|
||||
import devlib.utils.android
|
||||
from devlib.utils.misc import memoized
|
||||
|
||||
|
||||
DEFAULT_CATEGORIES = [
|
||||
'gfx',
|
||||
'view',
|
||||
'sched',
|
||||
'freq',
|
||||
'idle'
|
||||
]
|
||||
|
||||
class SystraceCollector(TraceCollector):
|
||||
"""
|
||||
A trace collector based on Systrace
|
||||
|
||||
For more details, see https://developer.android.com/studio/command-line/systrace
|
||||
|
||||
:param target: Devlib target
|
||||
:type target: AndroidTarget
|
||||
|
||||
:param outdir: Working directory to use on the host
|
||||
:type outdir: str
|
||||
|
||||
:param categories: Systrace categories to trace. See `available_categories`
|
||||
:type categories: list(str)
|
||||
|
||||
:param buffer_size: Buffer size in kb
|
||||
:type buffer_size: int
|
||||
|
||||
:param strict: Raise an exception if any of the requested categories
|
||||
are not available
|
||||
:type strict: bool
|
||||
"""
|
||||
|
||||
@property
|
||||
@memoized
|
||||
def available_categories(self):
|
||||
lines = subprocess.check_output(
|
||||
[self.systrace_binary, '-l'], universal_newlines=True
|
||||
).splitlines()
|
||||
|
||||
return [line.split()[0] for line in lines if line]
|
||||
|
||||
def __init__(self, target,
|
||||
categories=None,
|
||||
buffer_size=None,
|
||||
strict=False):
|
||||
|
||||
super(SystraceCollector, self).__init__(target)
|
||||
|
||||
self.categories = categories or DEFAULT_CATEGORIES
|
||||
self.buffer_size = buffer_size
|
||||
|
||||
self._systrace_process = None
|
||||
self._tmpfile = None
|
||||
|
||||
# Try to find a systrace binary
|
||||
self.systrace_binary = None
|
||||
|
||||
platform_tools = devlib.utils.android.platform_tools
|
||||
systrace_binary_path = os.path.join(platform_tools, 'systrace', 'systrace.py')
|
||||
if not os.path.isfile(systrace_binary_path):
|
||||
raise HostError('Could not find any systrace binary under {}'.format(platform_tools))
|
||||
|
||||
self.systrace_binary = systrace_binary_path
|
||||
|
||||
# Filter the requested categories
|
||||
for category in self.categories:
|
||||
if category not in self.available_categories:
|
||||
message = 'Category [{}] not available for tracing'.format(category)
|
||||
if strict:
|
||||
raise TargetStableError(message)
|
||||
self.logger.warning(message)
|
||||
|
||||
self.categories = list(set(self.categories) & set(self.available_categories))
|
||||
if not self.categories:
|
||||
raise TargetStableError('None of the requested categories are available')
|
||||
|
||||
def __del__(self):
|
||||
self.reset()
|
||||
|
||||
def _build_cmd(self):
|
||||
self._tmpfile = NamedTemporaryFile()
|
||||
|
||||
# pylint: disable=attribute-defined-outside-init
|
||||
self.systrace_cmd = '{} -o {} -e {}'.format(
|
||||
self.systrace_binary,
|
||||
self._tmpfile.name,
|
||||
self.target.adb_name
|
||||
)
|
||||
|
||||
if self.buffer_size:
|
||||
self.systrace_cmd += ' -b {}'.format(self.buffer_size)
|
||||
|
||||
self.systrace_cmd += ' {}'.format(' '.join(self.categories))
|
||||
|
||||
def reset(self):
|
||||
if self._systrace_process:
|
||||
self.stop()
|
||||
|
||||
if self._tmpfile:
|
||||
self._tmpfile.close()
|
||||
self._tmpfile = None
|
||||
|
||||
def start(self):
|
||||
if self._systrace_process:
|
||||
raise RuntimeError("Tracing is already underway, call stop() first")
|
||||
|
||||
self.reset()
|
||||
|
||||
self._build_cmd()
|
||||
|
||||
self._systrace_process = subprocess.Popen(
|
||||
self.systrace_cmd,
|
||||
stdin=subprocess.PIPE,
|
||||
shell=True,
|
||||
universal_newlines=True
|
||||
)
|
||||
|
||||
def stop(self):
|
||||
if not self._systrace_process:
|
||||
raise RuntimeError("No tracing to stop, call start() first")
|
||||
|
||||
# Systrace expects <enter> to stop
|
||||
self._systrace_process.communicate('\n')
|
||||
self._systrace_process = None
|
||||
|
||||
def get_trace(self, outfile):
|
||||
if self._systrace_process:
|
||||
raise RuntimeError("Tracing is underway, call stop() first")
|
||||
|
||||
if not self._tmpfile:
|
||||
raise RuntimeError("No tracing data available")
|
||||
|
||||
copyfile(self._tmpfile.name, outfile)
|
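# Illustrative sketch of the SystraceCollector added above (assumes systrace.py
# from the Android platform-tools on the host and an AndroidTarget `target`;
# the requested categories must appear in `available_categories`).
import time
from devlib.trace.systrace import SystraceCollector

systrace = SystraceCollector(target, categories=['sched', 'freq'],
                             buffer_size=1024)
systrace.start()
time.sleep(5)
systrace.stop()
systrace.get_trace('trace.html')   # systrace output is an HTML report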
@@ -12,5 +12,3 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
|
420  devlib/utils/android.py  Normal file → Executable file
@@ -1,4 +1,4 @@
|
||||
# Copyright 2013-2015 ARM Limited
|
||||
# Copyright 2013-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -20,25 +20,36 @@ Utility functions for working with Android devices through adb.
|
||||
"""
|
||||
# pylint: disable=E1103
|
||||
import os
|
||||
import time
|
||||
import subprocess
|
||||
import logging
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
import logging
|
||||
import tempfile
|
||||
import subprocess
|
||||
from collections import defaultdict
|
||||
import pexpect
|
||||
import xml.etree.ElementTree
|
||||
import zipfile
|
||||
|
||||
from devlib.exception import TargetError, HostError, DevlibError
|
||||
from devlib.utils.misc import check_output, which, memoized
|
||||
from devlib.utils.misc import escape_single_quotes, escape_double_quotes
|
||||
from pipes import quote
|
||||
|
||||
from devlib.exception import TargetTransientError, TargetStableError, HostError
|
||||
from devlib.utils.misc import check_output, which, ABI_MAP
|
||||
|
||||
|
||||
logger = logging.getLogger('android')
|
||||
|
||||
MAX_ATTEMPTS = 5
|
||||
AM_START_ERROR = re.compile(r"Error: Activity class {[\w|.|/]*} does not exist")
|
||||
AM_START_ERROR = re.compile(r"Error: Activity.*")
|
||||
|
||||
# See:
|
||||
# http://developer.android.com/guide/topics/manifest/uses-sdk-element.html#ApiLevels
|
||||
ANDROID_VERSION_MAP = {
|
||||
28: 'P',
|
||||
27: 'OREO_MR1',
|
||||
26: 'OREO',
|
||||
25: 'NOUGAT_MR1',
|
||||
24: 'NOUGAT',
|
||||
23: 'MARSHMALLOW',
|
||||
22: 'LOLLYPOP_MR1',
|
||||
21: 'LOLLYPOP',
|
||||
@@ -64,6 +75,12 @@ ANDROID_VERSION_MAP = {
|
||||
1: 'BASE',
|
||||
}
|
||||
|
||||
# See https://developer.android.com/reference/android/content/Intent.html#setFlags(int)
|
||||
INTENT_FLAGS = {
|
||||
'ACTIVITY_NEW_TASK' : 0x10000000,
|
||||
'ACTIVITY_CLEAR_TASK' : 0x00008000
|
||||
}
|
||||
|
||||
|
||||
# Initialized in functions near the bottom of the file
|
||||
android_home = None
|
||||
@@ -83,7 +100,7 @@ class AndroidProperties(object):
|
||||
self._properties = dict(re.findall(r'\[(.*?)\]:\s+\[(.*?)\]', text))
|
||||
|
||||
def iteritems(self):
|
||||
return self._properties.iteritems()
|
||||
return iter(self._properties.items())
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self._properties)
|
||||
@@ -100,6 +117,7 @@ class AdbDevice(object):
|
||||
self.name = name
|
||||
self.status = status
|
||||
|
||||
# pylint: disable=undefined-variable
|
||||
def __cmp__(self, other):
|
||||
if isinstance(other, AdbDevice):
|
||||
return cmp(self.name, other.name)
|
||||
@@ -116,6 +134,8 @@ class ApkInfo(object):
|
||||
|
||||
version_regex = re.compile(r"name='(?P<name>[^']+)' versionCode='(?P<vcode>[^']+)' versionName='(?P<vname>[^']+)'")
|
||||
name_regex = re.compile(r"name='(?P<name>[^']+)'")
|
||||
permission_regex = re.compile(r"name='(?P<permission>[^']+)'")
|
||||
activity_regex = re.compile(r'\s*A:\s*android:name\(0x\d+\)=".(?P<name>\w+)"')
|
||||
|
||||
def __init__(self, path=None):
|
||||
self.path = path
|
||||
@@ -124,13 +144,14 @@ class ApkInfo(object):
|
||||
self.label = None
|
||||
self.version_name = None
|
||||
self.version_code = None
|
||||
self.native_code = None
|
||||
self.permissions = []
|
||||
self.parse(path)
|
||||
|
||||
# pylint: disable=too-many-branches
|
||||
def parse(self, apk_path):
|
||||
_check_env()
|
||||
command = [aapt, 'dump', 'badging', apk_path]
|
||||
logger.debug(' '.join(command))
|
||||
output = subprocess.check_output(command)
|
||||
output = self._run([aapt, 'dump', 'badging', apk_path])
|
||||
for line in output.split('\n'):
|
||||
if line.startswith('application-label:'):
|
||||
self.label = line.split(':')[1].strip().replace('\'', '')
|
||||
@@ -143,9 +164,70 @@ class ApkInfo(object):
|
||||
elif line.startswith('launchable-activity:'):
|
||||
match = self.name_regex.search(line)
|
||||
self.activity = match.group('name')
|
||||
elif line.startswith('native-code'):
|
||||
apk_abis = [entry.strip() for entry in line.split(':')[1].split("'") if entry.strip()]
|
||||
mapped_abis = []
|
||||
for apk_abi in apk_abis:
|
||||
found = False
|
||||
for abi, architectures in ABI_MAP.items():
|
||||
if apk_abi in architectures:
|
||||
mapped_abis.append(abi)
|
||||
found = True
|
||||
break
|
||||
if not found:
|
||||
mapped_abis.append(apk_abi)
|
||||
self.native_code = mapped_abis
|
||||
elif line.startswith('uses-permission:'):
|
||||
match = self.permission_regex.search(line)
|
||||
if match:
|
||||
self.permissions.append(match.group('permission'))
|
||||
else:
|
||||
pass # not interested
|
||||
|
||||
self._apk_path = apk_path
|
||||
self._activities = None
|
||||
self._methods = None
|
||||
|
||||
@property
|
||||
def activities(self):
|
||||
if self._activities is None:
|
||||
cmd = [aapt, 'dump', 'xmltree', self._apk_path,
|
||||
'AndroidManifest.xml']
|
||||
matched_activities = self.activity_regex.finditer(self._run(cmd))
|
||||
self._activities = [m.group('name') for m in matched_activities]
|
||||
return self._activities
|
||||
|
||||
@property
|
||||
def methods(self):
|
||||
if self._methods is None:
|
||||
with zipfile.ZipFile(self._apk_path, 'r') as z:
|
||||
extracted = z.extract('classes.dex', tempfile.gettempdir())
|
||||
|
||||
dexdump = os.path.join(os.path.dirname(aapt), 'dexdump')
|
||||
command = [dexdump, '-l', 'xml', extracted]
|
||||
dump = self._run(command)
|
||||
|
||||
xml_tree = xml.etree.ElementTree.fromstring(dump)
|
||||
|
||||
package = next(i for i in xml_tree.iter('package')
|
||||
if i.attrib['name'] == self.package)
|
||||
|
||||
self._methods = [(meth.attrib['name'], klass.attrib['name'])
|
||||
for klass in package.iter('class')
|
||||
for meth in klass.iter('method')]
|
||||
return self._methods
|
||||
|
||||
def _run(self, command):
|
||||
logger.debug(' '.join(command))
|
||||
try:
|
||||
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
|
||||
if sys.version_info[0] == 3:
|
||||
output = output.decode(sys.stdout.encoding or 'utf-8', 'replace')
|
||||
except subprocess.CalledProcessError as e:
|
||||
raise HostError('Error while running "{}":\n{}'
|
||||
.format(command, e.output))
|
||||
return output
|
||||
|
||||
|
||||
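# Illustrative sketch of the extended ApkInfo parsing above (assumes aapt and
# dexdump from the Android build-tools are available; 'app.apk' is a
# placeholder path).
from devlib.utils.android import ApkInfo

apk = ApkInfo('app.apk')
print(apk.package)          # badging information parsed via aapt
print(apk.native_code)      # ABIs mapped through ABI_MAP
print(apk.permissions)      # uses-permission entries
print(apk.activities)       # parsed from AndroidManifest.xml on demand
print(apk.methods[:5])      # (method, class) pairs extracted from classes.dex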
class AdbConnection(object):
|
||||
|
||||
@@ -159,18 +241,6 @@ class AdbConnection(object):
|
||||
def name(self):
|
||||
return self.device
|
||||
|
||||
@property
|
||||
@memoized
|
||||
def newline_separator(self):
|
||||
output = adb_command(self.device,
|
||||
"shell '({}); echo \"\n$?\"'".format(self.ls_command))
|
||||
if output.endswith('\r\n'):
|
||||
return '\r\n'
|
||||
elif output.endswith('\n'):
|
||||
return '\n'
|
||||
else:
|
||||
raise DevlibError("Unknown line ending")
|
||||
|
||||
# Again, we need to handle boards where the default output format from ls is
|
||||
# single column *and* boards where the default output is multi-column.
|
||||
# We need to do this purely because the '-1' option causes errors on older
|
||||
@@ -178,7 +248,7 @@ class AdbConnection(object):
|
||||
def _setup_ls(self):
|
||||
command = "shell '(ls -1); echo \"\n$?\"'"
|
||||
try:
|
||||
output = adb_command(self.device, command, timeout=self.timeout)
|
||||
output = adb_command(self.device, command, timeout=self.timeout, adb_server=self.adb_server)
|
||||
except subprocess.CalledProcessError as e:
|
||||
raise HostError(
|
||||
'Failed to set up ls command on Android device. Output:\n'
|
||||
@@ -189,13 +259,15 @@ class AdbConnection(object):
|
||||
self.ls_command = 'ls -1'
|
||||
else:
|
||||
self.ls_command = 'ls'
|
||||
logger.info("ls command is set to {}".format(self.ls_command))
|
||||
logger.debug("ls command is set to {}".format(self.ls_command))
|
||||
|
||||
def __init__(self, device=None, timeout=None, platform=None):
|
||||
# pylint: disable=unused-argument
|
||||
def __init__(self, device=None, timeout=None, platform=None, adb_server=None):
|
||||
self.timeout = timeout if timeout is not None else self.default_timeout
|
||||
if device is None:
|
||||
device = adb_get_device(timeout=timeout)
|
||||
device = adb_get_device(timeout=timeout, adb_server=adb_server)
|
||||
self.device = device
|
||||
self.adb_server = adb_server
|
||||
adb_connect(self.device)
|
||||
AdbConnection.active_connections[self.device] += 1
|
||||
self._setup_ls()
|
||||
@@ -203,10 +275,10 @@ class AdbConnection(object):
|
||||
def push(self, source, dest, timeout=None):
|
||||
if timeout is None:
|
||||
timeout = self.timeout
|
||||
command = "push '{}' '{}'".format(source, dest)
|
||||
command = "push {} {}".format(quote(source), quote(dest))
|
||||
if not os.path.exists(source):
|
||||
raise HostError('No such file "{}"'.format(source))
|
||||
return adb_command(self.device, command, timeout=timeout)
|
||||
return adb_command(self.device, command, timeout=timeout, adb_server=self.adb_server)
|
||||
|
||||
def pull(self, source, dest, timeout=None):
|
||||
if timeout is None:
|
||||
@@ -215,21 +287,28 @@ class AdbConnection(object):
|
||||
if os.path.isdir(dest) and \
|
||||
('*' in source or '?' in source):
|
||||
command = 'shell {} {}'.format(self.ls_command, source)
|
||||
output = adb_command(self.device, command, timeout=timeout)
|
||||
output = adb_command(self.device, command, timeout=timeout, adb_server=self.adb_server)
|
||||
for line in output.splitlines():
|
||||
command = "pull '{}' '{}'".format(line.strip(), dest)
|
||||
adb_command(self.device, command, timeout=timeout)
|
||||
command = "pull {} {}".format(quote(line.strip()), quote(dest))
|
||||
adb_command(self.device, command, timeout=timeout, adb_server=self.adb_server)
|
||||
return
|
||||
command = "pull '{}' '{}'".format(source, dest)
|
||||
return adb_command(self.device, command, timeout=timeout)
|
||||
command = "pull {} {}".format(quote(source), quote(dest))
|
||||
return adb_command(self.device, command, timeout=timeout, adb_server=self.adb_server)
|
||||
|
||||
# pylint: disable=unused-argument
|
||||
def execute(self, command, timeout=None, check_exit_code=False,
|
||||
as_root=False, strip_colors=True):
|
||||
return adb_shell(self.device, command, timeout, check_exit_code,
|
||||
as_root, self.newline_separator)
|
||||
as_root=False, strip_colors=True, will_succeed=False):
|
||||
try:
|
||||
return adb_shell(self.device, command, timeout, check_exit_code,
|
||||
as_root, adb_server=self.adb_server)
|
||||
except TargetStableError as e:
|
||||
if will_succeed:
|
||||
raise TargetTransientError(e)
|
||||
else:
|
||||
raise
|
||||
|
||||
def background(self, command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, as_root=False):
|
||||
return adb_background_shell(self.device, command, stdout, stderr, as_root)
|
||||
return adb_background_shell(self.device, command, stdout, stderr, as_root, adb_server=self.adb_server)
|
||||
|
||||
def close(self):
|
||||
AdbConnection.active_connections[self.device] -= 1
|
||||
@@ -246,7 +325,7 @@ class AdbConnection(object):
|
||||
|
||||
def fastboot_command(command, timeout=None, device=None):
|
||||
_check_env()
|
||||
target = '-s {}'.format(device) if device else ''
|
||||
target = '-s {}'.format(quote(device)) if device else ''
|
||||
full_command = 'fastboot {} {}'.format(target, command)
|
||||
logger.debug(full_command)
|
||||
output, _ = check_output(full_command, timeout, shell=True)
|
||||
@@ -254,11 +333,11 @@ def fastboot_command(command, timeout=None, device=None):
|
||||
|
||||
|
||||
def fastboot_flash_partition(partition, path_to_image):
|
||||
command = 'flash {} {}'.format(partition, path_to_image)
|
||||
command = 'flash {} {}'.format(quote(partition), quote(path_to_image))
|
||||
fastboot_command(command)
|
||||
|
||||
|
||||
def adb_get_device(timeout=None):
|
||||
def adb_get_device(timeout=None, adb_server=None):
|
||||
"""
|
||||
Returns the serial number of a connected android device.
|
||||
|
||||
@@ -267,13 +346,17 @@ def adb_get_device(timeout=None):
|
||||
"""
|
||||
# TODO this is a hacky way to issue a adb command to all listed devices
|
||||
|
||||
# Ensure server is started so the 'daemon started successfully' message
|
||||
# doesn't confuse the parsing below
|
||||
adb_command(None, 'start-server', adb_server=adb_server)
|
||||
|
||||
# The output of calling adb devices consists of a heading line then
|
||||
# a list of the devices sperated by new line
|
||||
# The last line is a blank new line. in otherwords, if there is a device found
|
||||
# then the output length is 2 + (1 for each device)
|
||||
start = time.time()
|
||||
while True:
|
||||
output = adb_command(None, "devices").splitlines() # pylint: disable=E1103
|
||||
output = adb_command(None, "devices", adb_server=adb_server).splitlines() # pylint: disable=E1103
|
||||
output_length = len(output)
|
||||
if output_length == 3:
|
||||
# output[1] is the 2nd line in the output which has the device name
|
||||
@@ -292,18 +375,15 @@ def adb_get_device(timeout=None):
|
||||
|
||||
def adb_connect(device, timeout=None, attempts=MAX_ATTEMPTS):
|
||||
_check_env()
|
||||
# Connect is required only for ADB-over-IP
|
||||
if "." not in device:
|
||||
logger.debug('Device connected via USB, connect not required')
|
||||
return
|
||||
tries = 0
|
||||
output = None
|
||||
while tries <= attempts:
|
||||
tries += 1
|
||||
if device:
|
||||
command = 'adb connect {}'.format(device)
|
||||
logger.debug(command)
|
||||
output, _ = check_output(command, shell=True, timeout=timeout)
|
||||
if "." in device: # Connect is required only for ADB-over-IP
|
||||
command = 'adb connect {}'.format(quote(device))
|
||||
logger.debug(command)
|
||||
output, _ = check_output(command, shell=True, timeout=timeout)
|
||||
if _ping(device):
|
||||
break
|
||||
time.sleep(10)
|
||||
@@ -323,27 +403,31 @@ def adb_disconnect(device):
|
||||
logger.debug(command)
|
||||
retval = subprocess.call(command, stdout=open(os.devnull, 'wb'), shell=True)
|
||||
if retval:
|
||||
raise TargetError('"{}" returned {}'.format(command, retval))
|
||||
raise TargetTransientError('"{}" returned {}'.format(command, retval))
|
||||
|
||||
|
||||
def _ping(device):
|
||||
_check_env()
|
||||
device_string = ' -s {}'.format(device) if device else ''
|
||||
command = "adb{} shell \"ls / > /dev/null\"".format(device_string)
|
||||
device_string = ' -s {}'.format(quote(device)) if device else ''
|
||||
command = "adb{} shell \"ls /data/local/tmp > /dev/null\"".format(device_string)
|
||||
logger.debug(command)
|
||||
result = subprocess.call(command, stderr=subprocess.PIPE, shell=True)
|
||||
if not result:
|
||||
if not result: # pylint: disable=simplifiable-if-statement
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
# pylint: disable=too-many-locals
|
||||
def adb_shell(device, command, timeout=None, check_exit_code=False,
|
||||
as_root=False, newline_separator='\r\n'): # NOQA
|
||||
as_root=False, adb_server=None): # NOQA
|
||||
_check_env()
|
||||
if as_root:
|
||||
command = 'echo \'{}\' | su'.format(escape_single_quotes(command))
|
||||
device_part = ['-s', device] if device else []
|
||||
command = 'echo {} | su'.format(quote(command))
|
||||
device_part = []
|
||||
if adb_server:
|
||||
device_part = ['-H', adb_server]
|
||||
device_part += ['-s', device] if device else []
|
||||
|
||||
# On older combinations of ADB/Android versions, the adb host command always
|
||||
# exits with 0 if it was able to run the command on the target, even if the
|
||||
@@ -353,12 +437,16 @@ def adb_shell(device, command, timeout=None, check_exit_code=False,
|
||||
adb_shell_command = '({}); echo \"\n$?\"'.format(command)
|
||||
actual_command = ['adb'] + device_part + ['shell', adb_shell_command]
|
||||
logger.debug('adb {} shell {}'.format(' '.join(device_part), command))
|
||||
raw_output, error = check_output(actual_command, timeout, shell=False)
|
||||
try:
|
||||
raw_output, _ = check_output(actual_command, timeout, shell=False, combined_output=True)
|
||||
except subprocess.CalledProcessError as e:
|
||||
raise TargetStableError(str(e))
|
||||
|
||||
if raw_output:
|
||||
try:
|
||||
output, exit_code, _ = raw_output.rsplit(newline_separator, 2)
|
||||
output, exit_code, _ = raw_output.replace('\r\n', '\n').replace('\r', '\n').rsplit('\n', 2)
|
||||
except ValueError:
|
||||
exit_code, _ = raw_output.rsplit(newline_separator, 1)
|
||||
exit_code, _ = raw_output.replace('\r\n', '\n').replace('\r', '\n').rsplit('\n', 1)
|
||||
output = ''
|
||||
else: # raw_output is empty
|
||||
exit_code = '969696' # just because
|
||||
@@ -366,23 +454,24 @@ def adb_shell(device, command, timeout=None, check_exit_code=False,
|
||||
|
||||
if check_exit_code:
|
||||
exit_code = exit_code.strip()
|
||||
re_search = AM_START_ERROR.findall(output)
|
||||
if exit_code.isdigit():
|
||||
if int(exit_code):
|
||||
message = ('Got exit code {}\nfrom target command: {}\n'
|
||||
'STDOUT: {}\nSTDERR: {}')
|
||||
raise TargetError(message.format(exit_code, command, output, error))
|
||||
elif AM_START_ERROR.findall(output):
|
||||
message = 'Could not start activity; got the following:'
|
||||
message += '\n{}'.format(AM_START_ERROR.findall(output)[0])
|
||||
raise TargetError(message)
|
||||
else: # not all digits
|
||||
if AM_START_ERROR.findall(output):
|
||||
'OUTPUT: {}')
|
||||
raise TargetStableError(message.format(exit_code, command, output))
|
||||
elif re_search:
|
||||
message = 'Could not start activity; got the following:\n{}'
|
||||
raise TargetError(message.format(AM_START_ERROR.findall(output)[0]))
|
||||
raise TargetStableError(message.format(re_search[0]))
|
||||
else: # not all digits
|
||||
if re_search:
|
||||
message = 'Could not start activity; got the following:\n{}'
|
||||
raise TargetStableError(message.format(re_search[0]))
|
||||
else:
|
||||
message = 'adb has returned early; did not get an exit code. '\
|
||||
'Was kill-server invoked?'
|
||||
raise TargetError(message)
|
||||
'Was kill-server invoked?\nOUTPUT:\n-----\n{}\n'\
|
||||
'-----'
|
||||
raise TargetTransientError(message.format(raw_output))
|
||||
|
||||
return output
|
||||
|
||||
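# Sketch of calling adb_shell directly with the reworked exit-code handling
# above (the serial is a placeholder; adb must be on the host).
from devlib.utils.android import adb_shell

output = adb_shell('emulator-5554', 'cat /proc/version', check_exit_code=True)
print(output)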
@@ -390,19 +479,22 @@ def adb_shell(device, command, timeout=None, check_exit_code=False,
|
||||
def adb_background_shell(device, command,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
as_root=False):
|
||||
as_root=False,
|
||||
adb_server=None):
|
||||
"""Runs the sepcified command in a subprocess, returning the the Popen object."""
|
||||
_check_env()
|
||||
if as_root:
|
||||
command = 'echo \'{}\' | su'.format(escape_single_quotes(command))
|
||||
device_string = ' -s {}'.format(device) if device else ''
|
||||
full_command = 'adb{} shell "{}"'.format(device_string, escape_double_quotes(command))
|
||||
command = 'echo {} | su'.format(quote(command))
|
||||
|
||||
device_string = ' -H {}'.format(adb_server) if adb_server else ''
|
||||
device_string += ' -s {}'.format(device) if device else ''
|
||||
full_command = 'adb{} shell {}'.format(device_string, quote(command))
|
||||
logger.debug(full_command)
|
||||
return subprocess.Popen(full_command, stdout=stdout, stderr=stderr, shell=True)
|
||||
|
||||
|
||||
def adb_list_devices():
|
||||
output = adb_command(None, 'devices')
|
||||
def adb_list_devices(adb_server=None):
|
||||
output = adb_command(None, 'devices', adb_server=adb_server)
|
||||
devices = []
|
||||
for line in output.splitlines():
|
||||
parts = [p.strip() for p in line.split()]
|
||||
@@ -411,14 +503,39 @@ def adb_list_devices():
|
||||
return devices
|
||||
|
||||
|
||||
def adb_command(device, command, timeout=None):
|
||||
def get_adb_command(device, command, adb_server=None):
|
||||
_check_env()
|
||||
device_string = ' -s {}'.format(device) if device else ''
|
||||
full_command = "adb{} {}".format(device_string, command)
|
||||
device_string = ""
|
||||
if adb_server != None:
|
||||
device_string = ' -H {}'.format(adb_server)
|
||||
device_string += ' -s {}'.format(device) if device else ''
|
||||
return "adb{} {}".format(device_string, command)
|
||||
|
||||
def adb_command(device, command, timeout=None, adb_server=None):
|
||||
full_command = get_adb_command(device, command, adb_server)
|
||||
logger.debug(full_command)
|
||||
output, _ = check_output(full_command, timeout, shell=True)
|
||||
return output
|
||||
|
||||
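# Sketch of get_adb_command with the new adb_server support (server address
# and serial are placeholders; adb must be installed on the host).
from devlib.utils.android import get_adb_command

cmd = get_adb_command('emulator-5554', 'shell ls /data/local/tmp',
                      adb_server='127.0.0.1')
# cmd == 'adb -H 127.0.0.1 -s emulator-5554 shell ls /data/local/tmp'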
def grant_app_permissions(target, package):
|
||||
"""
|
||||
Grant an app all the permissions it may ask for
|
||||
"""
|
||||
dumpsys = target.execute('dumpsys package {}'.format(package))
|
||||
|
||||
permissions = re.search(
|
||||
'requested permissions:\s*(?P<permissions>(android.permission.+\s*)+)', dumpsys
|
||||
)
|
||||
if permissions is None:
|
||||
return
|
||||
permissions = permissions.group('permissions').replace(" ", "").splitlines()
|
||||
|
||||
for permission in permissions:
|
||||
try:
|
||||
target.execute('pm grant {} {}'.format(package, permission))
|
||||
except TargetStableError:
|
||||
logger.debug('Cannot grant {}'.format(permission))
|
||||
|
||||
|
||||
# Messy environment initialisation stuff...
|
||||
|
||||
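# Sketch of grant_app_permissions (assumes an AndroidTarget `target`; the
# package name is a placeholder). Each requested permission found in dumpsys
# is granted with `pm grant`, ignoring the ones that cannot be granted.
from devlib.utils.android import grant_app_permissions

grant_app_permissions(target, 'com.example.myapp')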
@@ -486,3 +603,146 @@ def _check_env():
|
||||
platform_tools = _env.platform_tools
|
||||
adb = _env.adb
|
||||
aapt = _env.aapt
|
||||
|
||||
class LogcatMonitor(object):
|
||||
"""
|
||||
Helper class for monitoring Android's logcat
|
||||
|
||||
:param target: Android target to monitor
|
||||
:type target: :class:`AndroidTarget`
|
||||
|
||||
:param regexps: List of uncompiled regular expressions to filter on the
|
||||
device. Logcat entries that don't match any will not be
|
||||
seen. If omitted, all entries will be sent to host.
|
||||
:type regexps: list(str)
|
||||
"""
|
||||
|
||||
@property
|
||||
def logfile(self):
|
||||
return self._logfile
|
||||
|
||||
def __init__(self, target, regexps=None):
|
||||
super(LogcatMonitor, self).__init__()
|
||||
|
||||
self.target = target
|
||||
self._regexps = regexps
|
||||
self._logcat = None
|
||||
self._logfile = None
|
||||
|
||||
def start(self, outfile=None):
|
||||
"""
|
||||
Start logcat and begin monitoring
|
||||
|
||||
:param outfile: Optional path to file to store all logcat entries
|
||||
:type outfile: str
|
||||
"""
|
||||
if outfile:
|
||||
self._logfile = open(outfile, 'w')
|
||||
else:
|
||||
self._logfile = tempfile.NamedTemporaryFile()
|
||||
|
||||
self.target.clear_logcat()
|
||||
|
||||
logcat_cmd = 'logcat'
|
||||
|
||||
# Join all requested regexps with an 'or'
|
||||
if self._regexps:
|
||||
regexp = '{}'.format('|'.join(self._regexps))
|
||||
if len(self._regexps) > 1:
|
||||
regexp = '({})'.format(regexp)
|
||||
# Logcat on older version of android do not support the -e argument
|
||||
# so fall back to using grep.
|
||||
if self.target.get_sdk_version() > 23:
|
||||
logcat_cmd = '{} -e {}'.format(logcat_cmd, quote(regexp))
|
||||
else:
|
||||
logcat_cmd = '{} | grep {}'.format(logcat_cmd, quote(regexp))
|
||||
|
||||
logcat_cmd = get_adb_command(self.target.conn.device, logcat_cmd)
|
||||
|
||||
logger.debug('logcat command ="{}"'.format(logcat_cmd))
|
||||
self._logcat = pexpect.spawn(logcat_cmd, logfile=self._logfile)
|
||||
|
||||
def stop(self):
|
||||
self._logcat.terminate()
|
||||
self._logfile.close()
|
||||
|
||||
def get_log(self):
|
||||
"""
|
||||
Return the list of lines found by the monitor
|
||||
"""
|
||||
# Unless we tell pexpect to 'expect' something, it won't read from
|
||||
# logcat's buffer or write into our logfile. We'll need to force it to
|
||||
# read any pending logcat output.
|
||||
while True:
|
||||
try:
|
||||
read_size = 1024 * 8
|
||||
# This will read up to read_size bytes, but only those that are
|
||||
# already ready (i.e. it won't block). If there aren't any bytes
|
||||
# already available it raises pexpect.TIMEOUT.
|
||||
buf = self._logcat.read_nonblocking(read_size, timeout=0)
|
||||
|
||||
# We can't just keep calling read_nonblocking until we get a
|
||||
# pexpect.TIMEOUT (i.e. until we don't find any available
|
||||
# bytes), because logcat might be writing bytes the whole time -
|
||||
# in that case we might never return from this function. In
|
||||
# fact, we only care about bytes that were written before we
|
||||
# entered this function. So, if we read read_size bytes (as many
|
||||
# as we were allowed to), then we'll assume there are more bytes
|
||||
# that have already been sitting in the output buffer of the
|
||||
# logcat command. If not, we'll assume we read everything that
|
||||
# had already been written.
|
||||
if len(buf) == read_size:
|
||||
continue
|
||||
else:
|
||||
break
|
||||
except pexpect.TIMEOUT:
|
||||
# No available bytes to read. No prob, logcat just hasn't
|
||||
# printed anything since pexpect last read from its buffer.
|
||||
break
|
||||
|
||||
with open(self._logfile.name) as fh:
|
||||
return [line for line in fh]
|
||||
|
||||
def clear_log(self):
|
||||
with open(self._logfile.name, 'w') as _:
|
||||
pass
|
||||
|
||||
def search(self, regexp):
|
||||
"""
|
||||
Search for a line that matches a regexp in the logcat log.
Return immediately.
|
||||
"""
|
||||
return [line for line in self.get_log() if re.match(regexp, line)]
|
||||
|
||||
def wait_for(self, regexp, timeout=30):
|
||||
"""
|
||||
Search for a line that matches a regexp in the logcat log.
|
||||
Wait for it to appear if it's not found
|
||||
|
||||
:param regexp: regexp to search
|
||||
:type regexp: str
|
||||
|
||||
:param timeout: Timeout in seconds before raising RuntimeError.
|
||||
``None`` means wait indefinitely
|
||||
:type timeout: number
|
||||
|
||||
:returns: List of matched strings
|
||||
"""
|
||||
log = self.get_log()
|
||||
res = [line for line in log if re.match(regexp, line)]
|
||||
|
||||
# Found some matches, return them
|
||||
if res:
|
||||
return res
|
||||
|
||||
# Store the number of lines we've searched already, so we don't have to
|
||||
# re-grep them after 'expect' returns
|
||||
next_line_num = len(log)
|
||||
|
||||
try:
|
||||
self._logcat.expect(regexp, timeout=timeout)
|
||||
except pexpect.TIMEOUT:
|
||||
raise RuntimeError('Logcat monitor timeout ({}s)'.format(timeout))
|
||||
|
||||
return [line for line in self.get_log()[next_line_num:]
|
||||
if re.match(regexp, line)]
|
||||
|
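# Illustrative sketch of LogcatMonitor (assumes an AndroidTarget `target`; the
# regexp is a placeholder for whichever log lines are of interest).
from devlib.utils.android import LogcatMonitor

monitor = LogcatMonitor(target, regexps=['Displayed .*'])
monitor.start('logcat.txt')                     # spawn a filtered adb logcat
matches = monitor.wait_for('Displayed .*', timeout=60)
monitor.stop()
print(matches)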
100  devlib/utils/csvutil.py  Normal file
@@ -0,0 +1,100 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
'''
|
||||
Due to the change in the nature of "binary mode" when opening files in
|
||||
Python 3, the way files need to be opened for ``csv.reader`` and ``csv.writer``
|
||||
is different from Python 2.
|
||||
|
||||
The functions in this module are intended to hide these differences allowing
|
||||
the rest of the code to create csv readers/writers without worrying about which
|
||||
Python version it is running under.
|
||||
|
||||
First up are ``csvwriter`` and ``csvreader`` context managers that handle the
|
||||
opening and closing of the underlying file. These are intended to replace the
|
||||
most common usage pattern
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
with open(filepath, 'wb') as wfh: # or open(filepath, 'w', newline='') in Python 3
|
||||
writer = csv.writer(wfh)
|
||||
writer.writerows(data)
|
||||
|
||||
|
||||
with
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
with csvwriter(filepath) as writer:
|
||||
writer.writerows(data)
|
||||
|
||||
|
||||
``csvreader`` works in an analogous way. ``csvreader`` and ``csvwriter`` can take
|
||||
additional arguments which will be passed directly to the
|
||||
``csv.reader``/``csv.writer`` calls.
|
||||
|
||||
In some cases, it is desirable not to use a context manager (e.g. if the
|
||||
reader/writer is intended to be returned from the function that creates it). For
|
||||
such cases, alternative functions, ``create_reader`` and ``create_writer``,
|
||||
exist. These return a two-tuple, with the created reader/writer as the first
|
||||
element, and the corresponding ``FileObject`` as the second. It is the
|
||||
responsibility of the calling code to ensure that the file is closed properly.
|
||||
|
||||
'''
|
||||
import csv
|
||||
import sys
|
||||
from contextlib import contextmanager
|
||||
|
||||
|
||||
@contextmanager
|
||||
def csvwriter(filepath, *args, **kwargs):
|
||||
if sys.version_info[0] == 3:
|
||||
wfh = open(filepath, 'w', newline='')
|
||||
else:
|
||||
wfh = open(filepath, 'wb')
|
||||
|
||||
try:
|
||||
yield csv.writer(wfh, *args, **kwargs)
|
||||
finally:
|
||||
wfh.close()
|
||||
|
||||
|
||||
@contextmanager
|
||||
def csvreader(filepath, *args, **kwargs):
|
||||
if sys.version_info[0] == 3:
|
||||
fh = open(filepath, 'r', newline='')
|
||||
else:
|
||||
fh = open(filepath, 'rb')
|
||||
|
||||
try:
|
||||
yield csv.reader(fh, *args, **kwargs)
|
||||
finally:
|
||||
fh.close()
|
||||
|
||||
|
||||
def create_writer(filepath, *args, **kwargs):
|
||||
if sys.version_info[0] == 3:
|
||||
wfh = open(filepath, 'w', newline='')
|
||||
else:
|
||||
wfh = open(filepath, 'wb')
|
||||
return csv.writer(wfh, *args, **kwargs), wfh
|
||||
|
||||
|
||||
def create_reader(filepath, *args, **kwargs):
|
||||
if sys.version_info[0] == 3:
|
||||
fh = open(filepath, 'r', newline='')
|
||||
else:
|
||||
fh = open(filepath, 'rb')
|
||||
return csv.reader(fh, *args, **kwargs), fh
|
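A short usage sketch of the helpers above (the file path is illustrative):

.. code-block:: python

    from devlib.utils.csvutil import csvwriter, create_writer

    rows = [['cpu', 'freq'], [0, 1400000]]

    # Context-manager form: the underlying file is closed automatically.
    with csvwriter('/tmp/example.csv') as writer:
        writer.writerows(rows)

    # create_writer form: the caller owns the file handle and must close it.
    writer, wfh = create_writer('/tmp/example.csv')
    try:
        writer.writerows(rows)
    finally:
        wfh.close()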
52
devlib/utils/gem5.py
Normal file
@@ -0,0 +1,52 @@
|
||||
# Copyright 2017-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import re
|
||||
import logging
|
||||
|
||||
from devlib.utils.types import numeric
|
||||
|
||||
|
||||
GEM5STATS_FIELD_REGEX = re.compile("^(?P<key>[^- ]\S*) +(?P<value>[^#]+).+$")
|
||||
GEM5STATS_DUMP_HEAD = '---------- Begin Simulation Statistics ----------'
|
||||
GEM5STATS_DUMP_TAIL = '---------- End Simulation Statistics ----------'
|
||||
GEM5STATS_ROI_NUMBER = 8
|
||||
|
||||
logger = logging.getLogger('gem5')
|
||||
|
||||
|
||||
def iter_statistics_dump(stats_file):
|
||||
'''
|
||||
Yields statistics dumps as dicts. The parameter is assumed to be a stream
|
||||
reading from the statistics log file.
|
||||
'''
|
||||
cur_dump = {}
|
||||
while True:
|
||||
line = stats_file.readline()
|
||||
if not line:
|
||||
break
|
||||
if GEM5STATS_DUMP_TAIL in line:
|
||||
yield cur_dump
|
||||
cur_dump = {}
|
||||
else:
|
||||
res = GEM5STATS_FIELD_REGEX.match(line)
|
||||
if res:
|
||||
k = res.group("key")
|
||||
vtext = res.group("value")
|
||||
try:
|
||||
v = list(map(numeric, vtext.split()))
|
||||
cur_dump[k] = v[0] if len(v) == 1 else set(v)
|
||||
except ValueError:
|
||||
msg = 'Found non-numeric entry in gem5 stats ({}: {})'
|
||||
logger.warning(msg.format(k, vtext))
|
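A hedged sketch of how the parser above might be driven (the stats file path is illustrative):

.. code-block:: python

    from devlib.utils.gem5 import iter_statistics_dump

    with open('stats.txt') as stats_file:          # illustrative gem5 stats log
        for dump in iter_statistics_dump(stats_file):
            # each dump maps a statistic name to a number (or a set of numbers)
            print(dump.get('sim_seconds'))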
@@ -1,4 +1,4 @@
|
||||
# Copyright 2013-2015 ARM Limited
|
||||
# Copyright 2013-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -19,29 +19,34 @@ Miscellaneous functions that don't fit anywhere else.
|
||||
|
||||
"""
|
||||
from __future__ import division
|
||||
import os
|
||||
import sys
|
||||
import re
|
||||
import string
|
||||
import threading
|
||||
import signal
|
||||
import subprocess
|
||||
import pkgutil
|
||||
import logging
|
||||
import random
|
||||
import ctypes
|
||||
from operator import itemgetter
|
||||
from functools import partial, reduce
|
||||
from itertools import groupby
|
||||
from functools import partial
|
||||
from operator import itemgetter
|
||||
|
||||
import ctypes
|
||||
import logging
|
||||
import os
|
||||
import pkgutil
|
||||
import random
|
||||
import re
|
||||
import signal
|
||||
import string
|
||||
import subprocess
|
||||
import sys
|
||||
import threading
|
||||
import wrapt
|
||||
import warnings
|
||||
|
||||
|
||||
from past.builtins import basestring
|
||||
|
||||
# pylint: disable=redefined-builtin
|
||||
from devlib.exception import HostError, TimeoutError
|
||||
|
||||
|
||||
# ABI --> architectures list
|
||||
ABI_MAP = {
|
||||
'armeabi': ['armeabi', 'armv7', 'armv7l', 'armv7el', 'armv7lh'],
|
||||
'armeabi': ['armeabi', 'armv7', 'armv7l', 'armv7el', 'armv7lh', 'armeabi-v7a'],
|
||||
'arm64': ['arm64', 'armv8', 'arm64-v8a', 'aarch64'],
|
||||
}
|
||||
|
||||
@@ -79,9 +84,19 @@ CPU_PART_MAP = {
|
||||
0xd08: {None: 'A72'},
|
||||
0xd09: {None: 'A73'},
|
||||
},
|
||||
0x42: { # Broadcom
|
||||
0x516: {None: 'Vulcan'},
|
||||
},
|
||||
0x43: { # Cavium
|
||||
0x0a1: {None: 'Thunderx'},
|
||||
0x0a2: {None: 'Thunderx81xx'},
|
||||
},
|
||||
0x4e: { # Nvidia
|
||||
0x0: {None: 'Denver'},
|
||||
},
|
||||
0x50: { # AppliedMicro
|
||||
0x0: {None: 'xgene'},
|
||||
},
|
||||
0x51: { # Qualcomm
|
||||
0x02d: {None: 'Scorpion'},
|
||||
0x04d: {None: 'MSM8960'},
|
||||
@@ -91,6 +106,10 @@ CPU_PART_MAP = {
|
||||
},
|
||||
0x205: {0x1: 'KryoSilver'},
|
||||
0x211: {0x1: 'KryoGold'},
|
||||
0x800: {None: 'Falkor'},
|
||||
},
|
||||
0x53: { # Samsung LSI
|
||||
0x001: {0x1: 'MongooseM1'},
|
||||
},
|
||||
0x56: { # Marvell
|
||||
0x131: {
|
||||
@@ -121,9 +140,13 @@ def preexec_function():
|
||||
|
||||
|
||||
check_output_logger = logging.getLogger('check_output')
|
||||
# Popen is not thread safe. If two threads attempt to call it at the same time,
|
||||
# one may lock up. See https://bugs.python.org/issue12739.
|
||||
check_output_lock = threading.Lock()
|
||||
|
||||
|
||||
def check_output(command, timeout=None, ignore=None, inputtext=None, **kwargs):
|
||||
def check_output(command, timeout=None, ignore=None, inputtext=None,
|
||||
combined_output=False, **kwargs):
|
||||
"""This is a version of subprocess.check_output that adds a timeout parameter to kill
|
||||
the subprocess if it does not return within the specified time."""
|
||||
# pylint: disable=too-many-branches
|
||||
@@ -144,9 +167,14 @@ def check_output(command, timeout=None, ignore=None, inputtext=None, **kwargs):
|
||||
except OSError:
|
||||
pass # process may have already terminated.
|
||||
|
||||
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
|
||||
stdin=subprocess.PIPE,
|
||||
preexec_fn=preexec_function, **kwargs)
|
||||
with check_output_lock:
|
||||
stderr = subprocess.STDOUT if combined_output else subprocess.PIPE
|
||||
process = subprocess.Popen(command,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=stderr,
|
||||
stdin=subprocess.PIPE,
|
||||
preexec_fn=preexec_function,
|
||||
**kwargs)
|
||||
|
||||
if timeout:
|
||||
timer = threading.Timer(timeout, callback, [process.pid, ])
|
||||
@@ -154,6 +182,11 @@ def check_output(command, timeout=None, ignore=None, inputtext=None, **kwargs):
|
||||
|
||||
try:
|
||||
output, error = process.communicate(inputtext)
|
||||
if sys.version_info[0] == 3:
|
||||
# Currently errors=replace is needed as 0x8c throws an error
|
||||
output = output.decode(sys.stdout.encoding or 'utf-8', "replace")
|
||||
if error:
|
||||
error = error.decode(sys.stderr.encoding or 'utf-8', "replace")
|
||||
finally:
|
||||
if timeout:
|
||||
timer.cancel()
|
||||
@@ -161,9 +194,9 @@ def check_output(command, timeout=None, ignore=None, inputtext=None, **kwargs):
|
||||
retcode = process.poll()
|
||||
if retcode:
|
||||
if retcode == -9: # killed, assume due to timeout callback
|
||||
raise TimeoutError(command, output='\n'.join([output, error]))
|
||||
raise TimeoutError(command, output='\n'.join([output or '', error or '']))
|
||||
elif ignore != 'all' and retcode not in ignore:
|
||||
raise subprocess.CalledProcessError(retcode, command, output='\n'.join([output, error]))
|
||||
raise subprocess.CalledProcessError(retcode, command, output='\n'.join([output or '', error or '']))
|
||||
return output, error
|
||||
|
||||
|
||||
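A minimal sketch of the updated helper above (the commands are illustrative):

.. code-block:: python

    from devlib.utils.misc import check_output

    # stdout and stderr are returned separately; the subprocess is killed and a
    # TimeoutError raised if it does not finish within the timeout.
    output, error = check_output(['uname', '-a'], timeout=10)

    # With combined_output=True stderr is folded into stdout, so the second
    # element of the returned tuple is None.
    merged, _ = check_output(['uname', '-a'], timeout=10, combined_output=True)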
@@ -235,8 +268,8 @@ def _merge_two_dicts(base, other, list_duplicates='all', match_types=False, # p
|
||||
dict_type=dict, should_normalize=True, should_merge_lists=True):
|
||||
"""Merge dicts normalizing their keys."""
|
||||
merged = dict_type()
|
||||
base_keys = base.keys()
|
||||
other_keys = other.keys()
|
||||
base_keys = list(base.keys())
|
||||
other_keys = list(other.keys())
|
||||
norm = normalize if should_normalize else lambda x, y: x
|
||||
|
||||
base_only = []
|
||||
@@ -368,7 +401,7 @@ def normalize(value, dict_type=dict):
|
||||
no surrounding whitespace, underscore-delimited strings."""
|
||||
if isinstance(value, dict):
|
||||
normalized = dict_type()
|
||||
for k, v in value.iteritems():
|
||||
for k, v in value.items():
|
||||
key = k.strip().lower().replace(' ', '_')
|
||||
normalized[key] = normalize(v, dict_type)
|
||||
return normalized
|
||||
@@ -384,27 +417,58 @@ def convert_new_lines(text):
|
||||
""" Convert new lines to a common format. """
|
||||
return text.replace('\r\n', '\n').replace('\r', '\n')
|
||||
|
||||
def sanitize_cmd_template(cmd):
|
||||
msg = (
|
||||
'''Quoted placeholder should not be used, as it will result in quoting the text twice. {} should be used instead of '{}' or "{}" in the template: '''
|
||||
)
|
||||
for unwanted in ('"{}"', "'{}'"):
|
||||
if unwanted in cmd:
|
||||
warnings.warn(msg + cmd, stacklevel=2)
|
||||
cmd = cmd.replace(unwanted, '{}')
|
||||
|
||||
return cmd
|
||||
|
||||
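An illustrative call, showing the behaviour described by the warning above:

.. code-block:: python

    from devlib.utils.misc import sanitize_cmd_template

    # Quoted placeholders are rewritten to bare ones (and a warning is emitted),
    # so the text substituted later is not quoted twice.
    tmpl = sanitize_cmd_template("sudo -- sh -c '{}'")
    # tmpl == "sudo -- sh -c {}"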
def escape_quotes(text):
|
||||
"""Escape quotes, and escaped quotes, in the specified text."""
|
||||
"""
|
||||
Escape quotes, and escaped quotes, in the specified text.
|
||||
|
||||
.. note:: :func:`pipes.quote` should be favored where possible.
|
||||
"""
|
||||
return re.sub(r'\\("|\')', r'\\\\\1', text).replace('\'', '\\\'').replace('\"', '\\\"')
|
||||
|
||||
|
||||
def escape_single_quotes(text):
|
||||
"""Escape single quotes, and escaped single quotes, in the specified text."""
|
||||
"""
|
||||
Escape single quotes, and escaped single quotes, in the specified text.
|
||||
|
||||
.. note:: :func:`pipes.quote` should be favored where possible.
|
||||
"""
|
||||
return re.sub(r'\\("|\')', r'\\\\\1', text).replace('\'', '\'\\\'\'')
|
||||
|
||||
|
||||
def escape_double_quotes(text):
|
||||
"""Escape double quotes, and escaped double quotes, in the specified text."""
|
||||
"""
|
||||
Escape double quotes, and escaped double quotes, in the specified text.
|
||||
|
||||
.. note:: :func:`pipes.quote` should be favored where possible.
|
||||
"""
|
||||
return re.sub(r'\\("|\')', r'\\\\\1', text).replace('\"', '\\\"')
|
||||
|
||||
|
||||
def escape_spaces(text):
|
||||
"""
|
||||
Escape spaces in the specified text
|
||||
|
||||
.. note:: :func:`pipes.quote` should be favored where possible.
|
||||
"""
|
||||
return text.replace(' ', '\ ')
|
||||
|
||||
|
||||
def getch(count=1):
|
||||
"""Read ``count`` characters from standard input."""
|
||||
if os.name == 'nt':
|
||||
import msvcrt # pylint: disable=F0401
|
||||
return ''.join([msvcrt.getch() for _ in xrange(count)])
|
||||
return ''.join([msvcrt.getch() for _ in range(count)])
|
||||
else: # assume Unix
|
||||
import tty # NOQA
|
||||
import termios # NOQA
|
||||
@@ -431,6 +495,19 @@ def as_relative(path):
|
||||
return path.lstrip(os.sep)
|
||||
|
||||
|
||||
def commonprefix(file_list, sep=os.sep):
|
||||
"""
|
||||
Find the lowest common base folder of a passed list of files.
|
||||
"""
|
||||
common_path = os.path.commonprefix(file_list)
|
||||
cp_split = common_path.split(sep)
|
||||
other_split = file_list[0].split(sep)
|
||||
last = len(cp_split) - 1
|
||||
if cp_split[last] != other_split[last]:
|
||||
cp_split = cp_split[:-1]
|
||||
return sep.join(cp_split)
|
||||
|
||||
|
||||
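For example (paths are illustrative):

.. code-block:: python

    from devlib.utils.misc import commonprefix

    files = ['/data/local/tmp/a.log', '/data/local/tmp/sub/b.log']
    commonprefix(files)   # -> '/data/local/tmp'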
def get_cpu_mask(cores):
|
||||
"""Return a string with the hex for the cpu mask for the specified core numbers."""
|
||||
mask = 0
|
||||
@@ -460,8 +537,8 @@ def which(name):
|
||||
return None
|
||||
|
||||
|
||||
_bash_color_regex = re.compile('\x1b\\[[0-9;]+m')
|
||||
|
||||
# This matches most ANSI escape sequences, not just colors
|
||||
_bash_color_regex = re.compile(r'\x1b\[[0-9;]*[a-zA-Z]')
|
||||
|
||||
def strip_bash_colors(text):
|
||||
return _bash_color_regex.sub('', text)
|
||||
@@ -469,11 +546,17 @@ def strip_bash_colors(text):
|
||||
|
||||
def get_random_string(length):
|
||||
"""Returns a random ASCII string of the specified length)."""
|
||||
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in xrange(length))
|
||||
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(length))
|
||||
|
||||
|
||||
class LoadSyntaxError(Exception):
|
||||
|
||||
@property
|
||||
def message(self):
|
||||
if self.args:
|
||||
return self.args[0]
|
||||
return str(self)
|
||||
|
||||
def __init__(self, message, filepath, lineno):
|
||||
super(LoadSyntaxError, self).__init__(message)
|
||||
self.filepath = filepath
|
||||
@@ -486,13 +569,19 @@ class LoadSyntaxError(Exception):
|
||||
|
||||
RAND_MOD_NAME_LEN = 30
|
||||
BAD_CHARS = string.punctuation + string.whitespace
|
||||
TRANS_TABLE = string.maketrans(BAD_CHARS, '_' * len(BAD_CHARS))
|
||||
# pylint: disable=no-member
|
||||
if sys.version_info[0] == 3:
|
||||
TRANS_TABLE = str.maketrans(BAD_CHARS, '_' * len(BAD_CHARS))
|
||||
else:
|
||||
TRANS_TABLE = string.maketrans(BAD_CHARS, '_' * len(BAD_CHARS))
|
||||
|
||||
|
||||
def to_identifier(text):
|
||||
"""Converts text to a valid Python identifier by replacing all
|
||||
whitespace and punctuation."""
|
||||
return re.sub('_+', '_', text.translate(TRANS_TABLE))
|
||||
whitespace and punctuation, and adding a prefix if it starts with a digit."""
|
||||
if text[:1].isdigit():
|
||||
text = '_' + text
|
||||
return re.sub('_+', '_', str(text).translate(TRANS_TABLE))
|
||||
|
||||
|
||||
def unique(alist):
|
||||
@@ -513,8 +602,8 @@ def ranges_to_list(ranges_string):
|
||||
values = []
|
||||
for rg in ranges_string.split(','):
|
||||
if '-' in rg:
|
||||
first, last = map(int, rg.split('-'))
|
||||
values.extend(xrange(first, last + 1))
|
||||
first, last = list(map(int, rg.split('-')))
|
||||
values.extend(range(first, last + 1))
|
||||
else:
|
||||
values.append(int(rg))
|
||||
return values
|
||||
@@ -523,8 +612,8 @@ def ranges_to_list(ranges_string):
|
||||
def list_to_ranges(values):
|
||||
"""Converts a list, e.g ``[0,2,3,4]``, into a sysfs-style ranges string, e.g. ``"0,2-4"``"""
|
||||
range_groups = []
|
||||
for _, g in groupby(enumerate(values), lambda (i, x): i - x):
|
||||
range_groups.append(map(itemgetter(1), g))
|
||||
for _, g in groupby(enumerate(values), lambda i_x: i_x[0] - i_x[1]):
|
||||
range_groups.append(list(map(itemgetter(1), g)))
|
||||
range_strings = []
|
||||
for group in range_groups:
|
||||
if len(group) == 1:
|
||||
@@ -547,7 +636,7 @@ def mask_to_list(mask):
|
||||
"""Converts the specfied integer bitmask into a list of
|
||||
indexes of bits that are set in the mask."""
|
||||
size = len(bin(mask)) - 2 # because of "0b"
|
||||
return [size - i - 1 for i in xrange(size)
|
||||
return [size - i - 1 for i in range(size)
|
||||
if mask & (1 << size - i - 1)]
|
||||
|
||||
|
||||
@@ -585,17 +674,24 @@ def __get_memo_id(obj):
|
||||
|
||||
|
||||
@wrapt.decorator
|
||||
def memoized(wrapped, instance, args, kwargs):
|
||||
"""A decorator for memoizing functions and methods."""
|
||||
def memoized(wrapped, instance, args, kwargs): # pylint: disable=unused-argument
|
||||
"""
|
||||
A decorator for memoizing functions and methods.
|
||||
|
||||
.. warning:: this may not detect changes to mutable types. As long as the
|
||||
memoized function was used with an object as an argument
|
||||
before, the cached result will be returned, even if the
|
||||
structure of the object (e.g. a list) has changed in the meantime.
|
||||
|
||||
"""
|
||||
func_id = repr(wrapped)
|
||||
|
||||
def memoize_wrapper(*args, **kwargs):
|
||||
id_string = func_id + ','.join([__get_memo_id(a) for a in args])
|
||||
id_string += ','.join('{}={}'.format(k, v)
|
||||
for k, v in kwargs.iteritems())
|
||||
id_string += ','.join('{}={}'.format(k, __get_memo_id(v))
|
||||
for k, v in kwargs.items())
|
||||
if id_string not in __memo_cache:
|
||||
__memo_cache[id_string] = wrapped(*args, **kwargs)
|
||||
return __memo_cache[id_string]
|
||||
|
||||
return memoize_wrapper(*args, **kwargs)
|
||||
|
||||
|
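A small usage sketch of the decorator above; note the caveat about mutable arguments in its docstring:

.. code-block:: python

    import time
    from devlib.utils.misc import memoized

    @memoized
    def slow_square(x):
        time.sleep(1)      # stand-in for an expensive computation
        return x * x

    slow_square(4)   # computed (takes about a second)
    slow_square(4)   # served from the cache for the same argument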
543
devlib/utils/parse_aep.py
Executable file
@@ -0,0 +1,543 @@
|
||||
#!/usr/bin/env python
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# Copyright 2018 Linaro Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import getopt
|
||||
import logging
|
||||
import signal
|
||||
import sys
|
||||
|
||||
logger = logging.getLogger('aep-parser')
|
||||
|
||||
# pylint: disable=attribute-defined-outside-init
|
||||
class AepParser(object):
|
||||
prepared = False
|
||||
|
||||
@staticmethod
|
||||
def topology_from_data(array, topo):
|
||||
# Extract topology information for the data file
|
||||
# The header of a data file looks like this ('#' included):
|
||||
# configuration: <file path>
|
||||
# config_name: <file name>
|
||||
# trigger: 0.400000V (hyst 0.200000V) 0.000000W (hyst 0.200000W) 400us
|
||||
# date: Fri, 10 Jun 2016 11:25:07 +0200
|
||||
# host: <host name>
|
||||
#
|
||||
# CHN_0 Pretty_name_0 PARENT_0 Color0 Class0
|
||||
# CHN_1 Pretty_name_1 PARENT_1 Color1 Class1
|
||||
# CHN_2 Pretty_name_2 PARENT_2 Color2 Class2
|
||||
# CHN_3 Pretty_name_3 PARENT_3 Color3 Class3
|
||||
# ..
|
||||
# CHN_N Pretty_name_N PARENT_N ColorN ClassN
|
||||
#
|
||||
|
||||
info = {}
|
||||
|
||||
if len(array) == 6:
|
||||
info['name'] = array[1]
|
||||
info['parent'] = array[3]
|
||||
info['pretty'] = array[2]
|
||||
# add an entry for both name and pretty name in order to not parse
|
||||
# the whole dict when looking for a parent and the parent of parent
|
||||
topo[array[1]] = info
|
||||
topo[array[2]] = info
|
||||
return topo
|
||||
|
||||
@staticmethod
|
||||
def create_virtual(topo, label, hide, duplicate):
|
||||
# Create a list of virtual power domains that are the sum of others
|
||||
# A virtual domain is the parent of several channels but is not sampled by a
|
||||
# channel
|
||||
# This can be useful if a power domain is supplied by 2 power rails
|
||||
virtual = {}
|
||||
|
||||
# Create an entry for each virtual parent
|
||||
for supply in topo.keys():
|
||||
index = topo[supply]['index']
|
||||
# Don't care about hidden columns
|
||||
if hide[index]:
|
||||
continue
|
||||
|
||||
# Parent is in the topology
|
||||
parent = topo[supply]['parent']
|
||||
if parent in topo:
|
||||
continue
|
||||
|
||||
if parent not in virtual:
|
||||
virtual[parent] = {supply : index}
|
||||
|
||||
virtual[parent][supply] = index
|
||||
|
||||
# Remove parents with a single child, as they don't give more information than their
|
||||
# child
|
||||
for supply in list(virtual.keys()):
|
||||
if len(virtual[supply]) == 1:
|
||||
del virtual[supply]
|
||||
|
||||
for supply in list(virtual.keys()):
|
||||
# Add label, hide and duplicate columns for virtual domains
|
||||
hide.append(0)
|
||||
duplicate.append(1)
|
||||
label.append(supply)
|
||||
|
||||
return virtual
|
||||
|
||||
@staticmethod
|
||||
def get_label(array):
|
||||
# Get the label of each column
|
||||
# Remove unit '(X)' from the end of the label
|
||||
label = [""]*len(array)
|
||||
unit = [""]*len(array)
|
||||
|
||||
label[0] = array[0]
|
||||
unit[0] = "(S)"
|
||||
for i in range(1, len(array)):
|
||||
label[i] = array[i][:-3]
|
||||
unit[i] = array[i][-3:]
|
||||
|
||||
return label, unit
|
||||
|
||||
@staticmethod
|
||||
def filter_column(label, unit, topo):
|
||||
# Filter columns
|
||||
# We don't parse Volt and Ampere columns: put them in the hide list
|
||||
# We don't add in Total a column that is the child of another one: put in duplicate list
|
||||
|
||||
# By default we hide all columns
|
||||
hide = [1] * len(label)
|
||||
# By default we assume that there is no child
|
||||
duplicate = [0] * len(label)
|
||||
|
||||
for i in range(len(label)): # pylint: disable=consider-using-enumerate
|
||||
# We only care about time and Watt
|
||||
if label[i] == 'time':
|
||||
hide[i] = 0
|
||||
continue
|
||||
|
||||
if '(W)' not in unit[i]:
|
||||
continue
|
||||
|
||||
hide[i] = 0
|
||||
|
||||
#label is pretty name
|
||||
pretty = label[i]
|
||||
|
||||
# We don't add a power domain that is already accounted for by its parent
|
||||
if topo[pretty]['parent'] in topo:
|
||||
duplicate[i] = 1
|
||||
|
||||
# Set index, that will be used by virtual domain
|
||||
topo[topo[pretty]['name']]['index'] = i
|
||||
|
||||
# remove pretty element that is useless now
|
||||
del topo[pretty]
|
||||
|
||||
return hide, duplicate
|
||||
|
||||
@staticmethod
|
||||
def parse_text(array, hide):
|
||||
data = [0]*len(array)
|
||||
for i in range(len(array)): # pylint: disable=consider-using-enumerate
|
||||
if hide[i]:
|
||||
continue
|
||||
|
||||
try:
|
||||
data[i] = int(float(array[i])*1000000)
|
||||
except ValueError:
|
||||
continue
|
||||
|
||||
return data
|
||||
|
||||
@staticmethod
|
||||
def add_virtual_data(data, virtual):
|
||||
# write virtual domain
|
||||
for parent in virtual.keys():
|
||||
power = 0
|
||||
for child in list(virtual[parent].values()):
|
||||
try:
|
||||
power += data[child]
|
||||
except IndexError:
|
||||
continue
|
||||
data.append(power)
|
||||
|
||||
return data
|
||||
|
||||
@staticmethod
|
||||
def delta_nrj(array, delta, minimum, maximum, hide):
|
||||
# Compute the energy consumed in this time slice and add it
|
||||
# delta[0] is used to save the last time stamp
|
||||
|
||||
if delta[0] < 0:
|
||||
delta[0] = array[0]
|
||||
|
||||
time = array[0] - delta[0]
|
||||
if time <= 0:
|
||||
return delta
|
||||
|
||||
for i in range(len(array)): # pylint: disable=consider-using-enumerate
|
||||
if hide[i]:
|
||||
continue
|
||||
|
||||
try:
|
||||
data = array[i]
|
||||
except ValueError:
|
||||
continue
|
||||
|
||||
if data < minimum[i]:
|
||||
minimum[i] = data
|
||||
if data > maximum[i]:
|
||||
maximum[i] = data
|
||||
delta[i] += time * data
|
||||
|
||||
# save last time stamp
|
||||
delta[0] = array[0]
|
||||
|
||||
return delta
|
||||
|
||||
def output_label(self, label, hide):
|
||||
self.fo.write(label[0] + "(uS)")
|
||||
for i in range(1, len(label)):
|
||||
if hide[i]:
|
||||
continue
|
||||
self.fo.write(" " + label[i] + "(uW)")
|
||||
|
||||
self.fo.write("\n")
|
||||
|
||||
def output_power(self, array, hide):
|
||||
#skip partial line. Most probably the last one
|
||||
if len(array) < len(hide):
|
||||
return
|
||||
|
||||
# write non-hidden columns
|
||||
self.fo.write(str(array[0]))
|
||||
for i in range(1, len(array)):
|
||||
if hide[i]:
|
||||
continue
|
||||
|
||||
self.fo.write(" "+str(array[i]))
|
||||
|
||||
self.fo.write("\n")
|
||||
|
||||
# pylint: disable=redefined-outer-name
|
||||
def prepare(self, input_file, outfile, summaryfile):
|
||||
try:
|
||||
self.fi = open(input_file, "r")
|
||||
except IOError:
|
||||
logger.warning('Unable to open input file {}'.format(input_file))
|
||||
logger.warning('Usage: parse_aep.py -i <inputfile> [-o <outputfile>]')
|
||||
sys.exit(2)
|
||||
|
||||
self.parse = True
|
||||
if outfile:
|
||||
try:
|
||||
self.fo = open(outfile, "w")
|
||||
except IOError:
|
||||
logger.warning('Unable to create {}'.format(outfile))
|
||||
self.parse = False
|
||||
else:
|
||||
self.parse = False
|
||||
|
||||
self.summary = True
|
||||
if summaryfile:
|
||||
try:
|
||||
self.fs = open(summaryfile, "w")
|
||||
except IOError:
|
||||
logger.warning('Unable to create {}'.format(summaryfile))
|
||||
self.fs = sys.stdout
|
||||
else:
|
||||
self.fs = sys.stdout
|
||||
|
||||
self.prepared = True
|
||||
|
||||
def unprepare(self):
|
||||
if not self.prepared:
|
||||
# nothing has been prepared
|
||||
return
|
||||
|
||||
self.fi.close()
|
||||
|
||||
if self.parse:
|
||||
self.fo.close()
|
||||
|
||||
self.prepared = False
|
||||
|
||||
# pylint: disable=too-many-branches,too-many-statements,redefined-outer-name,too-many-locals
|
||||
def parse_aep(self, start=0, length=-1):
|
||||
# Parse aep data and calculate the energy consumed
|
||||
begin = 0
|
||||
|
||||
label_line = 1
|
||||
|
||||
topo = {}
|
||||
|
||||
lines = self.fi.readlines()
|
||||
|
||||
for myline in lines:
|
||||
array = myline.split()
|
||||
|
||||
if "#" in myline:
|
||||
# update power topology
|
||||
topo = self.topology_from_data(array, topo)
|
||||
continue
|
||||
|
||||
if label_line:
|
||||
label_line = 0
|
||||
# 1st line not starting with # gives label of each column
|
||||
label, unit = self.get_label(array)
|
||||
# hide useless columns and detect channels that are children
|
||||
# of other channels
|
||||
hide, duplicate = self.filter_column(label, unit, topo)
|
||||
|
||||
# Create virtual power domains
|
||||
virtual = self.create_virtual(topo, label, hide, duplicate)
|
||||
if self.parse:
|
||||
self.output_label(label, hide)
|
||||
|
||||
logger.debug('Topology : {}'.format(topo))
|
||||
logger.debug('Virtual power domain : {}'.format(virtual))
|
||||
logger.debug('Duplicated power domain : : {}'.format(duplicate))
|
||||
logger.debug('Name of columns : {}'.format(label))
|
||||
logger.debug('Hidden columns : {}'.format(hide))
|
||||
logger.debug('Unit of columns : {}'.format(unit))
|
||||
|
||||
# Init arrays
|
||||
nrj = [0]*len(label)
|
||||
minimum = [100000000]*len(label)
|
||||
maximum = [0]*len(label)
|
||||
offset = [0]*len(label)
|
||||
|
||||
continue
|
||||
|
||||
# convert text to int and unit to micro-unit
|
||||
data = self.parse_text(array, hide)
|
||||
|
||||
# get 1st time stamp
|
||||
if begin <= 0:
|
||||
begin = data[0]
|
||||
|
||||
# skip data before start
|
||||
if (data[0]-begin) < start:
|
||||
continue
|
||||
|
||||
# stop after length
|
||||
if length >= 0 and (data[0]-begin) > (start + length):
|
||||
continue
|
||||
|
||||
# add virtual domains
|
||||
data = self.add_virtual_data(data, virtual)
|
||||
|
||||
# extract power figures
|
||||
self.delta_nrj(data, nrj, minimum, maximum, hide)
|
||||
|
||||
# write data into new file
|
||||
if self.parse:
|
||||
self.output_power(data, hide)
|
||||
|
||||
# if there is no data just return
|
||||
if label_line or len(nrj) == 1:
|
||||
raise ValueError('No data found in the data file. Please check the Arm Energy Probe')
|
||||
|
||||
# display energy consumption of each channel and total energy consumption
|
||||
total = 0
|
||||
results_table = {}
|
||||
for i in range(1, len(nrj)):
|
||||
if hide[i]:
|
||||
continue
|
||||
|
||||
nrj[i] -= offset[i] * nrj[0]
|
||||
|
||||
total_nrj = nrj[i]/1000000000000.0
|
||||
duration = (maximum[0]-minimum[0])/1000000.0
|
||||
channel_name = label[i]
|
||||
average_power = total_nrj/duration
|
||||
|
||||
total = nrj[i]/1000000000000.0
|
||||
duration = (maximum[0]-minimum[0])/1000000.0
|
||||
min_power = minimum[i]/1000000.0
|
||||
max_power = maximum[i]/1000000.0
|
||||
output = "Total nrj: %8.3f J for %s -- duration %8.3f sec -- min %8.3f W -- max %8.3f W\n"
|
||||
self.fs.write(output % (total, label[i], duration, min_power, max_power))
|
||||
|
||||
# store each AEP channel info except Platform in the results table
|
||||
results_table[channel_name] = total_nrj, average_power
|
||||
|
||||
if minimum[i] < offset[i]:
|
||||
self.fs.write("!!! Min below offset\n")
|
||||
|
||||
if duplicate[i]:
|
||||
continue
|
||||
|
||||
total += nrj[i]
|
||||
|
||||
output = "Total nrj: %8.3f J for Platform -- duration %8.3f sec\n"
|
||||
self.fs.write(output % (total/1000000000000.0, (maximum[0]-minimum[0])/1000000.0))
|
||||
|
||||
total_nrj = total/1000000000000.0
|
||||
duration = (maximum[0]-minimum[0])/1000000.0
|
||||
average_power = total_nrj/duration
|
||||
|
||||
# store AEP Platform channel info in the results table
|
||||
results_table["Platform"] = total_nrj, average_power
|
||||
|
||||
return results_table
|
||||
|
||||
# pylint: disable=too-many-branches,no-self-use,too-many-locals
|
||||
def topology_from_config(self, topofile):
|
||||
try:
|
||||
ft = open(topofile, "r")
|
||||
except IOError:
|
||||
logger.warning('Unable to open config file {}'.format(topofile))
|
||||
return
|
||||
lines = ft.readlines()
|
||||
|
||||
topo = {}
|
||||
virtual = {}
|
||||
name = ""
|
||||
offset = 0
|
||||
index = 0
|
||||
#parse config file
|
||||
for myline in lines:
|
||||
if myline.startswith("#"):
|
||||
# skip comment
|
||||
continue
|
||||
|
||||
if myline == "\n":
|
||||
# skip empty line
|
||||
continue
|
||||
|
||||
if name == "":
|
||||
# 1st valid line is the config's name
|
||||
name = myline
|
||||
continue
|
||||
|
||||
if not myline.startswith((' ', '\t')):
|
||||
# new device path
|
||||
offset = index
|
||||
continue
|
||||
|
||||
# Get parameters of channel configuration
|
||||
items = myline.split()
|
||||
|
||||
info = {}
|
||||
info['name'] = items[0]
|
||||
info['parent'] = items[9]
|
||||
info['pretty'] = items[8]
|
||||
info['index'] = int(items[2])+offset
|
||||
|
||||
# Add channel
|
||||
topo[items[0]] = info
|
||||
|
||||
# Increase index
|
||||
index += 1
|
||||
|
||||
|
||||
# Create an entry for each virtual parent
|
||||
# pylint: disable=consider-iterating-dictionary
|
||||
for supply in topo.keys():
|
||||
# Parent is in the topology
|
||||
parent = topo[supply]['parent']
|
||||
if parent in topo:
|
||||
continue
|
||||
|
||||
if parent not in virtual:
|
||||
virtual[parent] = {supply : topo[supply]['index']}
|
||||
|
||||
virtual[parent][supply] = topo[supply]['index']
|
||||
|
||||
|
||||
# Remove parents with a single child, as they don't give more information than their
|
||||
# child
|
||||
# pylint: disable=consider-iterating-dictionary
|
||||
for supply in list(virtual.keys()):
|
||||
if len(virtual[supply]) == 1:
|
||||
del virtual[supply]
|
||||
|
||||
topo_list = ['']*(1+len(topo)+len(virtual))
|
||||
topo_list[0] = 'time'
|
||||
# pylint: disable=consider-iterating-dictionary
|
||||
for chnl in topo.keys():
|
||||
topo_list[topo[chnl]['index']] = chnl
|
||||
for chnl in virtual.keys():
|
||||
index += 1
|
||||
topo_list[index] = chnl
|
||||
|
||||
ft.close()
|
||||
|
||||
return topo_list
|
||||
|
||||
def __del__(self):
|
||||
self.unprepare()
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
# pylint: disable=unused-argument
|
||||
def handleSigTERM(signum, frame):
|
||||
sys.exit(2)
|
||||
|
||||
signal.signal(signal.SIGTERM, handleSigTERM)
|
||||
signal.signal(signal.SIGINT, handleSigTERM)
|
||||
|
||||
logger.setLevel(logging.WARN)
|
||||
ch = logging.StreamHandler()
|
||||
ch.setLevel(logging.DEBUG)
|
||||
logger.addHandler(ch)
|
||||
|
||||
in_file = ""
|
||||
out_file = ""
|
||||
figurefile = ""
|
||||
start = 0
|
||||
length = -1
|
||||
|
||||
try:
|
||||
opts, args = getopt.getopt(sys.argv[1:], "i:vo:s:l:t:")
|
||||
except getopt.GetoptError as err:
|
||||
print(str(err)) # will print something like "option -a not recognized"
|
||||
sys.exit(2)
|
||||
|
||||
for o, a in opts:
|
||||
if o == "-i":
|
||||
in_file = a
|
||||
if o == "-v":
|
||||
logger.setLevel(logging.DEBUG)
|
||||
if o == "-o":
|
||||
parse = True
|
||||
out_file = a
|
||||
if o == "-s":
|
||||
start = int(float(a)*1000000)
|
||||
if o == "-l":
|
||||
length = int(float(a)*1000000)
|
||||
if o == "-t":
|
||||
topfile = a
|
||||
parser = AepParser()
|
||||
print(parser.topology_from_config(topfile))
|
||||
exit(0)
|
||||
|
||||
parser = AepParser()
|
||||
parser.prepare(in_file, out_file, figurefile)
|
||||
parser.parse_aep(start, length)
|
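A hedged sketch of driving the parser programmatically rather than through the command line above (file names are illustrative):

.. code-block:: python

    from devlib.utils.parse_aep import AepParser

    parser = AepParser()
    # prepare(input_file, outfile, summaryfile); outfile/summaryfile may be empty
    parser.prepare('capture.aep', 'capture_parsed.txt', 'summary.txt')
    results = parser.parse_aep()
    parser.unprepare()

    # results maps each channel (plus 'Platform') to (energy in J, average power in W)
    for channel, (energy, power) in results.items():
        print(channel, energy, power)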
276
devlib/utils/rendering.py
Normal file
@@ -0,0 +1,276 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import logging
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
import tempfile
|
||||
import threading
|
||||
import time
|
||||
from collections import namedtuple
|
||||
from pipes import quote
|
||||
|
||||
# pylint: disable=redefined-builtin
|
||||
from devlib.exception import WorkerThreadError, TargetNotRespondingError, TimeoutError
|
||||
from devlib.utils.csvutil import csvwriter
|
||||
|
||||
|
||||
logger = logging.getLogger('rendering')
|
||||
|
||||
SurfaceFlingerFrame = namedtuple('SurfaceFlingerFrame',
|
||||
'desired_present_time actual_present_time frame_ready_time')
|
||||
|
||||
VSYNC_INTERVAL = 16666667
|
||||
|
||||
|
||||
class FrameCollector(threading.Thread):
|
||||
|
||||
def __init__(self, target, period):
|
||||
super(FrameCollector, self).__init__()
|
||||
self.target = target
|
||||
self.period = period
|
||||
self.stop_signal = threading.Event()
|
||||
self.frames = []
|
||||
|
||||
self.temp_file = None
|
||||
self.refresh_period = None
|
||||
self.drop_threshold = None
|
||||
self.unresponsive_count = 0
|
||||
self.last_ready_time = 0
|
||||
self.exc = None
|
||||
self.header = None
|
||||
|
||||
def run(self):
|
||||
logger.debug('Frame data collection started.')
|
||||
try:
|
||||
self.stop_signal.clear()
|
||||
fd, self.temp_file = tempfile.mkstemp()
|
||||
logger.debug('temp file: {}'.format(self.temp_file))
|
||||
wfh = os.fdopen(fd, 'wb')
|
||||
try:
|
||||
while not self.stop_signal.is_set():
|
||||
self.collect_frames(wfh)
|
||||
time.sleep(self.period)
|
||||
finally:
|
||||
wfh.close()
|
||||
except (TargetNotRespondingError, TimeoutError): # pylint: disable=W0703
|
||||
raise
|
||||
except Exception as e: # pylint: disable=W0703
|
||||
logger.warning('Exception on collector thread: {}({})'.format(e.__class__.__name__, e))
|
||||
self.exc = WorkerThreadError(self.name, sys.exc_info())
|
||||
logger.debug('Frame data collection stopped.')
|
||||
|
||||
def stop(self):
|
||||
self.stop_signal.set()
|
||||
self.join()
|
||||
if self.unresponsive_count:
|
||||
message = 'FrameCollector was unresponsive {} times.'.format(self.unresponsive_count)
|
||||
if self.unresponsive_count > 10:
|
||||
logger.warning(message)
|
||||
else:
|
||||
logger.debug(message)
|
||||
if self.exc:
|
||||
raise self.exc # pylint: disable=E0702
|
||||
|
||||
def process_frames(self, outfile=None):
|
||||
if not self.temp_file:
|
||||
raise RuntimeError('Attempting to process frames before running the collector')
|
||||
with open(self.temp_file) as fh:
|
||||
self._process_raw_file(fh)
|
||||
if outfile:
|
||||
shutil.copy(self.temp_file, outfile)
|
||||
os.unlink(self.temp_file)
|
||||
self.temp_file = None
|
||||
|
||||
def write_frames(self, outfile, columns=None):
|
||||
if columns is None:
|
||||
header = self.header
|
||||
frames = self.frames
|
||||
else:
|
||||
indexes = []
|
||||
for c in columns:
|
||||
if c not in self.header:
|
||||
msg = 'Invalid column "{}"; must be in {}'
|
||||
raise ValueError(msg.format(c, self.header))
|
||||
indexes.append(self.header.index(c))
|
||||
frames = [[f[i] for i in indexes] for f in self.frames]
|
||||
header = columns
|
||||
with csvwriter(outfile) as writer:
|
||||
if header:
|
||||
writer.writerow(header)
|
||||
writer.writerows(frames)
|
||||
|
||||
def collect_frames(self, wfh):
|
||||
raise NotImplementedError()
|
||||
|
||||
def clear(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
def _process_raw_file(self, fh):
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class SurfaceFlingerFrameCollector(FrameCollector):
|
||||
|
||||
def __init__(self, target, period, view, header=None):
|
||||
super(SurfaceFlingerFrameCollector, self).__init__(target, period)
|
||||
self.view = view
|
||||
self.header = header or SurfaceFlingerFrame._fields
|
||||
|
||||
def collect_frames(self, wfh):
|
||||
for activity in self.list():
|
||||
if activity == self.view:
|
||||
wfh.write(self.get_latencies(activity).encode('utf-8'))
|
||||
|
||||
def clear(self):
|
||||
self.target.execute('dumpsys SurfaceFlinger --latency-clear ')
|
||||
|
||||
def get_latencies(self, activity):
|
||||
cmd = 'dumpsys SurfaceFlinger --latency {}'
|
||||
return self.target.execute(cmd.format(quote(activity)))
|
||||
|
||||
def list(self):
|
||||
text = self.target.execute('dumpsys SurfaceFlinger --list')
|
||||
return text.replace('\r\n', '\n').replace('\r', '\n').split('\n')
|
||||
|
||||
def _process_raw_file(self, fh):
|
||||
text = fh.read().replace('\r\n', '\n').replace('\r', '\n')
|
||||
for line in text.split('\n'):
|
||||
line = line.strip()
|
||||
if line:
|
||||
self._process_trace_line(line)
|
||||
|
||||
def _process_trace_line(self, line):
|
||||
parts = line.split()
|
||||
if len(parts) == 3:
|
||||
frame = SurfaceFlingerFrame(*list(map(int, parts)))
|
||||
if not frame.frame_ready_time:
|
||||
return # "null" frame
|
||||
if frame.frame_ready_time <= self.last_ready_time:
|
||||
return # duplicate frame
|
||||
if (frame.frame_ready_time - frame.desired_present_time) > self.drop_threshold:
|
||||
logger.debug('Dropping bogus frame {}.'.format(line))
|
||||
return # bogus data
|
||||
self.last_ready_time = frame.frame_ready_time
|
||||
self.frames.append(frame)
|
||||
elif len(parts) == 1:
|
||||
self.refresh_period = int(parts[0])
|
||||
self.drop_threshold = self.refresh_period * 1000
|
||||
elif 'SurfaceFlinger appears to be unresponsive, dumping anyways' in line:
|
||||
self.unresponsive_count += 1
|
||||
else:
|
||||
logger.warning('Unexpected SurfaceFlinger dump output: {}'.format(line))
|
||||
|
||||
|
||||
def read_gfxinfo_columns(target):
|
||||
output = target.execute('dumpsys gfxinfo --list framestats')
|
||||
lines = iter(output.split('\n'))
|
||||
for line in lines:
|
||||
if line.startswith('---PROFILEDATA---'):
|
||||
break
|
||||
columns_line = next(lines)
|
||||
return columns_line.split(',')[:-1] # has a trailing ','
|
||||
|
||||
|
||||
class GfxinfoFrameCollector(FrameCollector):
|
||||
|
||||
def __init__(self, target, period, package, header=None):
|
||||
super(GfxinfoFrameCollector, self).__init__(target, period)
|
||||
self.package = package
|
||||
self.header = None
|
||||
self._init_header(header)
|
||||
|
||||
def collect_frames(self, wfh):
|
||||
cmd = 'dumpsys gfxinfo {} framestats'
|
||||
result = self.target.execute(cmd.format(self.package))
|
||||
if sys.version_info[0] == 3:
|
||||
wfh.write(result.encode('utf-8'))
|
||||
else:
|
||||
wfh.write(result)
|
||||
|
||||
def clear(self):
|
||||
pass
|
||||
|
||||
def _init_header(self, header):
|
||||
if header is not None:
|
||||
self.header = header
|
||||
else:
|
||||
self.header = read_gfxinfo_columns(self.target)
|
||||
|
||||
def _process_raw_file(self, fh):
|
||||
found = False
|
||||
try:
|
||||
last_vsync = 0
|
||||
while True:
|
||||
for line in fh:
|
||||
if line.startswith('---PROFILEDATA---'):
|
||||
found = True
|
||||
break
|
||||
|
||||
next(fh) # headers
|
||||
for line in fh:
|
||||
if line.startswith('---PROFILEDATA---'):
|
||||
break
|
||||
entries = list(map(int, line.strip().split(',')[:-1])) # has a trailing ','
|
||||
if entries[1] <= last_vsync:
|
||||
continue # repeat frame
|
||||
last_vsync = entries[1]
|
||||
self.frames.append(entries)
|
||||
except StopIteration:
|
||||
pass
|
||||
if not found:
|
||||
logger.warning('Could not find frames data in gfxinfo output')
|
||||
return
|
||||
|
||||
|
||||
def _file_reverse_iter(fh, buf_size=1024):
|
||||
fh.seek(0, os.SEEK_END)
|
||||
offset = 0
|
||||
file_size = remaining_size = fh.tell()
|
||||
while remaining_size > 0:
|
||||
offset = min(file_size, offset + buf_size)
|
||||
fh.seek(file_size - offset)
|
||||
buf = fh.read(min(remaining_size, buf_size))
|
||||
remaining_size -= buf_size
|
||||
yield buf
|
||||
|
||||
|
||||
def gfxinfo_get_last_dump(filepath):
|
||||
"""
|
||||
Return the last gfxinfo dump from the frame collector's raw output.
|
||||
|
||||
"""
|
||||
record = ''
|
||||
with open(filepath, 'r') as fh:
|
||||
fh_iter = _file_reverse_iter(fh)
|
||||
try:
|
||||
while True:
|
||||
buf = next(fh_iter)
|
||||
ix = buf.find('** Graphics')
|
||||
if ix >= 0:
|
||||
return buf[ix:] + record
|
||||
|
||||
ix = buf.find(' **\n')
|
||||
if ix >= 0:
|
||||
buf = next(fh_iter) + buf
|
||||
ix = buf.find('** Graphics')
|
||||
if ix < 0:
|
||||
msg = '"{}" appears to be corrupted'
|
||||
raise RuntimeError(msg.format(filepath))
|
||||
return buf[ix:] + record
|
||||
record = buf + record
|
||||
except StopIteration:
|
||||
pass
|
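A brief, hedged sketch of the collector lifecycle above; ``target`` is assumed to be a connected devlib Android target and the package name is illustrative:

.. code-block:: python

    from devlib.utils.rendering import GfxinfoFrameCollector, gfxinfo_get_last_dump

    collector = GfxinfoFrameCollector(target, period=2, package='com.example.app')
    collector.start()
    # ... exercise the application ...
    collector.stop()
    collector.process_frames(outfile='gfxinfo_raw.txt')   # parse and keep the raw dump
    collector.write_frames('frames.csv')                  # per-frame stats as CSV

    # The helper below extracts only the last dump from the saved raw output.
    last_dump = gfxinfo_get_last_dump('gfxinfo_raw.txt')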
@@ -1,4 +1,4 @@
|
||||
# Copyright 2013-2015 ARM Limited
|
||||
# Copyright 2013-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -20,6 +20,7 @@ from logging import Logger
|
||||
|
||||
import serial
|
||||
|
||||
# pylint: disable=import-error,wrong-import-position,ungrouped-imports,wrong-import-order
|
||||
import pexpect
|
||||
from distutils.version import StrictVersion as V
|
||||
if V(pexpect.__version__) < V('4.0.0'):
|
||||
@@ -32,6 +33,14 @@ from pexpect import EOF, TIMEOUT # NOQA pylint: disable=W0611
|
||||
from devlib.exception import HostError
|
||||
|
||||
|
||||
class SerialLogger(Logger):
|
||||
|
||||
write = Logger.debug
|
||||
|
||||
def flush(self):
|
||||
pass
|
||||
|
||||
|
||||
def pulse_dtr(conn, state=True, duration=0.1):
|
||||
"""Set the DTR line of the specified serial connection to the specified state
|
||||
for the specified duration (note: the initial state of the line is *not* checked)."""
|
||||
@@ -40,19 +49,20 @@ def pulse_dtr(conn, state=True, duration=0.1):
|
||||
conn.setDTR(not state)
|
||||
|
||||
|
||||
def get_connection(timeout, init_dtr=None, logcls=Logger,
|
||||
*args, **kwargs):
|
||||
# pylint: disable=keyword-arg-before-vararg
|
||||
def get_connection(timeout, init_dtr=None, logcls=SerialLogger,
|
||||
logfile=None, *args, **kwargs):
|
||||
if init_dtr is not None:
|
||||
kwargs['dsrdtr'] = True
|
||||
try:
|
||||
conn = serial.Serial(*args, **kwargs)
|
||||
except serial.SerialException as e:
|
||||
raise HostError(e.message)
|
||||
raise HostError(str(e))
|
||||
if init_dtr is not None:
|
||||
conn.setDTR(init_dtr)
|
||||
conn.nonblocking()
|
||||
conn.flushOutput()
|
||||
target = fdpexpect.fdspawn(conn.fileno(), timeout=timeout)
|
||||
target = fdpexpect.fdspawn(conn.fileno(), timeout=timeout, logfile=logfile)
|
||||
target.logfile_read = logcls('read')
|
||||
target.logfile_send = logcls('send')
|
||||
|
||||
@@ -81,9 +91,10 @@ def write_characters(conn, line, delay=0.05):
|
||||
conn.sendline('')
|
||||
|
||||
|
||||
# pylint: disable=keyword-arg-before-vararg
|
||||
@contextmanager
|
||||
def open_serial_connection(timeout, get_conn=False, init_dtr=None,
|
||||
logcls=Logger, *args, **kwargs):
|
||||
logcls=SerialLogger, *args, **kwargs):
|
||||
"""
|
||||
Opens a serial connection to a device.
|
||||
|
||||
@@ -103,11 +114,13 @@ def open_serial_connection(timeout, get_conn=False, init_dtr=None,
|
||||
"""
|
||||
target, conn = get_connection(timeout, init_dtr=init_dtr,
|
||||
logcls=logcls, *args, **kwargs)
|
||||
|
||||
if get_conn:
|
||||
yield target, conn
|
||||
target_and_conn = (target, conn)
|
||||
else:
|
||||
yield target
|
||||
|
||||
target.close() # Closes the file descriptor used by the conn.
|
||||
del conn
|
||||
target_and_conn = target
|
||||
|
||||
try:
|
||||
yield target_and_conn
|
||||
finally:
|
||||
target.close() # Closes the file descriptor used by the conn.
|
||||
|
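A minimal sketch of the context manager above, assuming this is devlib's serial helper module (device path, baud rate and prompt pattern are illustrative):

.. code-block:: python

    from devlib.utils.serial_port import open_serial_connection

    # Extra keyword arguments are passed through to serial.Serial().
    with open_serial_connection(timeout=30, port='/dev/ttyUSB0', baudrate=115200) as target:
        target.sendline('')
        target.expect('#')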
@@ -1,4 +1,4 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
# Copyright 2014-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -23,8 +23,13 @@ import threading
|
||||
import tempfile
|
||||
import shutil
|
||||
import socket
|
||||
import sys
|
||||
import time
|
||||
import atexit
|
||||
from pipes import quote
|
||||
from future.utils import raise_from
|
||||
|
||||
# pylint: disable=import-error,wrong-import-position,ungrouped-imports,wrong-import-order
|
||||
import pexpect
|
||||
from distutils.version import StrictVersion as V
|
||||
if V(pexpect.__version__) < V('4.0.0'):
|
||||
@@ -33,8 +38,11 @@ else:
|
||||
from pexpect import pxssh
|
||||
from pexpect import EOF, TIMEOUT, spawn
|
||||
|
||||
from devlib.exception import HostError, TargetError, TimeoutError
|
||||
from devlib.utils.misc import which, strip_bash_colors, escape_single_quotes, check_output
|
||||
# pylint: disable=redefined-builtin,wrong-import-position
|
||||
from devlib.exception import (HostError, TargetStableError, TargetNotRespondingError,
|
||||
TimeoutError, TargetTransientError)
|
||||
from devlib.utils.misc import (which, strip_bash_colors, check_output,
|
||||
sanitize_cmd_template, memoized)
|
||||
from devlib.utils.types import boolean
|
||||
|
||||
|
||||
@@ -55,7 +63,7 @@ def ssh_get_shell(host, username, password=None, keyfile=None, port=None, timeou
|
||||
raise ValueError('keyfile may not be used with a telnet connection.')
|
||||
conn = TelnetPxssh(original_prompt=original_prompt)
|
||||
else: # ssh
|
||||
conn = pxssh.pxssh()
|
||||
conn = pxssh.pxssh(echo=False)
|
||||
|
||||
try:
|
||||
if keyfile:
|
||||
@@ -67,10 +75,10 @@ def ssh_get_shell(host, username, password=None, keyfile=None, port=None, timeou
|
||||
timeout -= time.time() - start_time
|
||||
if timeout <= 0:
|
||||
message = 'Could not connect to {}; is the host name correct?'
|
||||
raise TargetError(message.format(host))
|
||||
raise TargetTransientError(message.format(host))
|
||||
time.sleep(5)
|
||||
|
||||
conn.setwinsize(500,200)
|
||||
conn.setwinsize(500, 200)
|
||||
conn.sendline('')
|
||||
conn.prompt()
|
||||
conn.setecho(False)
|
||||
@@ -144,12 +152,13 @@ class SshConnection(object):
|
||||
|
||||
default_password_prompt = '[sudo] password'
|
||||
max_cancel_attempts = 5
|
||||
default_timeout=10
|
||||
default_timeout = 10
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
return self.host
|
||||
|
||||
# pylint: disable=unused-argument,super-init-not-called
|
||||
def __init__(self,
|
||||
host,
|
||||
username,
|
||||
@@ -160,7 +169,8 @@ class SshConnection(object):
|
||||
telnet=False,
|
||||
password_prompt=None,
|
||||
original_prompt=None,
|
||||
platform=None
|
||||
platform=None,
|
||||
sudo_cmd="sudo -- sh -c {}"
|
||||
):
|
||||
self.host = host
|
||||
self.username = username
|
||||
@@ -169,9 +179,11 @@ class SshConnection(object):
|
||||
self.port = port
|
||||
self.lock = threading.Lock()
|
||||
self.password_prompt = password_prompt if password_prompt is not None else self.default_password_prompt
|
||||
self.sudo_cmd = sanitize_cmd_template(sudo_cmd)
|
||||
logger.debug('Logging in {}@{}'.format(username, host))
|
||||
timeout = timeout if timeout is not None else self.default_timeout
|
||||
self.conn = ssh_get_shell(host, username, password, self.keyfile, port, timeout, False, None)
|
||||
atexit.register(self.close)
|
||||
|
||||
def push(self, source, dest, timeout=30):
|
||||
dest = '{}@{}:{}'.format(self.username, self.host, dest)
|
||||
@@ -182,7 +194,7 @@ class SshConnection(object):
|
||||
return self._scp(source, dest, timeout)
|
||||
|
||||
def execute(self, command, timeout=None, check_exit_code=True,
|
||||
as_root=False, strip_colors=True): #pylint: disable=unused-argument
|
||||
as_root=False, strip_colors=True, will_succeed=False): #pylint: disable=unused-argument
|
||||
if command == '':
|
||||
# Empty command is valid but the __devlib_ec stuff below will
|
||||
# produce a syntax error with bash. Treat as a special case.
|
||||
@@ -190,46 +202,59 @@ class SshConnection(object):
|
||||
try:
|
||||
with self.lock:
|
||||
_command = '({}); __devlib_ec=$?; echo; echo $__devlib_ec'.format(command)
|
||||
raw_output = self._execute_and_wait_for_prompt(
|
||||
_command, timeout, as_root, strip_colors)
|
||||
output, exit_code_text, _ = raw_output.rsplit('\r\n', 2)
|
||||
full_output = self._execute_and_wait_for_prompt(_command, timeout, as_root, strip_colors)
|
||||
split_output = full_output.rsplit('\r\n', 2)
|
||||
try:
|
||||
output, exit_code_text, _ = split_output
|
||||
except ValueError as e:
|
||||
raise TargetStableError(
|
||||
"cannot split reply (target misconfiguration?):\n'{}'".format(full_output))
|
||||
if check_exit_code:
|
||||
try:
|
||||
exit_code = int(exit_code_text)
|
||||
if exit_code:
|
||||
message = 'Got exit code {}\nfrom: {}\nOUTPUT: {}'
|
||||
raise TargetError(message.format(exit_code, command, output))
|
||||
raise TargetStableError(message.format(exit_code, command, output))
|
||||
except (ValueError, IndexError):
|
||||
logger.warning(
|
||||
'Could not get exit code for "{}",\ngot: "{}"'\
|
||||
.format(command, exit_code_text))
|
||||
return output
|
||||
except EOF:
|
||||
raise TargetError('Connection lost.')
|
||||
raise TargetNotRespondingError('Connection lost.')
|
||||
except TargetStableError as e:
|
||||
if will_succeed:
|
||||
raise TargetTransientError(e)
|
||||
else:
|
||||
raise
|
||||
|
||||
def background(self, command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, as_root=False):
|
||||
try:
|
||||
port_string = '-p {}'.format(self.port) if self.port else ''
|
||||
keyfile_string = '-i {}'.format(self.keyfile) if self.keyfile else ''
|
||||
if as_root:
|
||||
command = "sudo -- sh -c '{}'".format(command)
|
||||
command = self.sudo_cmd.format(command)
|
||||
command = '{} {} {} {}@{} {}'.format(ssh, keyfile_string, port_string, self.username, self.host, command)
|
||||
logger.debug(command)
|
||||
if self.password:
|
||||
command = _give_password(self.password, command)
|
||||
command, _ = _give_password(self.password, command)
|
||||
return subprocess.Popen(command, stdout=stdout, stderr=stderr, shell=True)
|
||||
except EOF:
|
||||
raise TargetError('Connection lost.')
|
||||
raise TargetNotRespondingError('Connection lost.')
|
||||
|
||||
def close(self):
|
||||
logger.debug('Logging out {}@{}'.format(self.username, self.host))
|
||||
self.conn.logout()
|
||||
try:
|
||||
self.conn.logout()
|
||||
except:
|
||||
logger.debug('Connection lost.')
|
||||
self.conn.close(force=True)
|
||||
|
||||
def cancel_running_command(self):
|
||||
# simulate impatiently hitting ^C until command prompt appears
|
||||
logger.debug('Sending ^C')
|
||||
for _ in xrange(self.max_cancel_attempts):
|
||||
self.conn.sendline(chr(3))
|
||||
for _ in range(self.max_cancel_attempts):
|
||||
self._sendline(chr(3))
|
||||
if self.conn.prompt(0.1):
|
||||
return True
|
||||
return False
|
||||
@@ -240,25 +265,24 @@ class SshConnection(object):
|
||||
# As we're already root, there is no need to use sudo.
|
||||
as_root = False
|
||||
if as_root:
|
||||
command = "sudo -- sh -c '{}'".format(escape_single_quotes(command))
|
||||
command = self.sudo_cmd.format(quote(command))
|
||||
if log:
|
||||
logger.debug(command)
|
||||
self.conn.sendline(command)
|
||||
self._sendline(command)
|
||||
if self.password:
|
||||
index = self.conn.expect_exact([self.password_prompt, TIMEOUT], timeout=0.5)
|
||||
if index == 0:
|
||||
self.conn.sendline(self.password)
|
||||
self._sendline(self.password)
|
||||
else: # not as_root
|
||||
if log:
|
||||
logger.debug(command)
|
||||
self.conn.sendline(command)
|
||||
self._sendline(command)
|
||||
timed_out = self._wait_for_prompt(timeout)
|
||||
# the regex removes line breaks potential introduced when writing
|
||||
# command to shell.
|
||||
output = process_backspaces(self.conn.before)
|
||||
output = re.sub(r'\r([^\n])', r'\1', output)
|
||||
if '\r\n' in output: # strip the echoed command
|
||||
output = output.split('\r\n', 1)[1]
|
||||
if sys.version_info[0] == 3:
|
||||
output = process_backspaces(self.conn.before.decode(sys.stdout.encoding or 'utf-8', 'replace'))
|
||||
else:
|
||||
output = process_backspaces(self.conn.before)
|
||||
|
||||
if timed_out:
|
||||
self.cancel_running_command()
|
||||
raise TimeoutError(command, output)
|
||||
@@ -279,23 +303,40 @@ class SshConnection(object):
|
||||
# fails to connect to a device if port is explicitly specified using -P
|
||||
# option, even if it is the default port, 22. To minimize this problem,
|
||||
# only specify -P for scp if the port is *not* the default.
|
||||
port_string = '-P {}'.format(self.port) if (self.port and self.port != 22) else ''
|
||||
keyfile_string = '-i {}'.format(self.keyfile) if self.keyfile else ''
|
||||
command = '{} -r {} {} {} {}'.format(scp, keyfile_string, port_string, source, dest)
|
||||
pass_string = ''
|
||||
port_string = '-P {}'.format(quote(str(self.port))) if (self.port and self.port != 22) else ''
|
||||
keyfile_string = '-i {}'.format(quote(self.keyfile)) if self.keyfile else ''
|
||||
command = '{} -r {} {} {} {}'.format(scp, keyfile_string, port_string, quote(source), quote(dest))
|
||||
command_redacted = command
|
||||
logger.debug(command)
|
||||
if self.password:
|
||||
command = _give_password(self.password, command)
|
||||
command, command_redacted = _give_password(self.password, command)
|
||||
try:
|
||||
check_output(command, timeout=timeout, shell=True)
|
||||
except subprocess.CalledProcessError as e:
|
||||
raise subprocess.CalledProcessError(e.returncode, e.cmd.replace(pass_string, ''), e.output)
|
||||
raise_from(HostError("Failed to copy file with '{}'. Output:\n{}".format(
|
||||
command_redacted, e.output)), None)
|
||||
except TimeoutError as e:
|
||||
raise TimeoutError(e.command.replace(pass_string, ''), e.output)
|
||||
raise TimeoutError(command_redacted, e.output)
|
||||
|
||||
def _sendline(self, command):
|
||||
# Workaround for https://github.com/pexpect/pexpect/issues/552
|
||||
if len(command) == self._get_window_size()[1] - self._get_prompt_length():
|
||||
command += ' '
|
||||
self.conn.sendline(command)
|
||||
|
||||
@memoized
|
||||
def _get_prompt_length(self):
|
||||
self.conn.sendline()
|
||||
self.conn.prompt()
|
||||
return len(self.conn.after)
|
||||
|
||||
@memoized
|
||||
def _get_window_size(self):
|
||||
return self.conn.getwinsize()
|
||||
|
||||
class TelnetConnection(SshConnection):
|
||||
|
||||
# pylint: disable=super-init-not-called
|
||||
def __init__(self,
|
||||
host,
|
||||
username,
|
||||
@@ -319,6 +360,7 @@ class TelnetConnection(SshConnection):
|
||||
|
||||
class Gem5Connection(TelnetConnection):
|
||||
|
||||
# pylint: disable=super-init-not-called
|
||||
def __init__(self,
|
||||
platform,
|
||||
host=None,
|
||||
@@ -328,14 +370,15 @@ class Gem5Connection(TelnetConnection):
|
||||
timeout=None,
|
||||
password_prompt=None,
|
||||
original_prompt=None,
|
||||
strip_echoed_commands=False,
|
||||
):
|
||||
if host is not None:
|
||||
host_system = socket.gethostname()
|
||||
if host_system != host:
|
||||
raise TargetError("Gem5Connection can only connect to gem5 "
|
||||
"simulations on your current host, which "
|
||||
"differs from the one given {}!"
|
||||
.format(host_system, host))
|
||||
raise TargetStableError("Gem5Connection can only connect to gem5 "
|
||||
"simulations on your current host {}, which "
|
||||
"differs from the one given {}!"
|
||||
.format(host_system, host))
|
||||
if username is not None and username != 'root':
|
||||
raise ValueError('User should be root in gem5!')
|
||||
if password is not None and password != '':
|
||||
@@ -344,6 +387,8 @@ class Gem5Connection(TelnetConnection):
|
||||
self.is_rooted = True
|
||||
self.password = None
|
||||
self.port = None
|
||||
# Flag to indicate whether commands are echoed by the simulated system
|
||||
self.strip_echoed_commands = strip_echoed_commands
|
||||
# Long timeouts to account for gem5 being slow
|
||||
# Can be overriden if the given timeout is longer
|
||||
self.default_timeout = 3600
|
||||
@@ -418,13 +463,12 @@ class Gem5Connection(TelnetConnection):
|
||||
if os.path.basename(dest) != filename:
|
||||
dest = os.path.join(dest, filename)
|
||||
# Back to the gem5 world
|
||||
self._gem5_shell("ls -al {}{}".format(self.gem5_input_dir, filename))
|
||||
self._gem5_shell("cat '{}''{}' > '{}'".format(self.gem5_input_dir,
|
||||
filename,
|
||||
dest))
|
||||
filename = quote(self.gem5_input_dir + filename)
|
||||
self._gem5_shell("ls -al {}".format(filename))
|
||||
self._gem5_shell("cat {} > {}".format(filename, quote(dest)))
|
||||
self._gem5_shell("sync")
|
||||
self._gem5_shell("ls -al {}".format(dest))
|
||||
self._gem5_shell("ls -al {}".format(self.gem5_input_dir))
|
||||
self._gem5_shell("ls -al {}".format(quote(dest)))
|
||||
self._gem5_shell("ls -al {}".format(quote(self.gem5_input_dir)))
|
||||
logger.debug("Push complete.")
|
||||
|
||||
def pull(self, source, dest, timeout=0): #pylint: disable=unused-argument
|
||||
@@ -439,36 +483,59 @@ class Gem5Connection(TelnetConnection):
|
||||
# First check if the connection is set up to interact with gem5
|
||||
self._check_ready()
|
||||
|
||||
filename = os.path.basename(source)
|
||||
result = self._gem5_shell("ls {}".format(source))
|
||||
files = strip_bash_colors(result).split()
|
||||
|
||||
logger.debug("pull_file {} {}".format(source, filename))
|
||||
# We don't check the exit code here because it is non-zero if the source
|
||||
# and destination are the same. The ls below will cause an error if the
|
||||
# file was not where we expected it to be.
|
||||
if os.path.dirname(source) != os.getcwd():
|
||||
self._gem5_shell("cat '{}' > '{}'".format(source, filename))
|
||||
self._gem5_shell("sync")
|
||||
self._gem5_shell("ls -la {}".format(filename))
|
||||
logger.debug('Finished the copy in the simulator')
|
||||
self._gem5_util("writefile {}".format(filename))
|
||||
for filename in files:
|
||||
dest_file = os.path.basename(filename)
|
||||
logger.debug("pull_file {} {}".format(filename, dest_file))
|
||||
# writefile needs the file to be copied to be in the current
|
||||
# working directory so if needed, copy to the working directory
|
||||
# We don't check the exit code here because it is non-zero if the
|
||||
# source and destination are the same. The ls below will cause an
|
||||
# error if the file was not where we expected it to be.
|
||||
if os.path.isabs(source):
|
||||
if os.path.dirname(source) != self.execute('pwd',
|
||||
check_exit_code=False):
|
||||
self._gem5_shell("cat {} > {}".format(quote(filename),
|
||||
quote(dest_file)))
|
||||
self._gem5_shell("sync")
|
||||
self._gem5_shell("ls -la {}".format(dest_file))
|
||||
logger.debug('Finished the copy in the simulator')
|
||||
self._gem5_util("writefile {}".format(dest_file))
|
||||
|
||||
if 'cpu' not in filename:
|
||||
while not os.path.exists(os.path.join(self.gem5_out_dir, filename)):
|
||||
time.sleep(1)
|
||||
if 'cpu' not in filename:
|
||||
while not os.path.exists(os.path.join(self.gem5_out_dir,
|
||||
dest_file)):
|
||||
time.sleep(1)
|
||||
|
||||
# Perform the local move
|
||||
shutil.move(os.path.join(self.gem5_out_dir, filename), dest)
|
||||
logger.debug("Pull complete.")
|
||||
# Perform the local move
|
||||
if os.path.exists(os.path.join(dest, dest_file)):
|
||||
logger.warning(
|
||||
'Destination file {} already exists!'\
|
||||
.format(dest_file))
|
||||
else:
|
||||
shutil.move(os.path.join(self.gem5_out_dir, dest_file), dest)
|
||||
logger.debug("Pull complete.")
|
||||
|
||||
def execute(self, command, timeout=1000, check_exit_code=True,
|
||||
as_root=False, strip_colors=True):
|
||||
as_root=False, strip_colors=True, will_succeed=False):
|
||||
"""
|
||||
Execute a command on the gem5 platform
|
||||
"""
|
||||
# First check if the connection is set up to interact with gem5
|
||||
self._check_ready()
|
||||
|
||||
output = self._gem5_shell(command, as_root=as_root)
|
||||
try:
|
||||
output = self._gem5_shell(command,
|
||||
check_exit_code=check_exit_code,
|
||||
as_root=as_root)
|
||||
except TargetStableError as e:
|
||||
if will_succeed:
|
||||
raise TargetTransientError(e)
|
||||
else:
|
||||
raise
|
||||
|
||||
if strip_colors:
|
||||
output = strip_bash_colors(output)
|
||||
return output
|
||||
@@ -484,8 +551,8 @@ class Gem5Connection(TelnetConnection):
|
||||
trial = 0
|
||||
while os.path.isfile(redirection_file):
|
||||
# Log file already exists so add to name
|
||||
redirection_file = 'BACKGROUND_{}{}.log'.format(command_name, trial)
|
||||
trial += 1
|
||||
redirection_file = 'BACKGROUND_{}{}.log'.format(command_name, trial)
|
||||
trial += 1
|
||||
|
||||
# Create the command to pass on to gem5 shell
|
||||
complete_command = '{} >> {} 2>&1 &'.format(command, redirection_file)
|
||||
@@ -503,6 +570,10 @@ class Gem5Connection(TelnetConnection):
|
||||
"""
|
||||
gem5_logger.info("Gracefully terminating the gem5 simulation.")
|
||||
try:
|
||||
# Unmount the virtio device BEFORE we kill the
|
||||
# simulation. This is done to simplify checkpointing at
|
||||
# the end of a simulation!
|
||||
self._unmount_virtio()
|
||||
self._gem5_util("exit")
|
||||
self.gem5simulation.wait()
|
||||
except EOF:
|
||||
@@ -511,7 +582,7 @@ class Gem5Connection(TelnetConnection):
|
||||
try:
|
||||
shutil.rmtree(self.gem5_interact_dir)
|
||||
except OSError:
|
||||
gem5_logger.warn("Failed to remove the temporary directory!")
|
||||
gem5_logger.warning("Failed to remove the temporary directory!")
|
||||
|
||||
# Delete the lock file
|
||||
os.remove(self.lock_file_name)
|
||||
@@ -525,7 +596,22 @@ class Gem5Connection(TelnetConnection):
|
||||
|
||||
self.connect_gem5(port, gem5_simulation, gem5_interact_dir, gem5_out_dir)
|
||||
|
||||
# Handle the EOF exception raised by pexpect
|
||||
# pylint: disable=no-self-use
|
||||
def _gem5_EOF_handler(self, gem5_simulation, gem5_out_dir, err):
|
||||
# If we have reached the "EOF", it typically means
|
||||
# that gem5 crashed and closed the connection. Let's
|
||||
# check and actually tell the user what happened here,
|
||||
# rather than spewing out pexpect errors.
|
||||
if gem5_simulation.poll():
|
||||
message = "The gem5 process has crashed with error code {}!\n\tPlease see {} for details."
|
||||
raise TargetNotRespondingError(message.format(gem5_simulation.poll(), gem5_out_dir))
|
||||
else:
|
||||
# Let's re-throw the exception in this case.
|
||||
raise err
|
||||
|
||||
# This function connects to the gem5 simulation
|
||||
# pylint: disable=too-many-statements
|
||||
def connect_gem5(self, port, gem5_simulation, gem5_interact_dir,
|
||||
gem5_out_dir):
|
||||
"""
|
||||
@@ -547,7 +633,7 @@ class Gem5Connection(TelnetConnection):
|
||||
lock_file_name = '{}{}_{}.LOCK'.format(self.lock_directory, host, port)
|
||||
if os.path.isfile(lock_file_name):
|
||||
# There is already a connection to this gem5 simulation
|
||||
raise TargetError('There is already a connection to the gem5 '
|
||||
raise TargetStableError('There is already a connection to the gem5 '
|
||||
'simulation using port {} on {}!'
|
||||
.format(port, host))
|
||||
|
||||
@@ -562,9 +648,11 @@ class Gem5Connection(TelnetConnection):
|
||||
break
|
||||
except pxssh.ExceptionPxssh:
|
||||
pass
|
||||
except EOF as err:
|
||||
self._gem5_EOF_handler(gem5_simulation, gem5_out_dir, err)
|
||||
else:
|
||||
gem5_simulation.kill()
|
||||
raise TargetError("Failed to connect to the gem5 telnet session.")
|
||||
raise TargetNotRespondingError("Failed to connect to the gem5 telnet session.")
|
||||
|
||||
gem5_logger.info("Connected! Waiting for prompt...")
|
||||
|
||||
@@ -582,13 +670,18 @@ class Gem5Connection(TelnetConnection):
|
||||
self._login_to_device()
|
||||
except TIMEOUT:
|
||||
pass
|
||||
except EOF as err:
|
||||
self._gem5_EOF_handler(gem5_simulation, gem5_out_dir, err)
|
||||
|
||||
try:
|
||||
# Try and force a prompt to be shown
|
||||
self.conn.send('\n')
|
||||
self.conn.expect([r'# ', self.conn.UNIQUE_PROMPT, r'\[PEXPECT\][\\\$\#]+ '], timeout=60)
|
||||
self.conn.expect([r'# ', r'\$ ', self.conn.UNIQUE_PROMPT, r'\[PEXPECT\][\\\$\#]+ '], timeout=60)
|
||||
prompt_found = True
|
||||
except TIMEOUT:
|
||||
pass
|
||||
except EOF as err:
|
||||
self._gem5_EOF_handler(gem5_simulation, gem5_out_dir, err)
|
||||
|
||||
gem5_logger.info("Successfully logged in")
|
||||
gem5_logger.info("Setting unique prompt...")
|
||||
@@ -654,7 +747,7 @@ class Gem5Connection(TelnetConnection):
|
||||
def _gem5_util(self, command):
|
||||
""" Execute a gem5 utility command using the m5 binary on the device """
|
||||
if self.m5_path is None:
|
||||
raise TargetError('Path to m5 binary on simulated system is not set!')
|
||||
raise TargetStableError('Path to m5 binary on simulated system is not set!')
|
||||
self._gem5_shell('{} {}'.format(self.m5_path, command))
|
||||
|
||||
def _gem5_shell(self, command, as_root=False, timeout=None, check_exit_code=True, sync=True): # pylint: disable=R0912
|
||||
@@ -668,13 +761,16 @@ class Gem5Connection(TelnetConnection):
|
||||
fails, warn, but continue with the potentially wrong output.
|
||||
|
||||
The exit code is also checked by default, and non-zero exit codes will
|
||||
raise a TargetError.
|
||||
raise a TargetStableError.
|
||||
"""
|
||||
if sync:
|
||||
self._sync_gem5_shell()
|
||||
|
||||
gem5_logger.debug("gem5_shell command: {}".format(command))
|
||||
|
||||
if as_root:
|
||||
command = 'echo {} | su'.format(quote(command))
|
||||
|
||||
# Send the actual command
|
||||
self.conn.send("{}\n".format(command))
|
||||
|
||||
@@ -694,17 +790,17 @@ class Gem5Connection(TelnetConnection):
|
||||
# prompt has returned. Hence, we have a bit of an issue. We
|
||||
# warn, and return the whole output.
|
||||
if command_index == -1:
|
||||
gem5_logger.warn("gem5_shell: Unable to match command in "
|
||||
gem5_logger.warning("gem5_shell: Unable to match command in "
|
||||
"command output. Expect parsing errors!")
|
||||
command_index = 0
|
||||
|
||||
output = output[command_index + len(command):].strip()
|
||||
|
||||
# It is possible that gem5 will echo the command. Therefore, we need to
|
||||
# remove that too!
|
||||
command_index = output.find(command)
|
||||
if command_index != -1:
|
||||
output = output[command_index + len(command):].strip()
|
||||
# If the gem5 system echoes the executed command, we need to remove that too!
|
||||
if self.strip_echoed_commands:
|
||||
command_index = output.find(command)
|
||||
if command_index != -1:
|
||||
output = output[command_index + len(command):].strip()
|
||||
|
||||
gem5_logger.debug("gem5_shell output: {}".format(output))
|
||||
|
||||
@@ -721,7 +817,7 @@ class Gem5Connection(TelnetConnection):
|
||||
exit_code = int(exit_code_text.split()[0])
|
||||
if exit_code:
|
||||
message = 'Got exit code {}\nfrom: {}\nOUTPUT: {}'
|
||||
raise TargetError(message.format(exit_code, command, output))
|
||||
raise TargetStableError(message.format(exit_code, command, output))
|
||||
except (ValueError, IndexError):
|
||||
gem5_logger.warning('Could not get exit code for "{}",\ngot: "{}"'.format(command, exit_code_text))
|
||||
|
||||
@@ -733,9 +829,33 @@ class Gem5Connection(TelnetConnection):
|
||||
"""
|
||||
gem5_logger.info("Mounting VirtIO device in simulated system")
|
||||
|
||||
self._gem5_shell('su -c "mkdir -p {}" root'.format(self.gem5_input_dir))
|
||||
self._gem5_shell('mkdir -p {}'.format(self.gem5_input_dir), as_root=True)
|
||||
mount_command = "mount -t 9p -o trans=virtio,version=9p2000.L,aname={} gem5 {}".format(self.gem5_interact_dir, self.gem5_input_dir)
|
||||
self._gem5_shell(mount_command)
|
||||
self._gem5_shell(mount_command, as_root=True)
|
||||
|
||||
def _unmount_virtio(self):
|
||||
"""
|
||||
Unmount the VirtIO device in the simulated system.
|
||||
"""
|
||||
gem5_logger.info("Unmounting VirtIO device in simulated system")
|
||||
|
||||
unmount_command = "umount {}".format(self.gem5_input_dir)
|
||||
self._gem5_shell(unmount_command, as_root=True)
|
||||
|
||||
def take_checkpoint(self):
|
||||
"""
|
||||
Take a checkpoint of the simulated system.
|
||||
|
||||
In order to take a checkpoint we first unmount the virtio
|
||||
device, then take the checkpoint, and then remount the device to
|
||||
allow us to continue the current run. This needs to be done to
|
||||
ensure that future gem5 simulations are able to utilise the
|
||||
virtio device (i.e., we need to drop the current state
|
||||
information that the device has).
|
||||
"""
|
||||
self._unmount_virtio()
|
||||
self._gem5_util("checkpoint")
|
||||
self._mount_virtio()
|
||||
|
||||
def _move_to_temp_dir(self, source):
|
||||
"""
|
||||
@@ -752,7 +872,7 @@ class Gem5Connection(TelnetConnection):
|
||||
Check if the gem5 platform is ready
|
||||
"""
|
||||
if not self.ready:
|
||||
raise TargetError('Gem5 is not ready to interact yet')
|
||||
raise TargetTransientError('Gem5 is not ready to interact yet')
|
||||
|
||||
def _wait_for_boot(self):
|
||||
pass
|
||||
@@ -761,7 +881,8 @@ class Gem5Connection(TelnetConnection):
|
||||
"""
|
||||
Internal method to check if the target has a certain file
|
||||
"""
|
||||
command = 'if [ -e \'{}\' ]; then echo 1; else echo 0; fi'
|
||||
filepath = quote(filepath)
|
||||
command = 'if [ -e {} ]; then echo 1; else echo 0; fi'
|
||||
output = self.execute(command.format(filepath), as_root=self.is_rooted)
|
||||
return boolean(output.strip())
|
||||
|
||||
@@ -817,8 +938,10 @@ class AndroidGem5Connection(Gem5Connection):
|
||||
def _give_password(password, command):
|
||||
if not sshpass:
|
||||
raise HostError('Must have sshpass installed on the host in order to use password-based auth.')
|
||||
pass_string = "sshpass -p '{}' ".format(password)
|
||||
return pass_string + command
|
||||
pass_template = "sshpass -p {} "
|
||||
pass_string = pass_template.format(quote(password))
|
||||
redacted_string = pass_template.format(quote('<redacted>'))
|
||||
return (pass_string + command, redacted_string + command)
|
||||
|
||||
|
||||
def _check_env():
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
# Copyright 2014-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -26,6 +26,11 @@ is not the best language to use for configuration.
|
||||
|
||||
"""
|
||||
import math
|
||||
import re
|
||||
import sys
|
||||
from functools import total_ordering
|
||||
|
||||
from past.builtins import basestring
|
||||
|
||||
from devlib.utils.misc import isiterable, to_identifier, ranges_to_list, list_to_mask
|
||||
|
||||
@@ -68,6 +73,15 @@ def numeric(value):
|
||||
"""
|
||||
if isinstance(value, int):
|
||||
return value
|
||||
|
||||
if isinstance(value, basestring):
|
||||
value = value.strip()
|
||||
if value.endswith('%'):
|
||||
try:
|
||||
return float(value.rstrip('%')) / 100
|
||||
except ValueError:
|
||||
raise ValueError('Not numeric: {}'.format(value))
|
||||
|
||||
try:
|
||||
fvalue = float(value)
|
||||
except ValueError:
|
||||
@@ -79,6 +93,7 @@ def numeric(value):
|
||||
return fvalue
|
||||
|
||||
|
||||
@total_ordering
|
||||
class caseless_string(str):
|
||||
"""
|
||||
Just like built-in Python string except case-insensitive on comparisons. However, the
|
||||
@@ -92,12 +107,17 @@ class caseless_string(str):
|
||||
return self.lower() == other
|
||||
|
||||
def __ne__(self, other):
|
||||
return not self.__eq__(other)
|
||||
|
||||
def __cmp__(self, other):
|
||||
if isinstance(basestring, other):
|
||||
if isinstance(other, basestring):
|
||||
other = other.lower()
|
||||
return cmp(self.lower(), other)
|
||||
return self.lower() != other
|
||||
|
||||
def __lt__(self, other):
|
||||
if isinstance(other, basestring):
|
||||
other = other.lower()
|
||||
return self.lower() < other
|
||||
|
||||
def __hash__(self):
|
||||
return hash(self.lower())
|
||||
|
||||
def format(self, *args, **kwargs):
|
||||
return caseless_string(super(caseless_string, self).format(*args, **kwargs))
|
||||
@@ -111,3 +131,40 @@ def bitmask(value):
|
||||
if not isinstance(value, int):
|
||||
raise ValueError(value)
|
||||
return value
|
||||
|
||||
|
||||
regex_type = type(re.compile(''))
|
||||
|
||||
|
||||
if sys.version_info[0] == 3:
|
||||
def regex(value):
|
||||
if isinstance(value, regex_type):
|
||||
if isinstance(value.pattern, str):
|
||||
return value
|
||||
return re.compile(value.pattern.decode(),
|
||||
value.flags | re.UNICODE)
|
||||
else:
|
||||
if isinstance(value, bytes):
|
||||
value = value.decode()
|
||||
return re.compile(value)
|
||||
|
||||
|
||||
def bytes_regex(value):
|
||||
if isinstance(value, regex_type):
|
||||
if isinstance(value.pattern, bytes):
|
||||
return value
|
||||
return re.compile(value.pattern.encode(sys.stdout.encoding or 'utf-8'),
|
||||
value.flags & ~re.UNICODE)
|
||||
else:
|
||||
if isinstance(value, str):
|
||||
value = value.encode(sys.stdout.encoding or 'utf-8')
|
||||
return re.compile(value)
|
||||
else:
|
||||
def regex(value):
|
||||
if isinstance(value, regex_type):
|
||||
return value
|
||||
else:
|
||||
return re.compile(value)
|
||||
|
||||
|
||||
bytes_regex = regex
|
||||
|
@@ -113,4 +113,3 @@ class UbootMenu(object):
|
||||
except TIMEOUT:
|
||||
pass
|
||||
self.conn.buffer = ''
|
||||
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
# Copyright 2014-2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -19,6 +19,8 @@ import time
|
||||
import logging
|
||||
from copy import copy
|
||||
|
||||
from past.builtins import basestring
|
||||
|
||||
from devlib.utils.serial_port import write_characters, TIMEOUT
|
||||
from devlib.utils.types import boolean
|
||||
|
||||
@@ -193,14 +195,14 @@ class UefiMenu(object):
|
||||
is not in the current menu, ``LookupError`` will be raised."""
|
||||
if not self.prompt:
|
||||
self.read_menu(timeout)
|
||||
return self.options.items()
|
||||
return list(self.options.items())
|
||||
|
||||
def get_option_index(self, text, timeout=default_timeout):
|
||||
"""Returns the menu index of the specified option text (uses regex matching). If the option
|
||||
is not in the current menu, ``LookupError`` will be raised."""
|
||||
if not self.prompt:
|
||||
self.read_menu(timeout)
|
||||
for k, v in self.options.iteritems():
|
||||
for k, v in self.options.items():
|
||||
if re.search(text, v):
|
||||
return k
|
||||
raise LookupError(text)
|
||||
@@ -235,5 +237,3 @@ class UefiMenu(object):
|
||||
self.options = {}
|
||||
self.prompt = None
|
||||
self.empty_buffer()
|
||||
|
||||
|
||||
|
devlib/utils/version.py (new file, 45 lines)
@@ -0,0 +1,45 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import os
|
||||
import sys
|
||||
from collections import namedtuple
|
||||
from subprocess import Popen, PIPE
|
||||
|
||||
|
||||
VersionTuple = namedtuple('Version', ['major', 'minor', 'revision', 'dev'])
|
||||
|
||||
version = VersionTuple(1, 1, 1, '')
|
||||
|
||||
|
||||
def get_devlib_version():
|
||||
version_string = '{}.{}.{}'.format(
|
||||
version.major, version.minor, version.revision)
|
||||
if version.dev:
|
||||
version_string += '.{}'.format(version.dev)
|
||||
return version_string
|
||||
|
||||
|
||||
def get_commit():
|
||||
p = Popen(['git', 'rev-parse', 'HEAD'], cwd=os.path.dirname(__file__),
|
||||
stdout=PIPE, stderr=PIPE)
|
||||
std, _ = p.communicate()
|
||||
p.wait()
|
||||
if p.returncode:
|
||||
return None
|
||||
if sys.version_info[0] == 3 and isinstance(std, bytes):
|
||||
return std[:8].decode(sys.stdout.encoding or 'utf-8', 'replace')
|
||||
else:
|
||||
return std[:8]
|
@@ -31,6 +31,9 @@ import shlex
|
||||
# ones.
|
||||
extensions = [
|
||||
'sphinx.ext.autodoc',
|
||||
'sphinx.ext.graphviz',
|
||||
'sphinx.ext.mathjax',
|
||||
'sphinx.ext.todo',
|
||||
'sphinx.ext.viewcode',
|
||||
]
|
||||
|
||||
@@ -58,9 +61,9 @@ author = u'ARM Limited'
|
||||
# built documents.
|
||||
#
|
||||
# The short X.Y version.
|
||||
version = '0.1'
|
||||
version = '1.0.0'
|
||||
# The full version, including alpha/beta/rc tags.
|
||||
release = '0.1'
|
||||
release = '1.0.0'
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
@@ -104,7 +107,7 @@ pygments_style = 'sphinx'
|
||||
#keep_warnings = False
|
||||
|
||||
# If true, `todo` and `todoList` produce output, else they produce nothing.
|
||||
todo_include_todos = False
|
||||
todo_include_todos = True
|
||||
|
||||
|
||||
# -- Options for HTML output ----------------------------------------------
|
||||
|
@@ -40,7 +40,7 @@ class that implements the following methods.
|
||||
:param timeout: timeout (in seconds) for the transfer; if the transfer does
|
||||
not complete within this period, an exception will be raised.
|
||||
|
||||
.. method:: execute(self, command, timeout=None, check_exit_code=False, as_root=False)
|
||||
.. method:: execute(self, command, timeout=None, check_exit_code=False, as_root=False, strip_colors=True, will_succeed=False)
|
||||
|
||||
Execute the specified command on the connected device and return its output.
|
||||
|
||||
@@ -53,6 +53,13 @@ class that implements the following methods.
|
||||
raised if it is not ``0``.
|
||||
:param as_root: The command will be executed as root. This will fail on
|
||||
unrooted connected devices.
|
||||
:param strip_colors: The command output will have colour encodings and
|
||||
most ANSI escape sequences stripped out before returning.
|
||||
:param will_succeed: The command is assumed to always succeed, unless there is
|
||||
an issue in the environment like the loss of network connectivity. That
|
||||
will make the method always raise an instance of a subclass of
|
||||
:class:`DevlibTransientError` when the command fails, instead of a
|
||||
:class:`DevlibStableError`.
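For illustration only, a minimal sketch of how a caller might lean on ``will_succeed`` to retry commands that should only fail when the environment breaks (the connection object, command and retry count are assumptions, and the exception class is assumed to be importable from ``devlib.exception``):

.. code-block:: python

    from devlib.exception import DevlibTransientError

    def run_with_retry(conn, command, attempts=3):
        # will_succeed=True turns "stable" failures into transient ones,
        # so anything raised here is worth retrying.
        last_err = None
        for _ in range(attempts):
            try:
                return conn.execute(command, will_succeed=True)
            except DevlibTransientError as e:
                last_err = e    # e.g. lost connectivity; try again
        raise last_err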
|
||||
|
||||
.. method:: background(self, command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, as_root=False)
|
||||
|
||||
@@ -99,19 +106,19 @@ Connection Types
|
||||
``adb`` is part of the Android SDK (though stand-alone versions are also
|
||||
available).
|
||||
|
||||
:param device: The name of the adb divice. This is usually a unique hex
|
||||
:param device: The name of the adb device. This is usually a unique hex
|
||||
string for USB-connected devices, or an ip address/port
|
||||
combination. To see connected devices, you can run ``adb
|
||||
devices`` on the host.
|
||||
:param timeout: Connection timeout in seconds. If a connection to the device
|
||||
is not esblished within this period, :class:`HostError`
|
||||
is not established within this period, :class:`HostError`
|
||||
is raised.
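For example (the serial number below is a placeholder; ``adb devices`` on the host lists the real ones):

.. code-block:: python

    from devlib.utils.android import AdbConnection

    conn = AdbConnection(device='0123456789ABCDEF', timeout=30)  # placeholder serial
    print(conn.execute('getprop ro.build.version.release'))
    conn.close()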
|
||||
|
||||
|
||||
.. class:: SshConnection(host, username, password=None, keyfile=None, port=None,\
|
||||
timeout=None, password_prompt=None)
|
||||
|
||||
A connectioned to a device on the network over SSH.
|
||||
A connection to a device on the network over SSH.
|
||||
|
||||
:param host: SSH host to which to connect
|
||||
:param username: username for SSH login
|
||||
@@ -126,21 +133,21 @@ Connection Types
|
||||
.. note:: ``keyfile`` and ``password`` can't be specified
|
||||
at the same time.
|
||||
|
||||
:param port: TCP port on which SSH server is litening on the remoted device.
|
||||
:param port: TCP port on which SSH server is listening on the remote device.
|
||||
Omit to use the default port.
|
||||
:param timeout: Timeout for the connection in seconds. If a connection
|
||||
cannot be established within this time, an error will be
|
||||
raised.
|
||||
:param password_prompt: A string with the password prompt used by
|
||||
``sshpass``. Set this if your version of ``sshpass``
|
||||
uses somethin other than ``"[sudo] password"``.
|
||||
uses something other than ``"[sudo] password"``.
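A quick usage sketch (the address, credentials and key path below are placeholders, not defaults):

.. code-block:: python

    from devlib.utils.ssh import SshConnection

    # Placeholder host and credentials; keyfile and password are mutually exclusive.
    conn = SshConnection(host='192.168.0.10',
                         username='root',
                         keyfile='/home/user/.ssh/id_rsa',
                         timeout=30)
    print(conn.execute('uname -a'))
    conn.close()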
|
||||
|
||||
|
||||
.. class:: TelnetConnection(host, username, password=None, port=None,\
|
||||
timeout=None, password_prompt=None,\
|
||||
original_prompt=None)
|
||||
|
||||
A connectioned to a device on the network over Telenet.
|
||||
A connection to a device on the network over Telnet.
|
||||
|
||||
.. note:: Since the Telnet protocol does not support file transfer, scp is
|
||||
used for that purpose.
|
||||
@@ -153,19 +160,19 @@ Connection Types
|
||||
``sshpass`` utility must be installed on the
|
||||
system.
|
||||
|
||||
:param port: TCP port on which SSH server is litening on the remoted device.
|
||||
:param port: TCP port on which SSH server is listening on the remote device.
|
||||
Omit to use the default port.
|
||||
:param timeout: Timeout for the connection in seconds. If a connection
|
||||
cannot be established within this time, an error will be
|
||||
raised.
|
||||
:param password_prompt: A string with the password prompt used by
|
||||
``sshpass``. Set this if your version of ``sshpass``
|
||||
uses somethin other than ``"[sudo] password"``.
|
||||
uses something other than ``"[sudo] password"``.
|
||||
:param original_prompt: A regex for the shell prompt presented in the Telnet
|
||||
connection (the prompt will be reset to a
|
||||
randomly-generated pattern for the duration of the
|
||||
connection to reduce the possibility of clashes).
|
||||
This paramer is ignored for SSH connections.
|
||||
This parameter is ignored for SSH connections.
|
||||
|
||||
|
||||
.. class:: LocalConnection(keep_password=True, unrooted=False, password=None)
|
||||
@@ -189,20 +196,19 @@ Connection Types
|
||||
A connection to a gem5 simulation using a local Telnet connection.
|
||||
|
||||
.. note:: Some of the following input parameters are optional and will be ignored during
|
||||
initialisation. They were kept to keep the anology with a :class:`TelnetConnection`
|
||||
initialisation. They were kept to keep the analogy with a :class:`TelnetConnection`
|
||||
(i.e. ``host``, ``username``, ``password``, ``port``,
|
||||
``password_prompt`` and ``original_prompt``)
|
||||
|
||||
|
||||
:param host: Host on which the gem5 simulation is running
|
||||
|
||||
.. note:: Even thought the input parameter for the ``host``
|
||||
will be ignored, the gem5 simulation needs to on
|
||||
the same host as the user as the user is
|
||||
currently on, so if the host given as input
|
||||
parameter is not the same as the actual host, a
|
||||
``TargetError`` will be raised to prevent
|
||||
confusion.
|
||||
.. note:: Even though the input parameter for the ``host``
|
||||
will be ignored, the gem5 simulation needs to be
|
||||
on the same host the user is currently on, so if
|
||||
the host given as input parameter is not the
|
||||
same as the actual host, a ``TargetStableError``
|
||||
will be raised to prevent confusion.
|
||||
|
||||
:param username: Username in the simulated system
|
||||
:param password: No password required in gem5 so does not need to be set
|
||||
@@ -220,7 +226,7 @@ Connection Types
|
||||
There are two classes that inherit from :class:`Gem5Connection`:
|
||||
:class:`AndroidGem5Connection` and :class:`LinuxGem5Connection`.
|
||||
They inherit *almost* all methods from the parent class, without altering them.
|
||||
The only methods discussed belows are those that will be overwritten by the
|
||||
The only methods discussed below are those that will be overwritten by the
|
||||
:class:`LinuxGem5Connection` and :class:`AndroidGem5Connection` respectively.
|
||||
|
||||
.. class:: LinuxGem5Connection
|
||||
|
doc/derived_measurements.rst (new file, 221 lines)
@@ -0,0 +1,221 @@
|
||||
Derived Measurements
|
||||
=====================
|
||||
|
||||
|
||||
The ``DerivedMeasurements`` API provides a consistent way of performing post
|
||||
processing on a provided :class:`MeasurementCsv` file.
|
||||
|
||||
Example
|
||||
-------
|
||||
|
||||
The following example shows how to use an implementation of a
|
||||
:class:`DerivedMeasurement` to obtain a list of calculated ``DerivedMetric``'s.
|
||||
|
||||
.. code-block:: ipython
|
||||
|
||||
# Import the relevant derived measurement module
|
||||
# in this example the derived energy module is used.
|
||||
In [1]: from devlib import DerivedEnergyMeasurements
|
||||
|
||||
# Obtain a MeasurementCsv file from an instrument or create from
|
||||
# existing .csv file. In this example an existing csv file is used which was
|
||||
# created with a sampling rate of 100Hz
|
||||
In [2]: from devlib import MeasurementsCsv
|
||||
In [3]: measurement_csv = MeasurementsCsv('/example/measurements.csv', sample_rate_hz=100)
|
||||
|
||||
# Process the file and obtain a list of the derived measurements
|
||||
In [4]: derived_measurements = DerivedEnergyMeasurements.process(measurement_csv)
|
||||
|
||||
In [5]: derived_measurements
|
||||
Out[5]: [device_energy: 239.1854075 joules, device_power: 5.5494089227 watts]
|
||||
|
||||
API
|
||||
---
|
||||
|
||||
Derived Measurements
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. class:: DerivedMeasurements
|
||||
|
||||
The ``DerivedMeasurements`` class provides an API for post-processing
|
||||
instrument output offline (i.e. without a connection to the target device) to
|
||||
generate additional metrics.
|
||||
|
||||
.. method:: DerivedMeasurements.process(measurement_csv)
|
||||
|
||||
Process a :class:`MeasurementsCsv`, returning a list of
|
||||
:class:`DerivedMetric` and/or :class:`MeasurementsCsv` objects that have been
|
||||
derived from the input. The exact nature and ordering of the list members
|
||||
is specific to individual :class:`DerivedMeasurements` implementations.
|
||||
|
||||
.. method:: DerivedMeasurements.process_raw(\*args)
|
||||
|
||||
Process raw output from an instrument, returning a list :class:`DerivedMetric`
|
||||
and/or :class:`MeasurementsCsv` objects that have been derived from the
|
||||
input. The exact nature and ordering of the list members is specific to
|
||||
individual :class:`DerivedMeasurements` implementations.
|
||||
|
||||
The arguments to this method should be paths to raw output files generated by
|
||||
an instrument. The number and order of expected arguments is specific to
|
||||
particular implementations.
|
||||
|
||||
|
||||
Derived Metric
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
.. class:: DerivedMetric
|
||||
|
||||
Represents a metric derived from previously collected ``Measurement``s.
|
||||
Unlike a ``Measurement``, this was not measured directly from the target.
|
||||
|
||||
|
||||
.. attribute:: DerivedMetric.name
|
||||
|
||||
The name of the derived metric. This uniquely defines a metric -- two
|
||||
``DerivedMetric`` objects with the same ``name`` represent two instances of
|
||||
the same metric (e.g. computed from two different inputs).
|
||||
|
||||
.. attribute:: DerivedMetric.value
|
||||
|
||||
The ``numeric`` value of the metric that has been computed for a particular
|
||||
input.
|
||||
|
||||
.. attribute:: DerivedMetric.measurement_type
|
||||
|
||||
The ``MeasurementType`` of the metric. This indicates which conceptual
|
||||
category the metric falls into, its units, and conversions to other
|
||||
measurement types.
|
||||
|
||||
.. attribute:: DerivedMetric.units
|
||||
|
||||
The units in which the metric's value is expressed.
|
||||
|
||||
|
||||
Available Derived Measurements
|
||||
-------------------------------
|
||||
|
||||
.. note:: If a method of the API is not documented for a particular
|
||||
implementation, that means that it is not overridden by that
|
||||
implementation. It is still safe to call it -- an empty list will be
|
||||
returned.
|
||||
|
||||
Energy
|
||||
~~~~~~
|
||||
|
||||
.. class:: DerivedEnergyMeasurements
|
||||
|
||||
The ``DerivedEnergyMeasurements`` class is used to calculate average power and
|
||||
cumulative energy for each site if the required data is present.
|
||||
|
||||
The calculation of cumulative energy can occur in 3 ways. If a
|
||||
``site`` contains ``energy`` results, the first and last measurements are extracted
|
||||
and the delta calculated. If not, a ``timestamp`` channel will be used to calculate
|
||||
the energy from the power channel, falling back to using the sample rate attribute
|
||||
of the :class:`MeasurementCsv` file if timestamps are not available. If neither
|
||||
timestamps nor a sample rate are available then an error will be raised.
|
||||
|
||||
|
||||
.. method:: DerivedEnergyMeasurements.process(measurement_csv)
|
||||
|
||||
This will return total cumulative energy for each energy channel, and the
|
||||
average power for each power channel in the input CSV. The output will contain
|
||||
all energy metrics followed by power metrics. The ordering of both will match
|
||||
the ordering of channels in the input. The metrics will be named based on the
|
||||
sites of the corresponding channels according to the following patterns:
|
||||
``"<site>_total_energy"`` and ``"<site>_average_power"``.
|
||||
|
||||
|
||||
FPS / Rendering
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
.. class:: DerivedGfxInfoStats(drop_threshold=5, suffix='-fps', filename=None, outdir=None)
|
||||
|
||||
Produces FPS (frames-per-second) and other derived statistics from
|
||||
:class:`GfxInfoFramesInstrument` output. This takes several optional
|
||||
parameters in creation:
|
||||
|
||||
:param drop_threshold: FPS in an application, such as a game, which this
|
||||
processor is primarily targeted at, cannot reasonably
|
||||
drop to a very low value. This parameter specifies that
|
||||
threshold. If an FPS for a frame is computed to be
|
||||
lower than this threshold, it will be dropped on the
|
||||
assumption that frame rendering was suspended by the
|
||||
system (e.g. when idling), or there was some sort of
|
||||
error, and therefore it should not be used in
|
||||
performance calculations. Defaults to ``5``.
|
||||
:param suffix: The name of the generated per-frame FPS csv file will be
|
||||
derived from the input frames csv file by appending this
|
||||
suffix. This cannot be specified at the same time as
|
||||
a ``filename``.
|
||||
:param filename: As an alternative to the suffix, a complete file name for
|
||||
FPS csv can be specified. This cannot be used at the same
|
||||
time as the ``suffix``.
|
||||
:param outdir: By default, the FPS csv file will be placed in the same
|
||||
directory as the input frames csv file. This can be changed
|
||||
by specifying an alternate directory here
|
||||
|
||||
.. warning:: Specifying both ``filename`` and ``outdir`` will mean that exactly
|
||||
the same file will be used for FPS output on each invocation of
|
||||
``process()`` (even for different inputs) resulting in previous
|
||||
results being overwritten.
|
||||
|
||||
.. method:: DerivedGfxInfoStats.process(measurement_csv)
|
||||
|
||||
Process the frames csv generated by :class:`GfxInfoFramesInstrument` and
|
||||
returns a list containing exactly three entries: :class:`DerivedMetric`\ s
|
||||
``fps`` and ``total_frames``, followed by a :class:`MeasurementsCsv` containing
|
||||
per-frame FPS values.
|
||||
|
||||
.. method:: DerivedGfxInfoStats.process_raw(gfxinfo_frame_raw_file)
|
||||
|
||||
As input, this takes a single argument, which should be the path to the raw
|
||||
output file of :class:`GfxInfoFramesInstrument`. This returns the stats
|
||||
accumulated by gfxinfo. At the time of writing, the stats (in order) are:
|
||||
``janks``, ``janks_pc`` (percentage of all frames),
|
||||
``render_time_50th_ptile`` (50th percentile, or median, for time to render a
|
||||
frame), ``render_time_90th_ptile``, ``render_time_95th_ptile``,
|
||||
``render_time_99th_ptile``, ``missed_vsync``, ``hight_input_latency``,
|
||||
``slow_ui_thread``, ``slow_bitmap_uploads``, ``slow_issue_draw_commands``.
|
||||
Please see the `gfxinfo documentation`_ for details.
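Putting the two methods together, a minimal sketch (the ``frames_csv`` object is assumed to be the :class:`MeasurementsCsv` returned by the instrument's ``get_data()``, and the raw file path is hypothetical):

.. code-block:: python

    from devlib import DerivedGfxInfoStats

    stats = DerivedGfxInfoStats(drop_threshold=5, suffix='-fps')

    # Per-frame FPS statistics derived from the frames CSV produced by the instrument
    fps, total_frames, fps_csv = stats.process(frames_csv)

    # Aggregate jank/render-time stats straight from the raw gfxinfo dump
    raw_stats = stats.process_raw('/path/to/gfxinfo_raw.txt')  # hypothetical path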
|
||||
|
||||
.. _gfxinfo documentation: https://developer.android.com/training/testing/performance.html
|
||||
|
||||
|
||||
.. class:: DerivedSurfaceFlingerStats(drop_threshold=5, suffix='-fps', filename=None, outdir=None)
|
||||
|
||||
Produces FPS (frames-per-second) and other derived statistics from
|
||||
:class:`SurfaceFlingerFramesInstrument` output. This takes several optional
|
||||
parameters in creation:
|
||||
|
||||
:param drop_threshold: FPS in an application, such as a game, which this
|
||||
processor is primarily targeted at, cannot reasonably
|
||||
drop to a very low value. This parameter specifies that
|
||||
threshold. If an FPS for a frame is computed to be
|
||||
lower than this threshold, it will be dropped on the
|
||||
assumption that frame rendering was suspended by the
|
||||
system (e.g. when idling), or there was some sort of
|
||||
error, and therefore it should not be used in
|
||||
performance calculations. Defaults to ``5``.
|
||||
:param suffix: The name of the generated per-frame FPS csv file will be
|
||||
derived from the input frames csv file by appending this
|
||||
suffix. This cannot be specified at the same time as
|
||||
a ``filename``.
|
||||
:param filename: As an alternative to the suffix, a complete file name for
|
||||
FPS csv can be specified. This cannot be used at the same
|
||||
time as the ``suffix``.
|
||||
:param outdir: By default, the FPS csv file will be placed in the same
|
||||
directory as the input frames csv file. This can be changed
|
||||
by specifying an alternate directory here
|
||||
|
||||
.. warning:: Specifying both ``filename`` and ``outdir`` will mean that exactly
|
||||
the same file will be used for FPS output on each invocation of
|
||||
``process()`` (even for different inputs) resulting in previous
|
||||
results being overwritten.
|
||||
|
||||
.. method:: DerivedSurfaceFlingerStats.process(measurement_csv)
|
||||
|
||||
Process the frames csv generated by :class:`SurfaceFlingerFramesInstrument` and
|
||||
returns a list containing exactly three entries: :class:`DerivedMetric`\ s
|
||||
``fps`` and ``total_frames``, followed by a :class:`MeasurementsCsv` containing
|
||||
per-frame FPS values, followed by ``janks``, ``janks_pc``, and
|
||||
``missed_vsync`` metrics.
|
doc/images/instrumentation/baylibre_acme/bottleneck.png (new binary file, 46 KiB)
doc/images/instrumentation/baylibre_acme/buffer.png (new binary file, 140 KiB)
doc/images/instrumentation/baylibre_acme/cape.png (new binary file, 1.1 MiB)
doc/images/instrumentation/baylibre_acme/ina226_circuit.png (new binary file, 70 KiB)
doc/images/instrumentation/baylibre_acme/ina226_functional.png (new binary file, 42 KiB)
doc/images/instrumentation/baylibre_acme/int_time.png (new binary file, 132 KiB)
@@ -19,6 +19,7 @@ Contents:
|
||||
target
|
||||
modules
|
||||
instrumentation
|
||||
derived_measurements
|
||||
platform
|
||||
connection
|
||||
|
||||
|
@@ -13,7 +13,7 @@ Example
|
||||
The following example shows how to use an instrument to read temperature from an
|
||||
Android target.
|
||||
|
||||
.. code-block:: ipython
|
||||
.. code-block:: python
|
||||
|
||||
# import and instantiate the Target and the instrument
|
||||
# (note: this assumes exactly one android target connected
|
||||
@@ -51,7 +51,7 @@ API
|
||||
Instrument
|
||||
~~~~~~~~~~
|
||||
|
||||
.. class:: Instrument(target, **kwargs)
|
||||
.. class:: Instrument(target, \*\*kwargs)
|
||||
|
||||
An ``Instrument`` allows collection of measurement from one or more
|
||||
channels. An ``Instrument`` may support ``INSTANTANEOUS`` or ``CONTINUOUS``
|
||||
@@ -65,8 +65,8 @@ Instrument
|
||||
:INSTANTANEOUS: The instrument supports taking a single sample via
|
||||
``take_measurement()``.
|
||||
:CONTINUOUS: The instrument supports collecting measurements over a
|
||||
period of time via ``start()``, ``stop()``, and
|
||||
``get_data()`` methods.
|
||||
period of time via ``start()``, ``stop()``, ``get_data()``,
|
||||
and (optionally) ``get_raw`` methods.
|
||||
|
||||
.. note:: It's possible for one instrument to support more than a single
|
||||
mode.
|
||||
@@ -88,7 +88,7 @@ Instrument
|
||||
Returns channels for a particular ``measure`` type. A ``measure`` can be
|
||||
either a string (e.g. ``"power"``) or a :class:`MeasurmentType` instance.
|
||||
|
||||
.. method:: Instrument.setup(*args, **kwargs)
|
||||
.. method:: Instrument.setup(\*args, \*\*kwargs)
|
||||
|
||||
This will set up the instrument on the target. Parameters this method takes
|
||||
are particular to subclasses (see documentation for specific instruments
|
||||
@@ -99,29 +99,36 @@ Instrument
|
||||
``teardown()`` has been called), but see documentation for the instrument
|
||||
you're interested in.
|
||||
|
||||
.. method:: Instrument.reset([sites, [kinds]])
|
||||
.. method:: Instrument.reset(sites=None, kinds=None, channels=None)
|
||||
|
||||
This is used to configure an instrument for collection. This must be invoked
|
||||
before ``start()`` is called to begin collection. ``sites`` and ``kinds``
|
||||
parameters may be used to specify which channels measurements should be
|
||||
collected from (if omitted, then measurements will be collected for all
|
||||
available sites/kinds). This methods sets the ``active_channels`` attribute
|
||||
of the ``Instrument``.
|
||||
before ``start()`` is called to begin collection. This method sets the
|
||||
``active_channels`` attribute of the ``Instrument``.
|
||||
|
||||
.. method:: Instrument.take_measurment()
|
||||
If ``channels`` is provided, it is a list of names of channels to enable and
|
||||
``sites`` and ``kinds`` must both be ``None``.
|
||||
|
||||
Otherwise, if one of ``sites`` or ``kinds`` is provided, all channels
|
||||
matching the given sites or kinds are enabled. If both are provided then all
|
||||
channels of the given kinds at the given sites are enabled.
|
||||
|
||||
If none of ``sites``, ``kinds`` or ``channels`` are provided then all
|
||||
available channels are enabled.
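For instance (the instrument object and the site/kind/channel names are assumptions for the example):

.. code-block:: python

    # enable every channel the instrument exposes
    instrument.reset()

    # enable all 'power' channels, whatever their site
    instrument.reset(kinds=['power'])

    # enable every channel on the 'device' site
    instrument.reset(sites=['device'])

    # enable only the named channels (sites and kinds must stay None)
    instrument.reset(channels=['device_power', 'device_voltage'])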
|
||||
|
||||
.. method:: Instrument.take_measurement()
|
||||
|
||||
Take a single measurement from ``active_channels``. Returns a list of
|
||||
:class:`Measurement` objects (one for each active channel).
|
||||
|
||||
.. note:: This method is only implemented by :class:`Instrument`\ s that
|
||||
support ``INSTANTANEOUS`` measurment.
|
||||
support ``INSTANTANEOUS`` measurement.
|
||||
|
||||
.. method:: Instrument.start()
|
||||
|
||||
Starts collecting measurements from ``active_channels``.
|
||||
|
||||
.. note:: This method is only implemented by :class:`Instrument`\ s that
|
||||
support ``CONTINUOUS`` measurment.
|
||||
support ``CONTINUOUS`` measurement.
|
||||
|
||||
.. method:: Instrument.stop()
|
||||
|
||||
@@ -129,50 +136,65 @@ Instrument
|
||||
:func:`start()`.
|
||||
|
||||
.. note:: This method is only implemented by :class:`Instrument`\ s that
|
||||
support ``CONTINUOUS`` measurment.
|
||||
support ``CONTINUOUS`` measurement.
|
||||
|
||||
.. method:: Instrument.get_data(outfile)
|
||||
|
||||
Write collected data into ``outfile``. Must be called after :func:`stop()`.
|
||||
Data will be written in CSV format with a column for each channel and a row
|
||||
for each sample. Column headings will be channel labels in the form
|
||||
``<site>_<kind>`` (see :class:`InstrumentChannel`). The order of the coluns
|
||||
``<site>_<kind>`` (see :class:`InstrumentChannel`). The order of the columns
|
||||
will be the same as the order of channels in ``Instrument.active_channels``.
|
||||
|
||||
If reporting timestamps, one channel must have a ``site`` named ``"timestamp"``
|
||||
and a ``kind`` of a :class:`MeasurementType` of an appropriate time unit which will
|
||||
be used, if appropriate, during any post processing.
|
||||
|
||||
.. note:: Currently supported time units are seconds, milliseconds and
|
||||
microseconds, other units can also be used if an appropriate
|
||||
conversion is provided.
|
||||
|
||||
This returns a :class:`MeasurementCsv` instance associated with the outfile
|
||||
that can be used to stream :class:`Measurement`\ s lists (similar to what is
|
||||
returned by ``take_measurement()``).
|
||||
|
||||
.. note:: This method is only implemented by :class:`Instrument`\ s that
|
||||
support ``CONTINUOUS`` measurment.
|
||||
support ``CONTINUOUS`` measurement.
|
||||
|
||||
.. method:: Instrument.get_raw()
|
||||
|
||||
Returns a list of paths to files containing raw output from the underlying
|
||||
source(s) that is used to produce the data CSV. If no raw output is
|
||||
generated or saved, an empty list will be returned. The format of the
|
||||
contents of the raw files is entirely source-dependent.
|
||||
|
||||
.. attribute:: Instrument.sample_rate_hz
|
||||
|
||||
Sample rate of the instrument in Hz. Assumed to be the same for all channels.
|
||||
|
||||
.. note:: This attribute is only provided by :class:`Instrument`\ s that
|
||||
support ``CONTINUOUS`` measurment.
|
||||
support ``CONTINUOUS`` measurement.
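Putting the ``CONTINUOUS`` pieces together, a typical collection run might be sketched as follows (the instrument object, output path and ten-second workload are assumptions):

.. code-block:: python

    import time

    instrument.reset(kinds=['power'])   # choose what to collect
    instrument.start()
    time.sleep(10)                      # run the workload of interest here
    instrument.stop()

    # Writes the CSV and returns a MeasurementsCsv handle that can be streamed
    # or handed to a DerivedMeasurements implementation for post-processing.
    measurements_csv = instrument.get_data('/tmp/measurements.csv')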
|
||||
|
||||
Instrument Channel
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. class:: InstrumentChannel(name, site, measurement_type, **attrs)
|
||||
.. class:: InstrumentChannel(name, site, measurement_type, \*\*attrs)
|
||||
|
||||
An :class:`InstrumentChannel` describes a single type of measurement that may
|
||||
be collected by an :class:`Instrument`. A channel is primarily defined by a
|
||||
``site`` and a ``measurement_type``.
|
||||
|
||||
A ``site`` indicates where on the target a measurement is collected from
|
||||
(e.g. a volage rail or location of a sensor).
|
||||
(e.g. a voltage rail or location of a sensor).
|
||||
|
||||
A ``measurement_type`` is an instance of :class:`MeasurmentType` that
|
||||
describes what sort of measurment this is (power, temperature, etc). Each
|
||||
mesurement type has a standard unit it is reported in, regardless of an
|
||||
describes what sort of measurement this is (power, temperature, etc). Each
|
||||
measurement type has a standard unit it is reported in, regardless of an
|
||||
instrument used to collect it.
|
||||
|
||||
A channel (i.e. site/measurement_type combination) is unique per instrument,
|
||||
however there may be more than one channel associated with one site (e.g. for
|
||||
both volatage and power).
|
||||
both voltage and power).
|
||||
|
||||
It should not be assumed that any site/measurement_type combination is valid.
|
||||
The list of available channels can be queried with
|
||||
@@ -180,22 +202,22 @@ Instrument Channel
|
||||
|
||||
.. attribute:: InstrumentChannel.site
|
||||
|
||||
The name of the "site" from which the measurments are collected (e.g. voltage
|
||||
The name of the "site" from which the measurements are collected (e.g. voltage
|
||||
rail, sensor, etc).
|
||||
|
||||
.. attribute:: InstrumentChannel.kind
|
||||
|
||||
A string indingcating the type of measrument that will be collted. This is
|
||||
A string indicating the type of measurement that will be collected. This is
|
||||
the ``name`` of the :class:`MeasurementType` associated with this channel.
|
||||
|
||||
.. attribute:: InstrumentChannel.units
|
||||
|
||||
Units in which measurment will be reported. this is determined by the
|
||||
Units in which measurement will be reported. This is determined by the
|
||||
underlying :class:`MeasurementType`.
|
||||
|
||||
.. attribute:: InstrumentChannel.label
|
||||
|
||||
A label that can be attached to measurments associated with with channel.
|
||||
A label that can be attached to measurements associated with this channel.
|
||||
This is constructed with ::
|
||||
|
||||
'{}_{}'.format(self.site, self.kind)
|
||||
@@ -206,32 +228,38 @@ Measurement Types
|
||||
|
||||
In order to make instruments easier to use, and to make it easier to swap them
|
||||
out when necessary (e.g. change method of collecting power), a number of
|
||||
standard measurement types are defined. This way, for example, power will always
|
||||
be reported as "power" in Watts, and never as "pwr" in milliWatts. Currently
|
||||
defined measurement types are
|
||||
standard measurement types are defined. This way, for example, power will
|
||||
always be reported as "power" in Watts, and never as "pwr" in milliWatts.
|
||||
Currently defined measurement types are
|
||||
|
||||
|
||||
+-------------+---------+---------------+
|
||||
| name | units | category |
|
||||
+=============+=========+===============+
|
||||
| time | seconds | |
|
||||
+-------------+---------+---------------+
|
||||
| temperature | degrees | |
|
||||
+-------------+---------+---------------+
|
||||
| power | watts | power/energy |
|
||||
+-------------+---------+---------------+
|
||||
| voltage | volts | power/energy |
|
||||
+-------------+---------+---------------+
|
||||
| current | amps | power/energy |
|
||||
+-------------+---------+---------------+
|
||||
| energy | joules | power/energy |
|
||||
+-------------+---------+---------------+
|
||||
| tx | bytes | data transfer |
|
||||
+-------------+---------+---------------+
|
||||
| rx | bytes | data transfer |
|
||||
+-------------+---------+---------------+
|
||||
| tx/rx | bytes | data transfer |
|
||||
+-------------+---------+---------------+
|
||||
+-------------+-------------+---------------+
|
||||
| name | units | category |
|
||||
+=============+=============+===============+
|
||||
| count | count | |
|
||||
+-------------+-------------+---------------+
|
||||
| percent | percent | |
|
||||
+-------------+-------------+---------------+
|
||||
| time_us | microseconds| time |
|
||||
+-------------+-------------+---------------+
|
||||
| time_ms | milliseconds| time |
|
||||
+-------------+-------------+---------------+
|
||||
| temperature | degrees | thermal |
|
||||
+-------------+-------------+---------------+
|
||||
| power | watts | power/energy |
|
||||
+-------------+-------------+---------------+
|
||||
| voltage | volts | power/energy |
|
||||
+-------------+-------------+---------------+
|
||||
| current | amps | power/energy |
|
||||
+-------------+-------------+---------------+
|
||||
| energy | joules | power/energy |
|
||||
+-------------+-------------+---------------+
|
||||
| tx | bytes | data transfer |
|
||||
+-------------+-------------+---------------+
|
||||
| rx | bytes | data transfer |
|
||||
+-------------+-------------+---------------+
|
||||
| tx/rx | bytes | data transfer |
|
||||
+-------------+-------------+---------------+
|
||||
|
||||
|
||||
.. instruments:
|
||||
@@ -241,4 +269,644 @@ Available Instruments
|
||||
|
||||
This section lists instruments that are currently part of devlib.
|
||||
|
||||
TODO
|
||||
.. todo:: Add other instruments
|
||||
|
||||
|
||||
Baylibre ACME BeagleBone Black Cape
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. _official project page: http://baylibre.com/acme/
|
||||
.. _image built for using the ACME: https://gitlab.com/baylibre-acme/ACME-Software-Release/blob/master/README.md
|
||||
.. _libiio (the Linux IIO interface): https://github.com/analogdevicesinc/libiio
|
||||
.. _Linux Industrial I/O Subsystem: https://wiki.analog.com/software/linux/docs/iio/iio
|
||||
.. _Texas Instruments INA226: http://www.ti.com/lit/ds/symlink/ina226.pdf
|
||||
|
||||
From the `official project page`_:
|
||||
|
||||
[The Baylibre Another Cute Measurement Equipment (ACME)] is an extension for
|
||||
the BeagleBone Black (the ACME Cape), designed to provide multi-channel power
|
||||
and temperature measurements capabilities to the BeagleBone Black (BBB). It
|
||||
comes with power and temperature probes integrating a power switch (the ACME
|
||||
Probes), turning it into an advanced all-in-one power/temperature measurement
|
||||
solution.
|
||||
|
||||
The ACME initiative is completely open source, from HW to SW drivers and
|
||||
applications.
|
||||
|
||||
|
||||
The Infrastructure
|
||||
^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Retrieving measurements from the ACME through devlib requires:
|
||||
|
||||
- a BBB running the `image built for using the ACME`_ (micro SD card required);
|
||||
|
||||
- an ACME cape on top of the BBB;
|
||||
|
||||
- at least one ACME probe [#acme_probe_variants]_ connected to the ACME cape;
|
||||
|
||||
- a BBB-host interface (typically USB or Ethernet) [#acme_name_conflicts]_;
|
||||
|
||||
- a host (the one running devlib) with `libiio (the Linux IIO interface)`_
|
||||
installed, and a Python environment able to find the libiio Python wrapper
|
||||
*i.e.* able to ``import iio`` as communications between the BBB and the
|
||||
host rely on the `Linux Industrial I/O Subsystem`_ (IIO).
|
||||
|
||||
The ACME probes are built on top of the `Texas Instruments INA226`_ and the
|
||||
data acquisition chain is as follows:
|
||||
|
||||
.. graphviz::
|
||||
|
||||
digraph target {
|
||||
rankdir = LR
|
||||
bgcolor = transparent
|
||||
|
||||
subgraph cluster_target {
|
||||
|
||||
subgraph cluster_BBB {
|
||||
node [style = filled, color = white];
|
||||
style = filled;
|
||||
color = lightgrey;
|
||||
label = "BeagleBone Black";
|
||||
|
||||
drivers -> "IIO Daemon" [dir = both]
|
||||
}
|
||||
|
||||
subgraph cluster_INA226 {
|
||||
node [style = filled, color = white];
|
||||
style = filled;
|
||||
color = lightgrey;
|
||||
label = INA226;
|
||||
|
||||
ADC -> Processing
|
||||
Processing -> Registers
|
||||
}
|
||||
|
||||
subgraph cluster_inputs {
|
||||
node [style = filled, color = white];
|
||||
style = filled;
|
||||
color = lightgrey;
|
||||
label = Inputs;
|
||||
|
||||
"Bus Voltage" -> ADC;
|
||||
"Shunt Voltage" -> ADC;
|
||||
}
|
||||
|
||||
Registers -> drivers [dir = both, label = I2C];
|
||||
}
|
||||
|
||||
subgraph cluster_IIO {
|
||||
style = none
|
||||
"IIO Daemon" -> "IIO Interface" [dir = both, label = "Eth./USB"]
|
||||
}
|
||||
}
|
||||
|
||||
For reference, the software stack on the host is roughly given by:
|
||||
|
||||
.. graphviz::
|
||||
|
||||
digraph host {
|
||||
rankdir = LR
|
||||
bgcolor = transparent
|
||||
|
||||
subgraph cluster_host {
|
||||
|
||||
subgraph cluster_backend {
|
||||
node [style = filled, color = white];
|
||||
style = filled;
|
||||
color = lightgrey;
|
||||
label = Backend;
|
||||
|
||||
"IIO Daemon" -> "C API" [dir = both]
|
||||
}
|
||||
|
||||
subgraph cluster_Python {
|
||||
node [style = filled, color = white];
|
||||
style = filled;
|
||||
color = lightgrey;
|
||||
label = Python;
|
||||
|
||||
"C API" -> "iio Wrapper" [dir = both]
|
||||
"iio Wrapper" -> devlib [dir = both]
|
||||
devlib -> "User" [dir = both]
|
||||
}
|
||||
}
|
||||
|
||||
subgraph cluster_IIO {
|
||||
style = none
|
||||
"IIO Interface" -> "IIO Daemon" [dir = both, label = "Eth./USB"]
|
||||
}
|
||||
}
|
||||
|
||||
Ethernet was the only IIO Interface used and tested during the development of
|
||||
this instrument. However,
|
||||
`USB seems to be supported <https://gitlab.com/baylibre-acme/ACME/issues/2>`_.
|
||||
The IIO library also provides "Local" and "XML" connections but these are to be
|
||||
used when the IIO devices are directly connected to the host *i.e.* in our
|
||||
case, if we were to run Python and devlib on the BBB. These are also untested.
|
||||
|
||||
Measuring Power
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
In IIO terminology, the ACME cape is an *IIO context* and ACME probes are *IIO
|
||||
devices* with *IIO channels*. An input *IIO channel* (the ACME has no *output
|
||||
IIO channel*) is a stream of samples and an ACME cape can be connected to up to
|
||||
8 probes *i.e.* have 8 *IIO devices*. The probes are discovered at startup by
|
||||
the IIO drivers on the BBB and are indexed according to the order in which they
|
||||
are connected to the ACME cape (with respect to the "Probe *X*" connectors on
|
||||
the cape).
|
||||
|
||||
|
||||
.. figure:: images/instrumentation/baylibre_acme/cape.png
|
||||
:width: 50%
|
||||
:alt: ACME Cape
|
||||
:align: center
|
||||
|
||||
ACME Cape on top of a BBB: Notice the numbered probe connectors (
|
||||
`source <https://baylibre.com/wp-content/uploads/2015/11/20150916_BayLibre_ACME_RevB-010-1030x599.png>`_)
|
||||
|
||||
|
||||
Please note that the numbers on the PCB do not represent the index of a probe
|
||||
in IIO; on top of being 1-based (as opposed to IIO device indexing being
|
||||
0-based), skipped connectors do not result in skipped indices *e.g.* if three
|
||||
probes are connected to the cape at ``Probe 1``, ``Probe 3`` and ``Probe 7``,
|
||||
IIO (and therefore the entire software stack, including devlib) will still
|
||||
refer to them as devices ``0``, ``1`` and ``2``, respectively. Furthermore,
|
||||
probe "hot swapping" does not seem to be supported.
|
||||
|
||||
INA226: The probing spearhead
|
||||
"""""""""""""""""""""""""""""
|
||||
|
||||
An ACME probe has 5 *IIO channels*, 4 of which are "IIO wrappers" around what
|
||||
the INA226 outputs (through its I2C registers): the bus voltage, the shunt
|
||||
voltage, the shunt current and the load power. The last channel gives the
|
||||
timestamps and is probably added further down the pipeline. A typical circuit
|
||||
configuration for the INA226 (useful for shunt-based ACME probes, whose PCB,
unlike the USB and jack variants, does not contain the full circuit)
|
||||
is given by its datasheet:
|
||||
|
||||
.. figure:: images/instrumentation/baylibre_acme/ina226_circuit.png
|
||||
:width: 90%
|
||||
:alt: Typical circuit configuration, INA226
|
||||
:align: center
|
||||
|
||||
Typical Circuit Configuration (source: `Texas Instruments INA226`_)
|
||||
|
||||
|
||||
The analog-to-digital converter (ADC)
|
||||
'''''''''''''''''''''''''''''''''''''
|
||||
|
||||
The digital time-discrete sampled signal of the analog time-continuous input
|
||||
voltage signal is obtained through an analog-to-digital converter (ADC). To
|
||||
measure the "instantaneous input voltage", the ADC "charges up or down" a
|
||||
capacitor before measuring its charge.
|
||||
|
||||
The *integration time* is the time spent by the ADC acquiring the input signal
|
||||
in its capacitor. The longer this time is, the more resilient the sampling
|
||||
process is to unwanted noise. The drawback is that increasing the integration
time decreases the sampling rate. This effect can be somewhat
|
||||
compared to a *low-pass filter*.
|
||||
|
||||
As the INA226 alternatively connects its ADC to the bus voltage and shunt
|
||||
voltage (see previous figure), samples are retrieved at a frequency of
|
||||
|
||||
.. math::
|
||||
\frac{1}{T_{bus} + T_{shunt}}
|
||||
|
||||
where :math:`T_X` is the integration time for the :math:`X` voltage.
|
||||
|
||||
As described below (:meth:`BaylibreAcmeInstrument.setup`), the integration
|
||||
times for the bus and shunt voltage can be set separately which allows a
|
||||
tradeoff of accuracy between signals. This is particularly useful as the shunt
|
||||
voltage returned by the INA226 has a higher resolution than the bus voltage
|
||||
(2.5 μV and 1.25 mV LSB, respectively) and therefore would benefit more from a
|
||||
longer integration time.
|
||||
|
||||
As an illustration, consider the following sampled sine wave and notice how
|
||||
increasing the integration time (of the bus voltage in this case) "smoothes"
|
||||
out the signal:
|
||||
|
||||
.. figure:: images/instrumentation/baylibre_acme/int_time.png
|
||||
:alt: Illustration of the impact of the integration time
|
||||
:align: center
|
||||
|
||||
Increasing the integration time increases the resilience to noise
|
||||
|
||||
|
||||
Internal signal processing
|
||||
''''''''''''''''''''''''''
|
||||
|
||||
The INA226 is able to accumulate samples acquired by its ADC and output to the
|
||||
ACME board (technically, to its I2C registers) the average value of :math:`N`
|
||||
samples. This is called *oversampling*. While the integration time somewhat
|
||||
behaves as an analog low-pass filter, the oversampling feature is a digital
|
||||
low-pass filter by definition. The former should be set to reduce sampling
|
||||
noise (*i.e.* noise on a single sample coming from the sampling process) while
|
||||
the latter should be used to filter out high-frequency noise present in the
|
||||
input signal and control the sampling frequency.
|
||||
|
||||
Therefore, samples are available at the output of the INA226 at a frequency
|
||||
|
||||
.. math::
|
||||
\frac{1}{N(T_{bus} + T_{shunt})}
|
||||
|
||||
and the oversampling ratio provides a way to control the output sampling frequency
|
||||
(*i.e.* to limit the required output bandwidth) while making sure the signal
|
||||
fidelity is as desired.
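
As a check of the formulas above, here is a small hedged sketch; the values are
illustrative (they match the example output shown later in this section) and a
real setup should read the ``*_AVAILABLE`` attributes documented below:

.. code-block:: python

    # Output sample rate of an INA226-based probe, per the formulas above.
    def ina226_sample_rate_hz(t_bus_s, t_shunt_s, oversampling_ratio=1):
        """ADC rate is 1 / (T_bus + T_shunt); oversampling divides it by N."""
        return 1.0 / (oversampling_ratio * (t_bus_s + t_shunt_s))

    print(ina226_sample_rate_hz(332e-6, 588e-6))        # ~1087 Hz with N = 1
    print(ina226_sample_rate_hz(588e-6, 1.1e-3, 4))     # ~148 Hz  with N = 4
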
|
||||
|
||||
|
||||
The 4 IIO channels coming from the INA226 can be grouped according to their
|
||||
respective origins: the bus and shunt voltages are measured (and, potentially,
filtered) while the shunt current and load power are computed. Indeed, the
|
||||
INA226 contains on-board fixed-point arithmetic units to compute the trivial
|
||||
expressions:
|
||||
|
||||
.. math::
|
||||
|
||||
I_{shunt} = \frac{V_{shunt}}{R_{shunt}}
|
||||
,\ \
|
||||
P_{load} = V_{load}\ I_{load}
|
||||
\approx V_{bus} \ I_{shunt}
|
||||
|
||||
A functional block diagram of this is also given by the datasheet:
|
||||
|
||||
.. figure:: images/instrumentation/baylibre_acme/ina226_functional.png
|
||||
:width: 60%
|
||||
:alt: Functional block diagram, INA226
|
||||
:align: center
|
||||
|
||||
Acquisition and Processing: Functional Block Diagram
|
||||
(source: `Texas Instruments INA226`_)
|
||||
|
||||
In the end, there are therefore 3 channels (bus voltage, shunt voltage and
|
||||
timestamps) that are necessary to figure out the load power consumption, while
|
||||
the others are provided for convenience, *e.g.* in case the rest of the
|
||||
hardware does not have the computing power to make the computation.
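
As an illustration of the computed channels (and of what the ``high_resolution``
mode described further down does on the host), here is a hedged sketch; the
shunt value and the sample arrays are made up:

.. code-block:: python

    import numpy as np

    r_shunt = 0.02                                 # hypothetical 20 mOhm shunt
    v_bus = np.array([4.15, 4.14, 4.16])           # volts, made-up samples
    v_shunt = np.array([1.2e-3, 1.3e-3, 1.1e-3])   # volts, made-up samples

    i_shunt = v_shunt / r_shunt                    # I_shunt = V_shunt / R_shunt
    p_load = v_bus * i_shunt                       # P_load ~= V_bus * I_shunt
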
|
||||
|
||||
|
||||
Sampling Frequency Issues
|
||||
"""""""""""""""""""""""""
|
||||
|
||||
It looks like the INA226-ACME-BBB setup has a bottleneck preventing the
|
||||
sampling frequency from going higher than ~1.4 kHz (the maximum theoretical sampling
|
||||
frequency is ~3.6 kHz). We know that this issue is not internal to the ADC
|
||||
itself (inside of the INA226) because modifying the integration time affects
|
||||
the output signal even when the sampling frequency is capped (as shown above)
|
||||
but it may come from anywhere after that.
|
||||
|
||||
Because of this, there is no point in using a (theoretical) sampling frequency
|
||||
that is larger than 1.4 kHz. But it is important to note that the ACME will
|
||||
still report the theoretical sampling rate (probably computed with the formula
|
||||
given above) through :attr:`BaylibreAcmeInstrument.sample_rate_hz` and
|
||||
:attr:`IIOINA226Instrument.sample_rate_hz` even if it differs from the actual
|
||||
sampling rate.
|
||||
|
||||
Note that, even though this is obvious for the theoretical sampling rate, the
|
||||
specific values of the bus and shunt integration times do not seem to have an
|
||||
influence on the measured sampling rate; only their sum matters. This further
|
||||
points toward a data-processing bottleneck rather than a hardware bug in the
|
||||
acquisition device.
|
||||
|
||||
The following chart compares the evolution of the measured sampling rate with
|
||||
the expected one as we modify it through :math:`T_{shunt}`, :math:`T_{bus}` and
|
||||
:math:`N`:
|
||||
|
||||
.. figure:: images/instrumentation/baylibre_acme/bottleneck.png
|
||||
:alt: Sampling frequency does not go higher than 1.4 kHz
|
||||
:align: center
|
||||
|
||||
Theoretical vs measured sampling rates
|
||||
|
||||
|
||||
Furthermore, because the transactions are done through a buffer (see next
|
||||
section), if the sampling frequency is too low, the connection may time out
|
||||
before the buffer is full and ready to be sent. This may be fixed in an
|
||||
upcoming release.
|
||||
|
||||
Buffer-based transactions
|
||||
"""""""""""""""""""""""""
|
||||
|
||||
Samples made available by the INA226 are retrieved by the BBB and stored in a
|
||||
buffer which is sent back to the host once it is full (see
|
||||
``buffer_samples_count`` in :meth:`BaylibreAcmeInstrument.setup` for setting
|
||||
its size). Therefore, the larger the buffer is, the longer it takes to be
|
||||
transmitted back but the less often it has to be transmitted. To illustrate
|
||||
this, consider the following graphs showing the time difference between
|
||||
successive samples in a retrieved signal when the size of the buffer changes:
|
||||
|
||||
.. figure:: images/instrumentation/baylibre_acme/buffer.png
|
||||
:alt: Buffer size impact on the sampled signal
|
||||
:align: center
|
||||
|
||||
Impact of the buffer size on the sampling regularity
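
A rough rule of thumb that follows from the above, as a hedged sketch with
illustrative numbers (the "one second's worth of samples" default is described
with :meth:`BaylibreAcmeInstrument.setup` below):

.. code-block:: python

    # Approximate period between IIO buffer transfers from the BBB to the host.
    sample_rate_hz = 1087         # per-probe output rate (illustrative)
    buffer_samples_count = 1087   # default: about one second's worth of samples

    transfer_period_s = buffer_samples_count / float(sample_rate_hz)
    print('one buffer transfer every ~{:.1f} s'.format(transfer_period_s))
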
|
||||
|
||||
devlib API
|
||||
^^^^^^^^^^
|
||||
|
||||
ACME Cape + BBB (IIO Context)
|
||||
"""""""""""""""""""""""""""""
|
||||
|
||||
devlib provides wrapper classes for all the IIO connections to an IIO context
|
||||
given by `libiio (the Linux IIO interface)`_; however, only the network-based one
|
||||
has been tested. For the other classes, please refer to the official IIO
|
||||
documentation for the meaning of their constructor parameters.
|
||||
|
||||
.. class:: BaylibreAcmeInstrument(target=None, iio_context=None, use_base_iio_context=False, probe_names=None)
|
||||
|
||||
Base class wrapper for the ACME instrument which itself is a wrapper for the
|
||||
IIO context base class. This class wraps around the passed ``iio_context``;
|
||||
if ``use_base_iio_context`` is ``True``, ``iio_context`` is first passed to
|
||||
the :class:`iio.Context` base class (see its documentation for how this
|
||||
parameter is then used), else ``iio_context`` is expected to be a valid
|
||||
instance of :class:`iio.Context`.
|
||||
|
||||
``probe_names`` is expected to be a string or list of strings; if passed,
|
||||
the probes in the instance are named according to it in the order in which
|
||||
they are discovered (see previous comment about probe discovery and
|
||||
:attr:`BaylibreAcmeInstrument.probes`). There should be as many
|
||||
``probe_names`` as there are probes connected to the ACME. By default, the
|
||||
probes keep their IIO names.
|
||||
|
||||
To ensure that the setup is reliable, ``devlib`` requires minimum versions
|
||||
for ``iio``, the IIO drivers and the ACME BBB SD image.
|
||||
|
||||
.. class:: BaylibreAcmeNetworkInstrument(target=None, hostname=None, probe_names=None)
|
||||
|
||||
Child class of :class:`BaylibreAcmeInstrument` for Ethernet-based IIO
|
||||
communication. The ``hostname`` should be the IP address or network name of
|
||||
the BBB. If it is ``None``, the ``IIOD_REMOTE`` environment variable will be
|
||||
used as the hostname. If that environment variable is empty, the server will
|
||||
be discovered using ZeroConf. If that environment variable is not set, a
|
||||
local context is created.
|
||||
|
||||
.. class:: BaylibreAcmeXMLInstrument(target=None, xmlfile=None, probe_names=None)
|
||||
|
||||
Child class of :class:`BaylibreAcmeInstrument` using the XML backend of the
|
||||
IIO library and building an IIO context from the provided ``xmlfile`` (a
|
||||
string giving the path to the file is expected).
|
||||
|
||||
.. class:: BaylibreAcmeLocalInstrument(target=None, probe_names=None)
|
||||
|
||||
Child class of :class:`BaylibreAcmeInstrument` using the Local IIO backend.
|
||||
|
||||
.. attribute:: BaylibreAcmeInstrument.mode
|
||||
|
||||
The collection mode for the ACME is ``CONTINUOUS``.
|
||||
|
||||
.. method:: BaylibreAcmeInstrument.setup(shunt_resistor, integration_time_bus, integration_time_shunt, oversampling_ratio, buffer_samples_count=None, buffer_is_circular=False, absolute_timestamps=False, high_resolution=True)
|
||||
|
||||
The ``shunt_resistor`` (:math:`R_{shunt}` [:math:`\mu\Omega`]),
|
||||
``integration_time_bus`` (:math:`T_{bus}` [s]), ``integration_time_shunt``
|
||||
(:math:`T_{shunt}` [s]) and ``oversampling_ratio`` (:math:`N`) are copied
|
||||
into on-board registers inside of the INA226 to be used as described above.
|
||||
Please note that there exists a limited set of accepted values for these
|
||||
parameters; for the integration times, refer to
|
||||
``IIOINA226Instrument.INTEGRATION_TIMES_AVAILABLE`` and for the
|
||||
``oversampling_ratio``, refer to
|
||||
``IIOINA226Instrument.OVERSAMPLING_RATIOS_AVAILABLE``. If all probes share
|
||||
the same value for these attributes, this class provides
|
||||
:attr:`BaylibreAcmeInstrument.OVERSAMPLING_RATIOS_AVAILABLE` and
|
||||
:attr:`BaylibreAcmeInstrument.INTEGRATION_TIMES_AVAILABLE`.
|
||||
|
||||
The ``buffer_samples_count`` is the size of the IIO buffer expressed **in
|
||||
samples**; this is independent of the number of active channels! By default,
|
||||
if ``buffer_samples_count`` is not passed, an IIO buffer of size
:attr:`IIOINA226Instrument.sample_rate_hz` is created, meaning that a buffer
|
||||
transfer happens roughly every second.
|
||||
|
||||
If ``absolute_timestamps`` is ``False``, the first sample from the
|
||||
``timestamps`` channel is subtracted from all the following samples of this
|
||||
channel, effectively making its signal start at 0.
|
||||
|
||||
``high_resolution`` is used to enable a mode where power and current are
|
||||
computed offline on the host machine running ``devlib``: even if the user
|
||||
asks for power or current channels, they are not enabled in hardware
|
||||
(INA226) and instead the necessary voltage signal(s) are enabled to allow
|
||||
the computation of the desired signals using the FPU of the host (which is
|
||||
very likely to be much more accurate than the fixed-point 16-bit unit of the
|
||||
INA226).
|
||||
|
||||
A circular buffer can be used by setting ``buffer_is_circular`` to ``True``
|
||||
(directly passed to :class:`iio.Buffer`).
|
||||
|
||||
Each one of the arguments of this method can either be a single value which
|
||||
will be used for all probes or a list of values giving the corresponding
|
||||
setting for each probe (in the order of ``probe_names`` passed to the
|
||||
constructor) with the exception of ``absolute_timestamps`` (as all signals
|
||||
are resampled onto a common time signal) which, if passed as an array, will
|
||||
be ``True`` only if all of its elements are ``True``.
|
||||
|
||||
.. method:: BaylibreAcmeInstrument.reset(sites=None, kinds=None, channels=None)
|
||||
|
||||
:meth:`BaylibreAcmeInstrument.setup` should **always** be called before
|
||||
calling this method so that the hardware is correctly configured. Once this
|
||||
method has been called, :meth:`BaylibreAcmeInstrument.setup` can only be
|
||||
called again once :meth:`BaylibreAcmeInstrument.teardown` has been called.
|
||||
|
||||
This method inherits from :meth:`Instrument.reset`; call
|
||||
:meth:`list_channels` for a list of available channels from a given
|
||||
instance.
|
||||
|
||||
Please note that the size of the transaction buffer is proportional to the
|
||||
number of active channels (for a fixed ``buffer_samples_count``). Therefore,
|
||||
limiting the number of active channels makes it possible to limit the required
|
||||
bandwidth. ``high_resolution`` in :meth:`BaylibreAcmeInstrument.setup`
|
||||
limits the number of active channels to the minimum required.
|
||||
|
||||
.. method:: BaylibreAcmeInstrument.start()
|
||||
|
||||
:meth:`BaylibreAcmeInstrument.reset` should **always** be called before
|
||||
calling this method so that the right channels are active,
|
||||
:meth:`BaylibreAcmeInstrument.stop` should **always** be called after
|
||||
calling this method and no other method of the object should be called
|
||||
in-between.
|
||||
|
||||
This method starts the sampling process of the active channels. The samples
|
||||
are stored but are not available until :meth:`BaylibreAcmeInstrument.stop`
|
||||
has been called.
|
||||
|
||||
.. method:: BaylibreAcmeInstrument.stop()
|
||||
|
||||
:meth:`BaylibreAcmeInstrument.start` should **always** be called before
|
||||
calling this method so that samples are being captured.
|
||||
|
||||
This method stops the sampling process of the active channels and retrieves
|
||||
and pre-processes the samples. Once this function has been called, the
|
||||
samples are made available through :meth:`BaylibreAcmeInstrument.get_data`.
|
||||
Note that it is safe to call :meth:`BaylibreAcmeInstrument.start` after this
|
||||
method returns but this will discard the data previously acquired.
|
||||
|
||||
When this method returns, it is guaranteed that the content of at least one
|
||||
IIO buffer will have been captured.
|
||||
|
||||
If different sampling frequencies were used for the different probes, the
|
||||
signals are resampled to share the time signal with the highest sampling
|
||||
frequency.
|
||||
|
||||
.. method:: BaylibreAcmeInstrument.teardown()
|
||||
|
||||
This method can be called at any point (unless otherwise specified *e.g.*
|
||||
:meth:`BaylibreAcmeInstrument.start`) to deactivate any active probe once
|
||||
:meth:`BaylibreAcmeInstrument.reset` has been called. This method does not
|
||||
affect already captured samples.
|
||||
|
||||
The following graph gives a summary of the allowed calling sequence(s) where
|
||||
each edge means "can be called directly after":
|
||||
|
||||
.. graphviz::
|
||||
|
||||
digraph acme_calls {
|
||||
rankdir = LR
|
||||
bgcolor = transparent
|
||||
|
||||
__init__ -> setup -> reset -> start -> stop -> teardown
|
||||
|
||||
teardown:sw -> setup [style=dashed]
|
||||
teardown -> reset [style=dashed]
|
||||
|
||||
stop -> reset [style=dashed]
|
||||
stop:nw -> start [style=dashed]
|
||||
|
||||
reset -> teardown [style=dashed]
|
||||
}
|
||||
|
||||
.. method:: BaylibreAcmeInstrument.get_data(outfile=None)
|
||||
|
||||
Inherited from :meth:`Instrument.get_data`. If ``outfile`` is ``None``
|
||||
(default), the samples are returned as a `pandas.DataFrame` with the
|
||||
channels as columns. Else, it behaves like the parent class, returning a
|
||||
``MeasurementCsv``.
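
A small hedged usage sketch, continuing from an ``acme`` instance that has
already captured samples; the column name is hypothetical and depends on the
probe names and enabled channels:

.. code-block:: python

    df = acme.get_data()         # pandas.DataFrame, one column per channel
    acme.get_data('acme.csv')    # or write a MeasurementCsv to disk instead

    print(df['battery_power'].mean())   # hypothetical column name
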
|
||||
|
||||
.. method:: BaylibreAcmeInstrument.add_channel()
|
||||
|
||||
Should not be used as new channels are discovered through the IIO context.
|
||||
|
||||
.. method:: BaylibreAcmeInstrument.list_channels()
|
||||
|
||||
Inherited from :meth:`Instrument.list_channels`.
|
||||
|
||||
.. attribute:: BaylibreAcmeInstrument.sample_rate_hz
|
||||
.. attribute:: BaylibreAcmeInstrument.OVERSAMPLING_RATIOS_AVAILABLE
|
||||
.. attribute:: BaylibreAcmeInstrument.INTEGRATION_TIMES_AVAILABLE
|
||||
|
||||
These attributes return the corresponding attributes of the probes if they
|
||||
all share the same value (and are therefore provided to avoid reading from a
|
||||
single probe and expecting the others to share this value). They should be
|
||||
used whenever the assumption that all probes share the same value for the
|
||||
accessed attribute is made. For this reason, an exception is raised if it is
|
||||
not the case.
|
||||
|
||||
If probes are active (*i.e.* :meth:`BaylibreAcmeInstrument.reset` has been
|
||||
called), only these are read for the value of the attribute (as others have
|
||||
been tagged to be ignored). If not, all probes are used.
|
||||
|
||||
.. attribute:: BaylibreAcmeInstrument.probes
|
||||
|
||||
Dictionary of :class:`IIOINA226Instrument` instances representing the probes
|
||||
connected to the ACME. If provided to the constructor, the keys are the
|
||||
``probe_names`` that were passed.
|
||||
|
||||
ACME Probes (IIO Devices)
|
||||
"""""""""""""""""""""""""
|
||||
|
||||
The following class is not supposed to be instantiated by user code: its
API is documented here because the ACME probes can be accessed through the
|
||||
:attr:`BaylibreAcmeInstrument.probes` attribute.
|
||||
|
||||
.. class:: IIOINA226Instrument(iio_device)
|
||||
|
||||
This class is a wrapper for the :class:`iio.Device` class and takes a valid
|
||||
instance as ``iio_device``. It is not supposed to be instantiated by the
|
||||
user and its partial documentation is provided for read-access only.
|
||||
|
||||
.. attribute:: IIOINA226Instrument.shunt_resistor
|
||||
.. attribute:: IIOINA226Instrument.sample_rate_hz
|
||||
.. attribute:: IIOINA226Instrument.oversampling_ratio
|
||||
.. attribute:: IIOINA226Instrument.integration_time_shunt
|
||||
.. attribute:: IIOINA226Instrument.integration_time_bus
|
||||
.. attribute:: IIOINA226Instrument.OVERSAMPLING_RATIOS_AVAILABLE
|
||||
.. attribute:: IIOINA226Instrument.INTEGRATION_TIMES_AVAILABLE
|
||||
|
||||
These attributes are provided *for reference* and should not be assigned to
|
||||
but can be used to make the user code more readable, if needed. Please note
|
||||
that, as reading these attributes reads the underlying value from the
|
||||
hardware, they should not be read when the ACME is active, *i.e.* when
|
||||
:meth:`BaylibreAcmeInstrument.setup` has been called without calling
|
||||
:meth:`BaylibreAcmeInstrument.teardown`.
|
||||
|
||||
|
||||
Examples
|
||||
""""""""
|
||||
|
||||
The following example shows a basic use of an ACME at IP address
|
||||
``ACME_IP_ADDR`` with 2 probes connected, capturing all the channels during
|
||||
(roughly) 10 seconds at a sampling rate of 613 Hz and outputting the
|
||||
measurements to the CSV file ``acme.csv``:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import time
|
||||
import devlib
|
||||
|
||||
acme = devlib.BaylibreAcmeNetworkInstrument(hostname=ACME_IP_ADDR,
|
||||
probe_names=['battery', 'usb'])
|
||||
|
||||
int_times = acme.INTEGRATION_TIMES_AVAILABLE
|
||||
ratios = acme.OVERSAMPLING_RATIOS_AVAILABLE
|
||||
|
||||
acme.setup(shunt_resistor=20000,
|
||||
integration_time_bus=int_times[1],
|
||||
integration_time_shunt=int_times[1],
|
||||
oversampling_ratio=ratios[1])
|
||||
|
||||
acme.reset()
|
||||
acme.start()
|
||||
time.sleep(10)
|
||||
acme.stop()
|
||||
acme.get_data('acme.csv')
|
||||
acme.teardown()
|
||||
|
||||
It is common to have different resistances for different probe shunt resistors.
|
||||
Furthermore, we may want to have different sampling frequencies for different
|
||||
probes (*e.g.* if it is known that the USB voltage changes rather slowly).
|
||||
Finally, it is possible to set the integration times for the bus and shunt
|
||||
voltages of the same probe to different values. The following call to
|
||||
:meth:`BaylibreAcmeInstrument.setup` illustrates these:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
acme.setup(shunt_resistor=[20000, 10000],
|
||||
integration_time_bus=[int_times[2], int_times[3]],
|
||||
integration_time_shunt=[int_times[3], int_times[4]],
|
||||
oversampling_ratio=[ratios[0], ratios[1]])
|
||||
|
||||
for n, p in acme.probes.items():
|
||||
print('{}:'.format(n))
|
||||
print(' T_bus = {} s'.format(p.integration_time_bus))
|
||||
print(' T_shn = {} s'.format(p.integration_time_shunt))
|
||||
print(' N = {}'.format(p.oversampling_ratio))
|
||||
print(' freq = {} Hz'.format(p.sample_rate_hz))
|
||||
|
||||
# Output:
|
||||
#
|
||||
# battery:
|
||||
# T_bus = 0.000332 s
|
||||
# T_shn = 0.000588 s
|
||||
# N = 1
|
||||
# freq = 1087 Hz
|
||||
# usb:
|
||||
# T_bus = 0.000588 s
|
||||
# T_shn = 0.0011 s
|
||||
# N = 4
|
||||
# freq = 148 Hz
|
||||
|
||||
Please keep in mind that calling ``acme.get_data('acme.csv')`` after capturing
|
||||
samples with this setup will output signals with the same sampling frequency
|
||||
(the highest one among the sampling frequencies) as the signals are resampled
|
||||
to output a single time signal.
|
||||
|
||||
.. rubric:: Footnotes
|
||||
|
||||
.. [#acme_probe_variants] There exist different variants of the ACME probe (USB, Jack, shunt resistor) but they all use the same probing hardware (the TI INA226) and don't differ from the point of view of the software stack (at any level, including devlib, the highest one)
|
||||
|
||||
.. [#acme_name_conflicts] Be careful that in cases where multiple ACME boards are being used, it may be required to manually handle name conflicts
|
||||
|
@@ -106,11 +106,20 @@ policies (governors). The ``devlib`` module exposes the following interface
|
||||
target.cpufreq.set_min_frequency(cpu, frequency[, exact=True])
|
||||
target.cpufreq.set_max_frequency(cpu, frequency[, exact=True])
|
||||
|
||||
Get and set min and max frequencies on the specified CPU. "set" functions are
|
||||
available with all governors other than ``userspace``.
|
||||
Get the currently set, or set new min and max frequencies for the specified
|
||||
CPU. "set" functions are available with all governors other than
|
||||
``userspace``.
|
||||
|
||||
:param cpu: The cpu; could be a numeric value or the corresponding string (e.g.
|
||||
``1`` or ``"cpu1"``).
|
||||
|
||||
.. method:: target.cpufreq.get_min_available_frequency(cpu)
|
||||
target.cpufreq.get_max_available_frequency(cpu)
|
||||
|
||||
Retrieve the min or max DVFS frequency that is supported (as opposed to
|
||||
currently enforced) for a given CPU. Returns an int, or None if it could not be
|
||||
determined.
|
||||
|
||||
:param frequency: Frequency to set.
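
A hedged sketch pinning a CPU to its widest supported frequency range using the
calls above (it assumes a connected ``target`` with the ``cpufreq`` module
loaded):

.. code-block:: python

    cpu = 0
    fmin = target.cpufreq.get_min_available_frequency(cpu)
    fmax = target.cpufreq.get_max_available_frequency(cpu)

    # Either call may return None if the value could not be determined.
    if fmin is not None and fmax is not None:
        target.cpufreq.set_min_frequency(cpu, fmin)
        target.cpufreq.set_max_frequency(cpu, fmax)
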
|
||||
|
||||
.. method:: target.cpufreq.get_frequency(cpu)
|
||||
@@ -126,7 +135,7 @@ policies (governors). The ``devlib`` module exposes the following interface
|
||||
cpuidle
|
||||
-------
|
||||
|
||||
``cpufreq`` is the kernel subsystem for managing CPU low power (idle) states.
|
||||
``cpuidle`` is the kernel subsystem for managing CPU low power (idle) states.
|
||||
|
||||
.. method:: target.cpuidle.get_driver()
|
||||
|
||||
@@ -182,7 +191,7 @@ Every module (ultimately) derives from :class:`Module` class. A module must
|
||||
define the following class attributes:
|
||||
|
||||
:name: A unique name for the module. This cannot clash with any of the existing
|
||||
names and must be a valid Python identifier, but is otherwise free-from.
|
||||
names and must be a valid Python identifier, but is otherwise free-form.
|
||||
:kind: This identifies the type of functionality a module implements, which in
|
||||
turn determines the interface implemented by the module (all modules of
|
||||
the same kind must expose a consistent interface). This must be a valid
|
||||
@@ -203,6 +212,9 @@ define the following class attributes:
|
||||
:early: The module will be installed when a :class:`Target` is first
|
||||
created. This should be used for modules that do not rely on a
|
||||
live connection to the target.
|
||||
:setup: The module will be installed after initial setup of the device
|
||||
has been performed. This allows the module to utilize assets
|
||||
deployed during the setup stage, for example 'Busybox'.
|
||||
|
||||
Additionally, a module must implement a static (or class) method :func:`probe`:
|
||||
|
||||
|
102
doc/overview.rst
102
doc/overview.rst
@@ -2,10 +2,12 @@ Overview
|
||||
========
|
||||
|
||||
A :class:`Target` instance serves as the main interface to the target device.
|
||||
There currently three target interfaces:
|
||||
There are currently four target interfaces:
|
||||
|
||||
- :class:`LinuxTarget` for interacting with Linux devices over SSH.
|
||||
- :class:`AndroidTraget` for interacting with Android devices over adb.
|
||||
- :class:`AndroidTarget` for interacting with Android devices over adb.
|
||||
- :class:`ChromeOsTarget`: for interacting with ChromeOS devices over SSH, and
|
||||
their Android containers over adb.
|
||||
- :class:`LocalLinuxTarget`: for interacting with the local Linux host.
|
||||
|
||||
They all work in more-or-less the same way, with the major difference being in
|
||||
@@ -37,6 +39,7 @@ instantiating each of the three target types.
|
||||
'password': 'sekrit',
|
||||
# or
|
||||
'keyfile': '/home/me/.ssh/id_rsa'})
|
||||
# ChromeOsTarget connection is performed in the same way as LinuxTarget
|
||||
|
||||
# For an Android target, you will need to pass the device name as reported
|
||||
# by "adb devices". If there is only one device visible to adb, you can omit
|
||||
@@ -74,13 +77,19 @@ This sets up the target for ``devlib`` interaction. This includes creating
|
||||
working directories, deploying busybox, etc. It's usually enough to do this once
|
||||
for a new device, as the changes this makes will persist across reboots.
|
||||
However, there is no issue with calling this multiple times, so, to be on the
|
||||
safe site, it's a good idea to call this once at the beginning of your scripts.
|
||||
safe side, it's a good idea to call this once at the beginning of your scripts.
|
||||
|
||||
Command Execution
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
There are several ways to execute a command on the target. In each case, a
|
||||
:class:`TargetError` will be raised if something goes wrong. In very case, it is
|
||||
There are several ways to execute a command on the target. In each case, an
|
||||
instance of a subclass of :class:`TargetError` will be raised if something goes
|
||||
wrong. When a transient error is encountered such as the loss of the network
|
||||
connectivity, it will raise a :class:`TargetTransientError`. When the command
|
||||
fails, it will raise a :class:`TargetStableError` unless the
|
||||
``will_succeed=True`` parameter is specified, in which case a
|
||||
:class:`TargetTransientError` will be raised since it is assumed that the
|
||||
command cannot fail unless there is an environment issue. In each case, it is
|
||||
also possible to specify ``as_root=True`` if the specified command should be
|
||||
executed as root.
|
||||
|
||||
@@ -154,7 +163,7 @@ Process Control
|
||||
# kill all running instances of a process.
|
||||
t.killall('badexe', signal=signal.SIGKILL)
|
||||
|
||||
# List processes running on the target. This retruns a list of parsed
|
||||
# List processes running on the target. This returns a list of parsed
|
||||
# PsEntry records.
|
||||
entries = t.ps()
|
||||
# e.g. print virtual memory sizes of all running sshd processes:
|
||||
@@ -173,7 +182,7 @@ Super User Privileges
|
||||
|
||||
It is not necessary for the account logged in on the target to have super user
|
||||
privileges, however the functionality will obviously be diminished, if that is
|
||||
not the case. ``devilib`` will determine if the logged in user has root
|
||||
not the case. ``devlib`` will determine if the logged in user has root
|
||||
privileges and the correct way to invoke it. You should avoid including "sudo"
|
||||
directly in your commands, instead, specify ``as_root=True`` where needed. This
|
||||
will make your scripts portable across multiple devices and OS's.
|
||||
@@ -213,6 +222,66 @@ executables_directory
|
||||
t.push('/local/path/to/assets.tar.gz', t.get_workpath('assets.tar.gz'))
|
||||
|
||||
|
||||
Exceptions Handling
|
||||
-------------------
|
||||
|
||||
Devlib custom exceptions all derive from :class:`DevlibError`. Some exceptions
|
||||
are further categorized into :class:`DevlibTransientError` and
|
||||
:class:`DevlibStableError`. Transient errors are raised when there is an issue
|
||||
in the environment that can happen randomly such as the loss of network
|
||||
connectivity. Even a properly configured environment can be subject to such
|
||||
transient errors. Stable errors are related to either programming errors or
|
||||
configuration issues in the broad sense. This distinction allows quicker
|
||||
analysis of failures, since most transient errors can be ignored unless they
|
||||
happen at an alarming rate. :class:`DevlibTransientError` usually propagates up
|
||||
to the caller of devlib APIs, since it means that an operation could not
|
||||
complete. Retrying it or bailing out is therefore a responsibility of the caller.
|
||||
|
||||
The hierarchy is as follows:
|
||||
|
||||
- :class:`DevlibError`
|
||||
|
||||
- :class:`WorkerThreadError`
|
||||
- :class:`HostError`
|
||||
- :class:`TargetError`
|
||||
|
||||
- :class:`TargetStableError`
|
||||
- :class:`TargetTransientError`
|
||||
- :class:`TargetNotRespondingError`
|
||||
|
||||
- :class:`DevlibStableError`
|
||||
|
||||
- :class:`TargetStableError`
|
||||
|
||||
- :class:`DevlibTransientError`
|
||||
|
||||
- :class:`TimeoutError`
|
||||
- :class:`TargetTransientError`
|
||||
- :class:`TargetNotRespondingError`
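
A minimal handling sketch following this hierarchy (it assumes a connected
``target`` and that the exception classes are importable from
``devlib.exception``):

.. code-block:: python

    from devlib.exception import TargetStableError, TargetTransientError

    try:
        output = target.execute('cat /proc/version')
    except TargetTransientError:
        # e.g. lost network connectivity: retrying or bailing out is up to
        # the caller.
        raise
    except TargetStableError:
        # e.g. a permission or configuration problem: retrying will not help.
        output = None
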
|
||||
|
||||
|
||||
Extending devlib
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
New devlib code is likely to face the decision of raising a transient or stable
|
||||
error. When it is unclear which one should be used, it can generally be assumed
|
||||
that the system is properly configured and therefore, the error is linked to an
|
||||
environment transient failure. If a function is somehow probing a property of a
|
||||
system in the broad meaning, it can use a stable error as a way to signal a
|
||||
non-expected value of that property even if it can also face transient errors.
|
||||
An example is the various ``execute()`` methods, where devlib cannot generally
assume that the command is supposed to succeed. Their failure does not
usually come from a random environment issue, but for example a permission
|
||||
error. The user can use such expected failure to probe the system. Another
|
||||
example is boot completion detection on Android: boot failure cannot be
|
||||
distinguished from a timeout which is too small. A non-transient exception is
|
||||
still raised, since assuming the timeout comes from a network failure would
|
||||
either make the function useless, or force the calling code to handle a
|
||||
transient exception under normal operation. The calling code would potentially
|
||||
wrongly catch transient exceptions raised by other functions as well and attach
|
||||
a wrong meaning to them.
|
||||
|
||||
|
||||
Modules
|
||||
-------
|
||||
|
||||
@@ -254,17 +323,14 @@ You can collected traces (currently, just ftrace) using
|
||||
# the buffer size to be used.
|
||||
trace = FtraceCollector(t, events=['power*'], buffer_size=40000)
|
||||
|
||||
# clear ftrace buffer
|
||||
trace.reset()
|
||||
|
||||
# start trace collection
|
||||
trace.start()
|
||||
|
||||
# Perform the operations you want to trace here...
|
||||
import time; time.sleep(5)
|
||||
|
||||
# stop trace collection
|
||||
trace.stop()
|
||||
# As a context manager, clear ftrace buffer using trace.reset(),
|
||||
# start trace collection using trace.start(), then stop it using
|
||||
# trace.stop(). Using a context manager brings the guarantee that
|
||||
# tracing will stop even if an exception occurs, including
|
||||
# KeyboardInterrupt (ctrl-C) and SystemExit (sys.exit)
|
||||
with trace:
|
||||
# Perform the operations you want to trace here...
|
||||
import time; time.sleep(5)
|
||||
|
||||
# extract the trace file from the target into a local file
|
||||
trace.get_trace('/tmp/trace.bin')
|
||||
|
@@ -18,17 +18,17 @@ it was not specified explicitly by the user.
|
||||
:param core_names: A list of CPU core names in the order they appear
|
||||
registered with the OS. If they are not specified,
|
||||
they will be queried at run time.
|
||||
:param core_clusters: Alist with cluster ids of each core (starting with
|
||||
:param core_clusters: A list with cluster ids of each core (starting with
|
||||
0). If this is not specified, clusters will be
|
||||
inferred from core names (cores with the same name are
|
||||
assumed to be in a cluster).
|
||||
:param big_core: The name of the big core in a big.LITTLE system. If this is
|
||||
not specified it will be inferred (on systems with exactly
|
||||
two clasters).
|
||||
two clusters).
|
||||
:param model: Model name of the hardware system. If this is not specified it
|
||||
will be queried at run time.
|
||||
:param modules: Modules with additional functionality supported by the
|
||||
platfrom (e.g. for handling flashing, rebooting, etc). These
|
||||
platform (e.g. for handling flashing, rebooting, etc). These
|
||||
would be added to the Target's modules. (See :ref:`modules`\ ).
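
A hedged instantiation sketch using the parameters above; the core names and
connection settings are purely illustrative:

.. code-block:: python

    from devlib import LinuxTarget, Platform

    # Anything left out (model, core_clusters, ...) is inferred or queried at
    # run time, as described above.
    platform = Platform(core_names=['a53', 'a53', 'a73', 'a73'],
                        big_core='a73')

    target = LinuxTarget(connection_settings={'host': '192.168.0.100',
                                              'username': 'root',
                                              'password': 'sekrit'},
                         platform=platform)
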
|
||||
|
||||
|
||||
@@ -38,13 +38,13 @@ Versatile Express
|
||||
The generic platform may be extended to support hardware- or
|
||||
infrastructure-specific functionality. Platforms exist for ARM
|
||||
VersatileExpress-based :class:`Juno` and :class:`TC2` development boards. In
|
||||
addition to the standard :class:`Platform` parameters above, these platfroms
|
||||
addition to the standard :class:`Platform` parameters above, these platforms
|
||||
support additional configuration:
|
||||
|
||||
|
||||
.. class:: VersatileExpressPlatform
|
||||
|
||||
Normally, this would be instatiated via one of its derived classes
|
||||
Normally, this would be instantiated via one of its derived classes
|
||||
(:class:`Juno` or :class:`TC2`) that set appropriate defaults for some of
|
||||
the parameters.
|
||||
|
||||
@@ -63,7 +63,7 @@ support additional configuration:
|
||||
mounted on the host system.
|
||||
:param hard_reset_method: Specifies the method for hard-resetting the devices
|
||||
(e.g. if it becomes unresponsive and normal reboot
|
||||
method doesn not work). Currently supported methods
|
||||
method doesn't work). Currently supported methods
|
||||
are:
|
||||
|
||||
:dtr: reboot by toggling DTR line on the serial
|
||||
@@ -80,7 +80,7 @@ support additional configuration:
|
||||
The following values are currently supported:
|
||||
|
||||
:uefi: Boot via UEFI menu, by selecting the entry
|
||||
specified by ``uefi_entry`` paramter. If this
|
||||
specified by ``uefi_entry`` parameter. If this
|
||||
entry does not exist, it will be automatically
|
||||
created based on values provided for ``image``,
|
||||
``initrd``, ``fdt``, and ``bootargs`` parameters.
|
||||
|
262
doc/target.rst
262
doc/target.rst
@@ -2,7 +2,7 @@ Target
|
||||
======
|
||||
|
||||
|
||||
.. class:: Target(connection_settings=None, platform=None, working_directory=None, executables_directory=None, connect=True, modules=None, load_default_modules=True, shell_prompt=DEFAULT_SHELL_PROMPT)
|
||||
.. class:: Target(connection_settings=None, platform=None, working_directory=None, executables_directory=None, connect=True, modules=None, load_default_modules=True, shell_prompt=DEFAULT_SHELL_PROMPT, conn_cls=None)
|
||||
|
||||
:class:`Target` is the primary interface to the remote device. All interactions
|
||||
with the device are performed via a :class:`Target` instance, either
|
||||
@@ -38,7 +38,7 @@ Target
|
||||
by the connection's account). This location will be created,
|
||||
if necessary, during ``setup()``.
|
||||
|
||||
This location does *not* to be same as the system's executables
|
||||
This location does *not* need to be the same as the system's executables
|
||||
location. In fact, to prevent devlib from overwriting system's defaults,
|
||||
it is better if this is a separate location, if possible.
|
||||
|
||||
@@ -68,6 +68,9 @@ Target
|
||||
prompted on the target. This may be used by some modules that establish
|
||||
auxiliary connections to a target over UART.
|
||||
|
||||
:param conn_cls: This is the type of connection that will be used to communicate
|
||||
with the device.
|
||||
|
||||
.. attribute:: Target.core_names
|
||||
|
||||
This is a list containing names of CPU cores on the target, in the order in
|
||||
@@ -83,12 +86,12 @@ Target
|
||||
|
||||
.. attribute:: Target.big_core
|
||||
|
||||
This is the name of the cores that the "big"s in an ARM big.LITTLE
|
||||
This is the name of the cores that are the "big"s in an ARM big.LITTLE
|
||||
configuration. This is obtained via the underlying :class:`Platform`.
|
||||
|
||||
.. attribute:: Target.little_core
|
||||
|
||||
This is the name of the cores that the "little"s in an ARM big.LITTLE
|
||||
This is the name of the cores that are the "little"s in an ARM big.LITTLE
|
||||
configuration. This is obtained via the underlying :class:`Platform`.
|
||||
|
||||
.. attribute:: Target.is_connected
|
||||
@@ -117,6 +120,16 @@ Target
|
||||
This is a dict that contains a mapping of OS version elements to their
|
||||
values. This mapping is OS-specific.
|
||||
|
||||
.. attribute:: Target.system_id
|
||||
|
||||
A unique identifier for the system running on the target. This identifier is
|
||||
intended to be unique for the combination of hardware, kernel, and file
|
||||
system.
|
||||
|
||||
.. attribute:: Target.model
|
||||
|
||||
The model name/number of the target device.
|
||||
|
||||
.. attribute:: Target.cpuinfo
|
||||
|
||||
This is a :class:`Cpuinfo` instance which contains parsed contents of
|
||||
@@ -199,25 +212,27 @@ Target
|
||||
operations during reboot process to detect if the reboot has failed and
|
||||
the device has hung.
|
||||
|
||||
.. method:: Target.push(source, dest [, timeout])
|
||||
.. method:: Target.push(source, dest [,as_root , timeout])
|
||||
|
||||
Transfer a file from the host machine to the target device.
|
||||
|
||||
:param source: path to the file on the host
:param dest: path to the file on the target
|
||||
:param as_root: whether root is required. Defaults to false.
|
||||
:param timeout: timeout (in seconds) for the transfer; if the transfer does
|
||||
not complete within this period, an exception will be raised.
|
||||
|
||||
.. method:: Target.pull(source, dest [, timeout])
|
||||
.. method:: Target.pull(source, dest [, as_root, timeout])
|
||||
|
||||
Transfer a file from the target device to the host machine.
|
||||
|
||||
:param source: path to the file on the target
:param dest: path to the file on the host
|
||||
:param as_root: whether root is required. Defaults to false.
|
||||
:param timeout: timeout (in seconds) for the transfer; if the transfer does
|
||||
not complete within this period, an exception will be raised.
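
A short hedged sketch of both transfer directions; the paths are illustrative
and ``target`` is assumed to be a connected instance:

.. code-block:: python

    # Host -> target: put a file into devlib's working directory on the target.
    target.push('./assets/config.txt', target.get_workpath('config.txt'))

    # Target -> host: copy a file back, allowing up to 30 s for the transfer.
    target.pull('/proc/cpuinfo', './cpuinfo.txt', timeout=30)
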
|
||||
|
||||
.. method:: Target.execute(command [, timeout [, check_exit_code [, as_root]]])
|
||||
.. method:: Target.execute(command [, timeout [, check_exit_code [, as_root [, strip_colors [, will_succeed]]]]])
|
||||
|
||||
Execute the specified command on the target device and return its output.
|
||||
|
||||
@@ -230,6 +245,13 @@ Target
|
||||
raised if it is not ``0``.
|
||||
:param as_root: The command will be executed as root. This will fail on
|
||||
unrooted targets.
|
||||
:param strip_colors: The command output will have colour encodings and
most ANSI escape sequences stripped out before returning.
|
||||
:param will_succeed: The command is assumed to always succeed, unless there is
|
||||
an issue in the environment like the loss of network connectivity. That
|
||||
will make the method always raise an instance of a subclass of
|
||||
:class:`DevlibTransientError` when the command fails, instead of a
|
||||
:class:`DevlibStableError`.
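
A hedged sketch of the ``will_succeed`` distinction (the commands are
illustrative and ``target`` is assumed to be connected):

.. code-block:: python

    # Environment probe: a failure here is treated as transient, so a
    # DevlibTransientError subclass is raised instead of a stable error.
    uptime = target.execute('cat /proc/uptime', will_succeed=True)

    # Privileged command on a rooted target; a non-zero exit code raises an
    # exception unless check_exit_code=False is passed.
    dmesg = target.execute('dmesg', as_root=True, check_exit_code=True)
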
|
||||
|
||||
.. method:: Target.background(command [, stdout [, stderr [, as_root]]])
|
||||
|
||||
@@ -265,6 +287,24 @@ Target
|
||||
:param timeout: If this is specified and invocation does not terminate within this number
|
||||
of seconds, an exception will be raised.
|
||||
|
||||
.. method:: Target.background_invoke(binary [, args [, in_directory [, on_cpus [, as_root ]]]])
|
||||
|
||||
Execute the specified binary on target (must already be installed) as a background
|
||||
task, under the specified conditions and return the :class:`subprocess.Popen`
|
||||
instance for the command.
|
||||
|
||||
:param binary: binary to execute. Must be present and executable on the device.
|
||||
:param args: arguments to be passed to the binary. This can be either a list or
|
||||
a string.
|
||||
:param in_directory: execute the binary in the specified directory. This must
|
||||
be an absolute path.
|
||||
:param on_cpus: taskset the binary to these CPUs. This may be a single ``int`` (in which
|
||||
case, it will be interpreted as the mask), a list of ``ints``, in which
|
||||
case this will be interpreted as the list of cpus, or string, which
|
||||
will be interpreted as a comma-separated list of cpu ranges, e.g.
|
||||
``"0,4-7"``.
|
||||
:param as_root: Specify whether the command should be run as root
|
||||
|
||||
.. method:: Target.kick_off(command [, as_root])
|
||||
|
||||
Kick off the specified command on the target and return immediately. Unlike
|
||||
@@ -288,11 +328,11 @@ Target
|
||||
|
||||
.. method:: Target.read_int(self, path)
|
||||
|
||||
Equivalent to ``Target.read_value(path, kind=devlab.utils.types.integer)``
|
||||
Equivalent to ``Target.read_value(path, kind=devlib.utils.types.integer)``
|
||||
|
||||
.. method:: Target.read_bool(self, path)
|
||||
|
||||
Equivalent to ``Target.read_value(path, kind=devlab.utils.types.boolean)``
|
||||
Equivalent to ``Target.read_value(path, kind=devlib.utils.types.boolean)``
|
||||
|
||||
.. method:: Target.write_value(path, value [, verify])
|
||||
|
||||
@@ -306,6 +346,41 @@ Target
|
||||
some sysfs entries silently failing to set the written value without
|
||||
returning an error code.
|
||||
|
||||
.. method:: Target.read_tree_values(path, depth=1, dictcls=dict, [, tar [, decode_unicode [, strip_null_char ]]]):
|
||||
|
||||
Read values of all sysfs (or similar) file nodes under ``path``, traversing
|
||||
up to the maximum depth ``depth``.
|
||||
|
||||
Returns a nested structure of dict-like objects (``dict``\ s by default) that
|
||||
follows the structure of the scanned sub-directory tree. The top-level entry
|
||||
has a single item who's key is ``path``. If ``path`` points to a single file,
|
||||
the value of the entry is the value ready from that file node. Otherwise, the
|
||||
value is a dict-line object with a key for every entry under ``path``
|
||||
mapping onto its value or further dict-like objects as appropriate.
|
||||
|
||||
Although the default behaviour should suit most users, it is possible to
|
||||
encounter issues when reading binary files, or files with colons in their
|
||||
name for example. In such cases, the ``tar`` parameter can be set to force a
|
||||
full archive of the tree using tar, hence providing a more robust behaviour.
|
||||
This can, however, slow down the read process significantly.
|
||||
|
||||
:param path: sysfs path to scan
|
||||
:param depth: maximum depth to descend
|
||||
:param dictcls: a dict-like type to be used for each level of the hierarchy.
|
||||
:param tar: the files will be read using tar rather than grep
|
||||
:param decode_unicode: decode the content of tar-ed files as utf-8
|
||||
:param strip_null_char: remove null chars from utf-8 decoded files
|
||||
|
||||
.. method:: Target.read_tree_values_flat(path, depth=1):
|
||||
|
||||
Read values of all sysfs (or similar) file nodes under ``path``, traversing
|
||||
up to the maximum depth ``depth``.
|
||||
|
||||
Returns a dict mapping paths of file nodes to corresponding values.
|
||||
|
||||
:param path: sysfs path to scan
|
||||
:param depth: maximum depth to descend
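
A hedged usage sketch of both variants; the sysfs path is illustrative and
``target`` is assumed to be a connected instance:

.. code-block:: python

    # Nested dict-of-dicts mirroring the directory structure:
    tree = target.read_tree_values('/sys/class/thermal', depth=2)

    # Flat {path: value} mapping over the same nodes:
    flat = target.read_tree_values_flat('/sys/class/thermal', depth=2)
    print(flat.get('/sys/class/thermal/thermal_zone0/temp'))
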
|
||||
|
||||
.. method:: Target.reset()
|
||||
|
||||
Soft reset the target. Typically, this means executing ``reboot`` on the
|
||||
@@ -392,7 +467,9 @@ Target
|
||||
.. method:: Target.capture_screen(filepath)
|
||||
|
||||
Take a screenshot on the device and save it to the specified file on the
|
||||
host. This may not be supported by the target.
|
||||
host. This may not be supported by the target. You can optionally insert a
|
||||
``{ts}`` tag into the file name, in which case it will be substituted with
|
||||
the on-target timestamp of the screenshot in ISO8601 format.
|
||||
|
||||
.. method:: Target.install(filepath[, timeout[, with_name]])
|
||||
|
||||
@@ -402,6 +479,17 @@ Target
|
||||
:param timeout: Optional timeout (in seconds) for the installation
|
||||
:param with_name: This may be used to rename the executable on the target
|
||||
|
||||
|
||||
.. method:: Target.install_if_needed(host_path, search_system_binaries=True)
|
||||
|
||||
Check to see if the binary is already installed on the device and if not,
|
||||
install it.
|
||||
|
||||
:param host_path: path to the executable on the host
|
||||
:param search_system_binaries: Specify whether to search the device's PATH
|
||||
when checking to see if the executable is installed, otherwise only check
|
||||
user installed binaries.
|
||||
|
||||
.. method:: Target.uninstall(name)
|
||||
|
||||
Uninstall the specified executable from the target
|
||||
@@ -422,13 +510,163 @@ Target
|
||||
|
||||
.. method:: Target.extract(path, dest=None)
|
||||
|
||||
Extracts the specified archive/file and returns the path to the extrated
|
||||
Extracts the specified archive/file and returns the path to the extracted
|
||||
contents. The extraction method is determined based on the file extension.
|
||||
``zip``, ``tar``, ``gzip``, and ``bzip2`` are supported.
|
||||
|
||||
:param dest: Specified an on-target destination directory (which must exist)
|
||||
for the extrated contents.
|
||||
for the extracted contents.
|
||||
|
||||
Returns the path to the extracted contents. In case of files (gzip and
|
||||
bzip2), the path to the decompressed file is returned; for archives, the
|
||||
path to the directory with the archive's contents is returned.
|
||||
|
||||
.. method:: Target.is_network_connected()
|
||||
|
||||
Checks for internet connectivity on the device. This doesn't actually
|
||||
guarantee that the internet connection is "working" (which is rather
|
||||
nebulous), it's intended just for failing early when definitively _not_
|
||||
connected to the internet.
|
||||
|
||||
:returns: ``True`` if internet seems available, ``False`` otherwise.
|
||||
|
||||
Android Target
|
||||
---------------
|
||||
|
||||
.. class:: AndroidTarget(connection_settings=None, platform=None, working_directory=None, executables_directory=None, connect=True, modules=None, load_default_modules=True, shell_prompt=DEFAULT_SHELL_PROMPT, conn_cls=AdbConnection, package_data_directory="/data/data")
|
||||
|
||||
:class:`AndroidTarget` is a subclass of :class:`Target` with additional features specific to a device running Android.
|
||||
|
||||
:param package_data_directory: This is the location of the data stored
|
||||
for installed Android packages on the device.
|
||||
|
||||
.. method:: AndroidTarget.set_rotation(rotation)
|
||||
|
||||
Specify an integer representing the desired screen rotation with the
|
||||
following mappings: Natural: ``0``, Rotated Left: ``1``, Inverted : ``2``
|
||||
and Rotated Right : ``3``.
|
||||
|
||||
.. method:: AndroidTarget.get_rotation()
|
||||
|
||||
Returns an integer value representing the orientation of the device's
|
||||
screen. ``0`` : Natural, ``1`` : Rotated Left, ``2`` : Inverted
|
||||
and ``3`` : Rotated Right.
|
||||
|
||||
.. method:: AndroidTarget.set_natural_rotation()
|
||||
|
||||
Sets the screen orientation of the device to its natural (0 degrees)
|
||||
orientation.
|
||||
|
||||
.. method:: AndroidTarget.set_left_rotation()
|
||||
|
||||
Sets the screen orientation of the device to 90 degrees.
|
||||
|
||||
.. method:: AndroidTarget.set_inverted_rotation()
|
||||
|
||||
Sets the screen orientation of the device to its inverted (180 degrees)
|
||||
orientation.
|
||||
|
||||
.. method:: AndroidTarget.set_right_rotation()
|
||||
|
||||
Sets the screen orientation of the device to 270 degrees.
|
||||
|
||||
.. method:: AndroidTarget.set_auto_rotation(autorotate)
|
||||
|
||||
Specify a boolean value for whether the device's auto-rotation should
|
||||
be enabled.
|
||||
|
||||
.. method:: AndroidTarget.get_auto_rotation()
|
||||
|
||||
Returns ``True`` if the target's auto-rotation is currently enabled and
|
||||
``False`` otherwise.
|
||||
|
||||
.. method:: AndroidTarget.set_airplane_mode(mode)
|
||||
|
||||
Specify a boolean value for whether the device should be in airplane mode.
|
||||
|
||||
.. note:: Requires the device to be rooted if the device is running Android 7+.
|
||||
|
||||
.. method:: AndroidTarget.get_airplane_mode()
|
||||
|
||||
Returns ``True`` if the target is currently in airplane mode and
|
||||
``False`` otherwise.
|
||||
|
||||
.. method:: AndroidTarget.set_brightness(value)
|
||||
|
||||
Sets the device's screen brightness to a specified integer between ``0`` and
|
||||
``255``.
|
||||
|
||||
.. method:: AndroidTarget.get_brightness()
|
||||
|
||||
Returns an integer between ``0`` and ``255`` representing the device's
|
||||
current screen brightness.
|
||||
|
||||
.. method:: AndroidTarget.set_auto_brightness(auto_brightness)
|
||||
|
||||
Specify a boolean value for whether the device's auto-brightness
|
||||
should be enabled.
|
||||
|
||||
.. method:: AndroidTarget.get_auto_brightness()
|
||||
|
||||
Returns ``True`` if the target's auto-brightness is currently
|
||||
enabled and ``False`` otherwise.
|
||||
|
||||
.. method:: AndroidTarget.ensure_screen_is_off()
|
||||
|
||||
Checks if the device's screen is on and, if so, turns it off.
|
||||
|
||||
.. method:: AndroidTarget.ensure_screen_is_on()
|
||||
|
||||
Checks if the device's screen is off and, if so, turns it on.
|
||||
|
||||
.. method:: AndroidTarget.is_screen_on()
|
||||
|
||||
Returns ``True`` if the target's screen is currently on and ``False``
|
||||
otherwise.
|
||||
|
||||
.. method:: AndroidTarget.homescreen()
|
||||
|
||||
Returns the device to its home screen.
|
||||
|
||||
.. method:: AndroidTarget.swipe_to_unlock(direction="diagonal")
|
||||
|
||||
Performs a swipe input on the device to try and unlock the device.
|
||||
A direction of ``"horizontal"``, ``"vertical"`` or ``"diagonal"``
|
||||
can be supplied to specify in which direction the swipe should be
|
||||
performed. By default ``"diagonal"`` will be used to try and
|
||||
support the majority of newer devices.
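
A short hedged sketch stringing some of these calls together on a connected
``target`` (the brightness value is illustrative):

.. code-block:: python

    # Wake the device and put the screen into a known state.
    target.ensure_screen_is_on()
    target.swipe_to_unlock(direction='horizontal')
    target.set_auto_rotation(False)
    target.set_natural_rotation()
    target.set_auto_brightness(False)
    target.set_brightness(128)

    print(target.get_rotation(), target.get_brightness())
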
|
||||
|
||||
|
||||
ChromeOS Target
|
||||
---------------
|
||||
|
||||
.. class:: ChromeOsTarget(connection_settings=None, platform=None, working_directory=None, executables_directory=None, android_working_directory=None, android_executables_directory=None, connect=True, modules=None, load_default_modules=True, shell_prompt=DEFAULT_SHELL_PROMPT, package_data_directory="/data/data")
|
||||
|
||||
:class:`ChromeOsTarget` is a subclass of :class:`LinuxTarget` with
|
||||
additional features specific to a device running ChromeOS, for example,
|
||||
if supported, its own android container which can be accessed via the
|
||||
``android_container`` attribute. When making calls to or accessing
|
||||
properties and attributes of the ChromeOS target, by default they will
|
||||
be applied to Linux target as this is where the majority of device
|
||||
configuration will be performed and if not available, will fall back to
|
||||
using the android container if available. This means that all the
|
||||
available methods from
|
||||
:class:`LinuxTarget` and :class:`AndroidTarget` are available for
|
||||
:class:`ChromeOsTarget` if the device supports android otherwise only the
|
||||
:class:`LinuxTarget` methods will be available.
|
||||
|
||||
:param working_directory: This is the location of the working
|
||||
directory to be used for the Linux target container. If not specified will
|
||||
default to ``"/mnt/stateful_partition/devlib-target"``.
|
||||
|
||||
:param android_working_directory: This is the location of the working
|
||||
directory to be used for the android container. If not specified it will
|
||||
use the working directory default for :class:`AndroidTarget.`.
|
||||
|
||||
:param android_executables_directory: This is the location of the
|
||||
executables directory to be used for the android container. If not
|
||||
specified will default to a ``bin`` subfolder in the
|
||||
``android_working_directory.``
|
||||
|
||||
:param package_data_directory: This is the location of the data stored
|
||||
for installed Android packages on the device.
|
||||
|
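A brief instantiation sketch; the connection settings shown are placeholder
values rather than real device credentials:

.. code-block:: python

    from devlib import ChromeOsTarget

    # Placeholder address and credentials for an SSH-accessible ChromeOS device.
    target = ChromeOsTarget(connection_settings={'host': '192.168.0.100',
                                                 'username': 'root',
                                                 'password': 'test0000'})

    target.execute('uname -a')    # handled by the underlying LinuxTarget
    target.swipe_to_unlock()      # AndroidTarget method; requires the device
                                  # to expose an Android container
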
setup.py
@@ -13,6 +13,7 @@
 # limitations under the License.
 #
 
+import imp
 import os
 import sys
 import warnings
@@ -20,8 +21,10 @@ from itertools import chain
 
 try:
     from setuptools import setup
+    from setuptools.command.sdist import sdist as orig_sdist
 except ImportError:
     from distutils.core import setup
+    from distutils.command.sdist import sdist as orig_sdist
 
 
 devlib_dir = os.path.join(os.path.dirname(__file__), 'devlib')
@@ -37,6 +40,16 @@ try:
 except OSError:
     pass
 
+
+vh_path = os.path.join(devlib_dir, 'utils', 'version.py')
+# can load this, as it does not have any devlib imports
+version_helper = imp.load_source('version_helper', vh_path)
+__version__ = version_helper.get_devlib_version()
+commit = version_helper.get_commit()
+if commit:
+    __version__ = '{}+{}'.format(__version__, commit)
+
+
 packages = []
 data_files = {}
 source_dir = os.path.dirname(__file__)
@@ -59,10 +72,10 @@ for root, dirs, files in os.walk(devlib_dir):
 params = dict(
     name='devlib',
     description='A framework for automating workload execution and measurment collection on ARM devices.',
-    version='0.0.4',
+    version=__version__,
     packages=packages,
     package_data=data_files,
-    url='N/A',
+    url='https://github.com/ARM-software/devlib',
     license='Apache v2',
     maintainer='ARM Ltd.',
     install_requires=[
@@ -70,10 +83,16 @@ params = dict(
         'pexpect>=3.3', # Send/recieve to/from device
         'pyserial', # Serial port interface
         'wrapt', # Basic for construction of decorator functions
+        'future', # Python 2-3 compatibility
+        'enum34;python_version<"3.4"', # Enums for Python < 3.4
+        'pandas',
+        'numpy',
     ],
     extras_require={
         'daq': ['daqpower'],
         'doc': ['sphinx'],
+        'monsoon': ['python-gflags'],
+        'acme': ['pandas', 'numpy'],
     },
     # https://pypi.python.org/pypi?%3Aaction=list_classifiers
     classifiers=[
@@ -84,7 +103,28 @@ params = dict(
     ],
 )
 
-all_extras = list(chain(params['extras_require'].itervalues()))
+all_extras = list(chain(iter(params['extras_require'].values())))
 params['extras_require']['full'] = all_extras
 
+
+class sdist(orig_sdist):
+
+    user_options = orig_sdist.user_options + [
+        ('strip-commit', 's',
+         "Strip git commit hash from package version ")
+    ]
+
+    def initialize_options(self):
+        orig_sdist.initialize_options(self)
+        self.strip_commit = False
+
+
+    def run(self):
+        if self.strip_commit:
+            self.distribution.get_version = lambda : __version__.split('+')[0]
+        orig_sdist.run(self)
+
+
+params['cmdclass'] = {'sdist': sdist}
+
 setup(**params)
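With the custom ``sdist`` command above in place, the ``+<commit>`` suffix
appended to the package version can presumably be dropped at packaging time by
running ``python setup.py sdist --strip-commit``, while a plain
``python setup.py sdist`` keeps it.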
@@ -114,7 +114,7 @@ struct reading
     double sys_enm_ch0_gpu;
 };
 
-inline uint64_t join_64bit_register(uint32_t *buffer, int index)
+static inline uint64_t join_64bit_register(uint32_t *buffer, int index)
 {
     uint64_t result = 0;
     result |= buffer[index];
@@ -254,10 +254,10 @@ void emeter_init(struct emeter *this, char *outfile)
     }
 
     if(this->out) {
-        fprintf(this->out, "sys_curr,a57_curr,a53_curr,gpu_curr,"
-                "sys_volt,a57_volt,a53_volt,gpu_volt,"
-                "sys_pow,a57_pow,a53_pow,gpu_pow,"
-                "sys_cenr,a57_cenr,a53_cenr,gpu_cenr\n");
+        fprintf(this->out, "sys_current,a57_current,a53_current,gpu_current,"
+                "sys_voltage,a57_voltage,a53_voltage,gpu_voltage,"
+                "sys_power,a57_power,a53_power,gpu_power,"
+                "sys_energy,a57_energy,a53_energy,gpu_energy\n");
     }
 }
 
tests/test_target.py (new file)
@@ -0,0 +1,32 @@
import os
import shutil
import tempfile
from unittest import TestCase

from devlib import LocalLinuxTarget


class TestReadTreeValues(TestCase):

    def test_read_multiline_values(self):
        data = {
            'test1': '1',
            'test2': '2\n\n',
            'test3': '3\n\n4\n\n',
        }

        tempdir = tempfile.mkdtemp(prefix='devlib-test-')
        for key, value in data.items():
            path = os.path.join(tempdir, key)
            with open(path, 'w') as wfh:
                wfh.write(value)

        t = LocalLinuxTarget(connection_settings={'unrooted': True})
        raw_result = t.read_tree_values_flat(tempdir)
        result = {os.path.basename(k): v for k, v in raw_result.items()}

        shutil.rmtree(tempdir)

        self.assertEqual({k: v.strip()
                          for k, v in data.items()},
                         result)